// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	do {
		ret = bpf_cgroup_storage_set(storage);
		if (ret)
			break;

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		bpf_cgroup_storage_unset();
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
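
/* Note on the reported duration (illustrative, not uapi documentation):
 * with repeat = N, bpf_test_timer_continue() accumulates ktime_get_ns()
 * deltas over all N runs (pausing the clock around cond_resched()) and
 * divides by N, so user space sees the mean cost of one run in ns:
 *
 *	duration = min(time_spent / N, U32_MAX)
 */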
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp the copy if the user has provided a size hint; otherwise
	 * copy the full buffer to retain the old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Seven or more arguments
 * can be supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)

bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
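
/* Layout of the buffer returned by bpf_test_init() above; everything comes
 * from a single kzalloc(), so the headroom and tailroom start out zeroed:
 *
 *	+----------+------------------------+----------+
 *	| headroom | user data (user_size)  | tailroom |
 *	+----------+------------------------+----------+
 *	^ data     ^ data + headroom
 */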
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online() after
		 * csd_lock(). However, since cpu comes from user space, let's
		 * do an extra quick check to filter out invalid values before
		 * smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}
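
/* User-space sketch (illustration only, assuming the libbpf
 * bpf_test_run_opts() API): run a raw_tp program on CPU 2, passing two
 * u64 "tracepoint arguments" as the context:
 *
 *	__u64 args[2] = { 1, 2 };
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.ctx_in = args,
 *		.ctx_size_in = sizeof(args),
 *		.flags = BPF_F_TEST_RUN_ON_CPU,
 *		.cpu = 2,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 */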
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there are no non-zero bytes in @buf in the
 * range [@from, @to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
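
/* The converters below use range_is_zero() to enforce a forward-compatible
 * contract: user space may pass in a struct (__sk_buff, bpf_flow_keys,
 * bpf_sk_lookup) in which only the explicitly supported fields are set;
 * every byte outside those fields must be zero or the run is rejected
 * with -EINVAL.
 */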
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}
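
/* Fields that round-trip through the converter pair above: mark, priority,
 * tstamp, cb[] (stashed in qdisc_skb_cb.data), wire_len (stashed in
 * qdisc_skb_cb.pkt_len) and gso_segs.  gso_size is consumed on input only;
 * ifindex is used by the caller for the device lookup on input and is
 * reported back from skb->dev on output.
 */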
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, net);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* A BPF program can never convert a linear skb to a non-linear one. */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in xdp->rxq for later dev_put(). */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}
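
/* In xdp_convert_md_to_buff() above, xdp_md->data is an offset rather than
 * a pointer: the metadata area spans [data_meta, data), so
 *
 *	xdp->data = xdp->data_meta + xdp_md->data;
 *
 * leaves xdp_md->data bytes of metadata in front of the packet.
 * bpf_prog_test_run_xdp() below carves that space out of the headroom.
 */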
static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	struct xdp_md *ctx;
	u32 max_data_sz;
	void *data;
	int ret = -EINVAL;

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user-provided data before the metadata. */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)))
			goto free_ctx;
		/* Metadata is allocated from the headroom. */
		headroom -= ctx->data;
	}

	/* XDP has extra tailroom, as (most) drivers use a full page. */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	if (xdp.data_meta != data + headroom ||
	    xdp.data_end != xdp.data_meta + size)
		size = xdp.data_end - xdp.data_meta;

	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	bpf_prog_change_xdp(prog, NULL);
free_data:
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}
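
/* User-space sketch (illustration only, assuming the libbpf
 * bpf_test_run_opts() API): run an XDP program over a 64-byte packet and
 * read back the verdict, the (possibly modified) data and the mean run
 * time:
 *
 *	char in[64] = {0}, out[128];
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = in,
 *		.data_size_in = sizeof(in),
 *		.data_out = out,
 *		.data_size_out = sizeof(out),
 *		.repeat = 100,
 *	);
 *	if (!bpf_prog_test_run_opts(prog_fd, &opts))
 *		printf("verdict %u, avg %u ns\n", opts.retval, opts.duration);
 */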
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
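
/* Note: this runner and bpf_prog_test_run_sk_lookup() below time their
 * loops under a NO_PREEMPT bpf_test_timer (preempt_disable()), whereas
 * bpf_test_run() only disables migration (NO_MIGRATE).
 */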
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!ctx)
			return -ENOMEM;
		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	}
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}
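
/* Note: unlike the other runners, bpf_prog_test_run_syscall() copies the
 * (possibly modified) context back into the user buffer supplied via
 * ctx_in; ctx_out is rejected up front, so a BPF_PROG_TYPE_SYSCALL program
 * communicates results through ctx_in and the 32-bit retval only.
 */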