// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto, so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete, therefore eBPF programs must run under the RCU lock
 * if they are allowed to access maps, so check rcu_read_lock_held() in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func = bpf_map_push_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func = bpf_map_pop_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

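/* Illustrative sketch (not part of this file's build): the comment above
 * describes how a subsystem exposes these helpers. A get_func_proto()
 * callback along the lines below is the usual pattern; the function name is
 * made up for the example, and unhandled ids fall back to
 * bpf_base_func_proto().
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */
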
BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) 114 { 115 return map->ops->map_peek_elem(map, value); 116 } 117 118 const struct bpf_func_proto bpf_map_peek_elem_proto = { 119 .func = bpf_map_peek_elem, 120 .gpl_only = false, 121 .ret_type = RET_INTEGER, 122 .arg1_type = ARG_CONST_MAP_PTR, 123 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, 124 }; 125 126 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) 127 { 128 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 129 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); 130 } 131 132 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { 133 .func = bpf_map_lookup_percpu_elem, 134 .gpl_only = false, 135 .pkt_access = true, 136 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 137 .arg1_type = ARG_CONST_MAP_PTR, 138 .arg2_type = ARG_PTR_TO_MAP_KEY, 139 .arg3_type = ARG_ANYTHING, 140 }; 141 142 const struct bpf_func_proto bpf_get_prandom_u32_proto = { 143 .func = bpf_user_rnd_u32, 144 .gpl_only = false, 145 .ret_type = RET_INTEGER, 146 }; 147 148 BPF_CALL_0(bpf_get_smp_processor_id) 149 { 150 return smp_processor_id(); 151 } 152 153 const struct bpf_func_proto bpf_get_smp_processor_id_proto = { 154 .func = bpf_get_smp_processor_id, 155 .gpl_only = false, 156 .ret_type = RET_INTEGER, 157 }; 158 159 BPF_CALL_0(bpf_get_numa_node_id) 160 { 161 return numa_node_id(); 162 } 163 164 const struct bpf_func_proto bpf_get_numa_node_id_proto = { 165 .func = bpf_get_numa_node_id, 166 .gpl_only = false, 167 .ret_type = RET_INTEGER, 168 }; 169 170 BPF_CALL_0(bpf_ktime_get_ns) 171 { 172 /* NMI safe access to clock monotonic */ 173 return ktime_get_mono_fast_ns(); 174 } 175 176 const struct bpf_func_proto bpf_ktime_get_ns_proto = { 177 .func = bpf_ktime_get_ns, 178 .gpl_only = false, 179 .ret_type = RET_INTEGER, 180 }; 181 182 BPF_CALL_0(bpf_ktime_get_boot_ns) 183 { 184 /* NMI safe access to clock boottime */ 185 return ktime_get_boot_fast_ns(); 186 } 187 188 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = { 189 .func = bpf_ktime_get_boot_ns, 190 .gpl_only = false, 191 .ret_type = RET_INTEGER, 192 }; 193 194 BPF_CALL_0(bpf_ktime_get_coarse_ns) 195 { 196 return ktime_get_coarse_ns(); 197 } 198 199 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = { 200 .func = bpf_ktime_get_coarse_ns, 201 .gpl_only = false, 202 .ret_type = RET_INTEGER, 203 }; 204 205 BPF_CALL_0(bpf_ktime_get_tai_ns) 206 { 207 /* NMI safe access to clock tai */ 208 return ktime_get_tai_fast_ns(); 209 } 210 211 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = { 212 .func = bpf_ktime_get_tai_ns, 213 .gpl_only = false, 214 .ret_type = RET_INTEGER, 215 }; 216 217 BPF_CALL_0(bpf_get_current_pid_tgid) 218 { 219 struct task_struct *task = current; 220 221 if (unlikely(!task)) 222 return -EINVAL; 223 224 return (u64) task->tgid << 32 | task->pid; 225 } 226 227 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { 228 .func = bpf_get_current_pid_tgid, 229 .gpl_only = false, 230 .ret_type = RET_INTEGER, 231 }; 232 233 BPF_CALL_0(bpf_get_current_uid_gid) 234 { 235 struct task_struct *task = current; 236 kuid_t uid; 237 kgid_t gid; 238 239 if (unlikely(!task)) 240 return -EINVAL; 241 242 current_uid_gid(&uid, &gid); 243 return (u64) from_kgid(&init_user_ns, gid) << 32 | 244 from_kuid(&init_user_ns, uid); 245 } 246 247 const struct bpf_func_proto bpf_get_current_uid_gid_proto = { 248 .func = bpf_get_current_uid_gid, 249 .gpl_only = false, 250 .ret_type = RET_INTEGER, 251 }; 252 253 
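/* Usage sketch (BPF program side, illustrative only): the two helpers above
 * pack a pair of 32-bit values into one u64. tgid (the userspace "process"
 * id) sits in the upper half and pid (the userspace "thread" id) in the
 * lower half, so a program typically splits the result like this:
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;
 *	__u32 pid = (__u32)pid_tgid;
 *
 * bpf_get_current_uid_gid() uses the same layout, with gid in the upper and
 * uid in the lower 32 bits.
 */
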
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) 254 { 255 struct task_struct *task = current; 256 257 if (unlikely(!task)) 258 goto err_clear; 259 260 /* Verifier guarantees that size > 0 */ 261 strscpy_pad(buf, task->comm, size); 262 return 0; 263 err_clear: 264 memset(buf, 0, size); 265 return -EINVAL; 266 } 267 268 const struct bpf_func_proto bpf_get_current_comm_proto = { 269 .func = bpf_get_current_comm, 270 .gpl_only = false, 271 .ret_type = RET_INTEGER, 272 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 273 .arg2_type = ARG_CONST_SIZE, 274 }; 275 276 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) 277 278 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 279 { 280 arch_spinlock_t *l = (void *)lock; 281 union { 282 __u32 val; 283 arch_spinlock_t lock; 284 } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; 285 286 compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); 287 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); 288 BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); 289 preempt_disable(); 290 arch_spin_lock(l); 291 } 292 293 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 294 { 295 arch_spinlock_t *l = (void *)lock; 296 297 arch_spin_unlock(l); 298 preempt_enable(); 299 } 300 301 #else 302 303 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 304 { 305 atomic_t *l = (void *)lock; 306 307 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); 308 do { 309 atomic_cond_read_relaxed(l, !VAL); 310 } while (atomic_xchg(l, 1)); 311 } 312 313 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 314 { 315 atomic_t *l = (void *)lock; 316 317 atomic_set_release(l, 0); 318 } 319 320 #endif 321 322 static DEFINE_PER_CPU(unsigned long, irqsave_flags); 323 324 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) 325 { 326 unsigned long flags; 327 328 local_irq_save(flags); 329 __bpf_spin_lock(lock); 330 __this_cpu_write(irqsave_flags, flags); 331 } 332 333 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) 334 { 335 __bpf_spin_lock_irqsave(lock); 336 return 0; 337 } 338 339 const struct bpf_func_proto bpf_spin_lock_proto = { 340 .func = bpf_spin_lock, 341 .gpl_only = false, 342 .ret_type = RET_VOID, 343 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 344 .arg1_btf_id = BPF_PTR_POISON, 345 }; 346 347 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) 348 { 349 unsigned long flags; 350 351 flags = __this_cpu_read(irqsave_flags); 352 __bpf_spin_unlock(lock); 353 local_irq_restore(flags); 354 } 355 356 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) 357 { 358 __bpf_spin_unlock_irqrestore(lock); 359 return 0; 360 } 361 362 const struct bpf_func_proto bpf_spin_unlock_proto = { 363 .func = bpf_spin_unlock, 364 .gpl_only = false, 365 .ret_type = RET_VOID, 366 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 367 .arg1_btf_id = BPF_PTR_POISON, 368 }; 369 370 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 371 bool lock_src) 372 { 373 struct bpf_spin_lock *lock; 374 375 if (lock_src) 376 lock = src + map->record->spin_lock_off; 377 else 378 lock = dst + map->record->spin_lock_off; 379 preempt_disable(); 380 __bpf_spin_lock_irqsave(lock); 381 copy_map_value(map, dst, src); 382 __bpf_spin_unlock_irqrestore(lock); 383 preempt_enable(); 384 } 385 386 BPF_CALL_0(bpf_jiffies64) 387 { 388 return get_jiffies_64(); 389 } 390 391 const struct bpf_func_proto bpf_jiffies64_proto = { 392 .func = bpf_jiffies64, 393 .gpl_only = false, 394 .ret_type = RET_INTEGER, 395 }; 396 397 #ifdef 
CONFIG_CGROUPS 398 BPF_CALL_0(bpf_get_current_cgroup_id) 399 { 400 struct cgroup *cgrp; 401 u64 cgrp_id; 402 403 rcu_read_lock(); 404 cgrp = task_dfl_cgroup(current); 405 cgrp_id = cgroup_id(cgrp); 406 rcu_read_unlock(); 407 408 return cgrp_id; 409 } 410 411 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { 412 .func = bpf_get_current_cgroup_id, 413 .gpl_only = false, 414 .ret_type = RET_INTEGER, 415 }; 416 417 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) 418 { 419 struct cgroup *cgrp; 420 struct cgroup *ancestor; 421 u64 cgrp_id; 422 423 rcu_read_lock(); 424 cgrp = task_dfl_cgroup(current); 425 ancestor = cgroup_ancestor(cgrp, ancestor_level); 426 cgrp_id = ancestor ? cgroup_id(ancestor) : 0; 427 rcu_read_unlock(); 428 429 return cgrp_id; 430 } 431 432 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { 433 .func = bpf_get_current_ancestor_cgroup_id, 434 .gpl_only = false, 435 .ret_type = RET_INTEGER, 436 .arg1_type = ARG_ANYTHING, 437 }; 438 #endif /* CONFIG_CGROUPS */ 439 440 #define BPF_STRTOX_BASE_MASK 0x1F 441 442 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, 443 unsigned long long *res, bool *is_negative) 444 { 445 unsigned int base = flags & BPF_STRTOX_BASE_MASK; 446 const char *cur_buf = buf; 447 size_t cur_len = buf_len; 448 unsigned int consumed; 449 size_t val_len; 450 char str[64]; 451 452 if (!buf || !buf_len || !res || !is_negative) 453 return -EINVAL; 454 455 if (base != 0 && base != 8 && base != 10 && base != 16) 456 return -EINVAL; 457 458 if (flags & ~BPF_STRTOX_BASE_MASK) 459 return -EINVAL; 460 461 while (cur_buf < buf + buf_len && isspace(*cur_buf)) 462 ++cur_buf; 463 464 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); 465 if (*is_negative) 466 ++cur_buf; 467 468 consumed = cur_buf - buf; 469 cur_len -= consumed; 470 if (!cur_len) 471 return -EINVAL; 472 473 cur_len = min(cur_len, sizeof(str) - 1); 474 memcpy(str, cur_buf, cur_len); 475 str[cur_len] = '\0'; 476 cur_buf = str; 477 478 cur_buf = _parse_integer_fixup_radix(cur_buf, &base); 479 val_len = _parse_integer(cur_buf, base, res); 480 481 if (val_len & KSTRTOX_OVERFLOW) 482 return -ERANGE; 483 484 if (val_len == 0) 485 return -EINVAL; 486 487 cur_buf += val_len; 488 consumed += cur_buf - str; 489 490 return consumed; 491 } 492 493 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, 494 long long *res) 495 { 496 unsigned long long _res; 497 bool is_negative; 498 int err; 499 500 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 501 if (err < 0) 502 return err; 503 if (is_negative) { 504 if ((long long)-_res > 0) 505 return -ERANGE; 506 *res = -_res; 507 } else { 508 if ((long long)_res < 0) 509 return -ERANGE; 510 *res = _res; 511 } 512 return err; 513 } 514 515 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, 516 long *, res) 517 { 518 long long _res; 519 int err; 520 521 err = __bpf_strtoll(buf, buf_len, flags, &_res); 522 if (err < 0) 523 return err; 524 if (_res != (long)_res) 525 return -ERANGE; 526 *res = _res; 527 return err; 528 } 529 530 const struct bpf_func_proto bpf_strtol_proto = { 531 .func = bpf_strtol, 532 .gpl_only = false, 533 .ret_type = RET_INTEGER, 534 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 535 .arg2_type = ARG_CONST_SIZE, 536 .arg3_type = ARG_ANYTHING, 537 .arg4_type = ARG_PTR_TO_LONG, 538 }; 539 540 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, 541 unsigned long *, res) 542 { 543 unsigned long long _res; 544 bool 
is_negative; 545 int err; 546 547 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 548 if (err < 0) 549 return err; 550 if (is_negative) 551 return -EINVAL; 552 if (_res != (unsigned long)_res) 553 return -ERANGE; 554 *res = _res; 555 return err; 556 } 557 558 const struct bpf_func_proto bpf_strtoul_proto = { 559 .func = bpf_strtoul, 560 .gpl_only = false, 561 .ret_type = RET_INTEGER, 562 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 563 .arg2_type = ARG_CONST_SIZE, 564 .arg3_type = ARG_ANYTHING, 565 .arg4_type = ARG_PTR_TO_LONG, 566 }; 567 568 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) 569 { 570 return strncmp(s1, s2, s1_sz); 571 } 572 573 static const struct bpf_func_proto bpf_strncmp_proto = { 574 .func = bpf_strncmp, 575 .gpl_only = false, 576 .ret_type = RET_INTEGER, 577 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 578 .arg2_type = ARG_CONST_SIZE, 579 .arg3_type = ARG_PTR_TO_CONST_STR, 580 }; 581 582 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino, 583 struct bpf_pidns_info *, nsdata, u32, size) 584 { 585 struct task_struct *task = current; 586 struct pid_namespace *pidns; 587 int err = -EINVAL; 588 589 if (unlikely(size != sizeof(struct bpf_pidns_info))) 590 goto clear; 591 592 if (unlikely((u64)(dev_t)dev != dev)) 593 goto clear; 594 595 if (unlikely(!task)) 596 goto clear; 597 598 pidns = task_active_pid_ns(task); 599 if (unlikely(!pidns)) { 600 err = -ENOENT; 601 goto clear; 602 } 603 604 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) 605 goto clear; 606 607 nsdata->pid = task_pid_nr_ns(task, pidns); 608 nsdata->tgid = task_tgid_nr_ns(task, pidns); 609 return 0; 610 clear: 611 memset((void *)nsdata, 0, (size_t) size); 612 return err; 613 } 614 615 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = { 616 .func = bpf_get_ns_current_pid_tgid, 617 .gpl_only = false, 618 .ret_type = RET_INTEGER, 619 .arg1_type = ARG_ANYTHING, 620 .arg2_type = ARG_ANYTHING, 621 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 622 .arg4_type = ARG_CONST_SIZE, 623 }; 624 625 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { 626 .func = bpf_get_raw_cpu_id, 627 .gpl_only = false, 628 .ret_type = RET_INTEGER, 629 }; 630 631 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, 632 u64, flags, void *, data, u64, size) 633 { 634 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 635 return -EINVAL; 636 637 return bpf_event_output(map, flags, data, size, NULL, 0, NULL); 638 } 639 640 const struct bpf_func_proto bpf_event_output_data_proto = { 641 .func = bpf_event_output_data, 642 .gpl_only = true, 643 .ret_type = RET_INTEGER, 644 .arg1_type = ARG_PTR_TO_CTX, 645 .arg2_type = ARG_CONST_MAP_PTR, 646 .arg3_type = ARG_ANYTHING, 647 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 648 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 649 }; 650 651 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, 652 const void __user *, user_ptr) 653 { 654 int ret = copy_from_user(dst, user_ptr, size); 655 656 if (unlikely(ret)) { 657 memset(dst, 0, size); 658 ret = -EFAULT; 659 } 660 661 return ret; 662 } 663 664 const struct bpf_func_proto bpf_copy_from_user_proto = { 665 .func = bpf_copy_from_user, 666 .gpl_only = false, 667 .might_sleep = true, 668 .ret_type = RET_INTEGER, 669 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 670 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 671 .arg3_type = ARG_ANYTHING, 672 }; 673 674 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, 675 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) 676 { 677 int ret; 678 679 /* flags 
is not used yet */ 680 if (unlikely(flags)) 681 return -EINVAL; 682 683 if (unlikely(!size)) 684 return 0; 685 686 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); 687 if (ret == size) 688 return 0; 689 690 memset(dst, 0, size); 691 /* Return -EFAULT for partial read */ 692 return ret < 0 ? ret : -EFAULT; 693 } 694 695 const struct bpf_func_proto bpf_copy_from_user_task_proto = { 696 .func = bpf_copy_from_user_task, 697 .gpl_only = true, 698 .might_sleep = true, 699 .ret_type = RET_INTEGER, 700 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 701 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 702 .arg3_type = ARG_ANYTHING, 703 .arg4_type = ARG_PTR_TO_BTF_ID, 704 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 705 .arg5_type = ARG_ANYTHING 706 }; 707 708 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) 709 { 710 if (cpu >= nr_cpu_ids) 711 return (unsigned long)NULL; 712 713 return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu); 714 } 715 716 const struct bpf_func_proto bpf_per_cpu_ptr_proto = { 717 .func = bpf_per_cpu_ptr, 718 .gpl_only = false, 719 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, 720 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 721 .arg2_type = ARG_ANYTHING, 722 }; 723 724 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) 725 { 726 return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr); 727 } 728 729 const struct bpf_func_proto bpf_this_cpu_ptr_proto = { 730 .func = bpf_this_cpu_ptr, 731 .gpl_only = false, 732 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, 733 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 734 }; 735 736 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 737 size_t bufsz) 738 { 739 void __user *user_ptr = (__force void __user *)unsafe_ptr; 740 741 buf[0] = 0; 742 743 switch (fmt_ptype) { 744 case 's': 745 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 746 if ((unsigned long)unsafe_ptr < TASK_SIZE) 747 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 748 fallthrough; 749 #endif 750 case 'k': 751 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 752 case 'u': 753 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 754 } 755 756 return -EINVAL; 757 } 758 759 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary 760 * arguments representation. 
761 */ 762 #define MAX_BPRINTF_BIN_ARGS 512 763 764 /* Support executing three nested bprintf helper calls on a given CPU */ 765 #define MAX_BPRINTF_NEST_LEVEL 3 766 struct bpf_bprintf_buffers { 767 char bin_args[MAX_BPRINTF_BIN_ARGS]; 768 char buf[MAX_BPRINTF_BUF]; 769 }; 770 771 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); 772 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); 773 774 static int try_get_buffers(struct bpf_bprintf_buffers **bufs) 775 { 776 int nest_level; 777 778 preempt_disable(); 779 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level); 780 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) { 781 this_cpu_dec(bpf_bprintf_nest_level); 782 preempt_enable(); 783 return -EBUSY; 784 } 785 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); 786 787 return 0; 788 } 789 790 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data) 791 { 792 if (!data->bin_args && !data->buf) 793 return; 794 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) 795 return; 796 this_cpu_dec(bpf_bprintf_nest_level); 797 preempt_enable(); 798 } 799 800 /* 801 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers 802 * 803 * Returns a negative value if fmt is an invalid format string or 0 otherwise. 804 * 805 * This can be used in two ways: 806 * - Format string verification only: when data->get_bin_args is false 807 * - Arguments preparation: in addition to the above verification, it writes in 808 * data->bin_args a binary representation of arguments usable by bstr_printf 809 * where pointers from BPF have been sanitized. 810 * 811 * In argument preparation mode, if 0 is returned, safe temporary buffers are 812 * allocated and bpf_bprintf_cleanup should be called to free them after use. 
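 *
 * A minimal caller sketch, with placeholder variable names (bpf_snprintf()
 * below follows this pattern):
 *
 *	struct bpf_bprintf_data data = { .get_bin_args = true };
 *	int err;
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(buf, buf_size, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);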
813 */ 814 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, 815 u32 num_args, struct bpf_bprintf_data *data) 816 { 817 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; 818 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; 819 struct bpf_bprintf_buffers *buffers = NULL; 820 size_t sizeof_cur_arg, sizeof_cur_ip; 821 int err, i, num_spec = 0; 822 u64 cur_arg; 823 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX"; 824 825 fmt_end = strnchr(fmt, fmt_size, 0); 826 if (!fmt_end) 827 return -EINVAL; 828 fmt_size = fmt_end - fmt; 829 830 if (get_buffers && try_get_buffers(&buffers)) 831 return -EBUSY; 832 833 if (data->get_bin_args) { 834 if (num_args) 835 tmp_buf = buffers->bin_args; 836 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; 837 data->bin_args = (u32 *)tmp_buf; 838 } 839 840 if (data->get_buf) 841 data->buf = buffers->buf; 842 843 for (i = 0; i < fmt_size; i++) { 844 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 845 err = -EINVAL; 846 goto out; 847 } 848 849 if (fmt[i] != '%') 850 continue; 851 852 if (fmt[i + 1] == '%') { 853 i++; 854 continue; 855 } 856 857 if (num_spec >= num_args) { 858 err = -EINVAL; 859 goto out; 860 } 861 862 /* The string is zero-terminated so if fmt[i] != 0, we can 863 * always access fmt[i + 1], in the worst case it will be a 0 864 */ 865 i++; 866 867 /* skip optional "[0 +-][num]" width formatting field */ 868 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 869 fmt[i] == ' ') 870 i++; 871 if (fmt[i] >= '1' && fmt[i] <= '9') { 872 i++; 873 while (fmt[i] >= '0' && fmt[i] <= '9') 874 i++; 875 } 876 877 if (fmt[i] == 'p') { 878 sizeof_cur_arg = sizeof(long); 879 880 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && 881 fmt[i + 2] == 's') { 882 fmt_ptype = fmt[i + 1]; 883 i += 2; 884 goto fmt_str; 885 } 886 887 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || 888 ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || 889 fmt[i + 1] == 'x' || fmt[i + 1] == 's' || 890 fmt[i + 1] == 'S') { 891 /* just kernel pointers */ 892 if (tmp_buf) 893 cur_arg = raw_args[num_spec]; 894 i++; 895 goto nocopy_fmt; 896 } 897 898 if (fmt[i + 1] == 'B') { 899 if (tmp_buf) { 900 err = snprintf(tmp_buf, 901 (tmp_buf_end - tmp_buf), 902 "%pB", 903 (void *)(long)raw_args[num_spec]); 904 tmp_buf += (err + 1); 905 } 906 907 i++; 908 num_spec++; 909 continue; 910 } 911 912 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 913 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || 914 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { 915 err = -EINVAL; 916 goto out; 917 } 918 919 i += 2; 920 if (!tmp_buf) 921 goto nocopy_fmt; 922 923 sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16; 924 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { 925 err = -ENOSPC; 926 goto out; 927 } 928 929 unsafe_ptr = (char *)(long)raw_args[num_spec]; 930 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr, 931 sizeof_cur_ip); 932 if (err < 0) 933 memset(cur_ip, 0, sizeof_cur_ip); 934 935 /* hack: bstr_printf expects IP addresses to be 936 * pre-formatted as strings, ironically, the easiest way 937 * to do that is to call snprintf. 
938 */ 939 ip_spec[2] = fmt[i - 1]; 940 ip_spec[3] = fmt[i]; 941 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, 942 ip_spec, &cur_ip); 943 944 tmp_buf += err + 1; 945 num_spec++; 946 947 continue; 948 } else if (fmt[i] == 's') { 949 fmt_ptype = fmt[i]; 950 fmt_str: 951 if (fmt[i + 1] != 0 && 952 !isspace(fmt[i + 1]) && 953 !ispunct(fmt[i + 1])) { 954 err = -EINVAL; 955 goto out; 956 } 957 958 if (!tmp_buf) 959 goto nocopy_fmt; 960 961 if (tmp_buf_end == tmp_buf) { 962 err = -ENOSPC; 963 goto out; 964 } 965 966 unsafe_ptr = (char *)(long)raw_args[num_spec]; 967 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr, 968 fmt_ptype, 969 tmp_buf_end - tmp_buf); 970 if (err < 0) { 971 tmp_buf[0] = '\0'; 972 err = 1; 973 } 974 975 tmp_buf += err; 976 num_spec++; 977 978 continue; 979 } else if (fmt[i] == 'c') { 980 if (!tmp_buf) 981 goto nocopy_fmt; 982 983 if (tmp_buf_end == tmp_buf) { 984 err = -ENOSPC; 985 goto out; 986 } 987 988 *tmp_buf = raw_args[num_spec]; 989 tmp_buf++; 990 num_spec++; 991 992 continue; 993 } 994 995 sizeof_cur_arg = sizeof(int); 996 997 if (fmt[i] == 'l') { 998 sizeof_cur_arg = sizeof(long); 999 i++; 1000 } 1001 if (fmt[i] == 'l') { 1002 sizeof_cur_arg = sizeof(long long); 1003 i++; 1004 } 1005 1006 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && 1007 fmt[i] != 'x' && fmt[i] != 'X') { 1008 err = -EINVAL; 1009 goto out; 1010 } 1011 1012 if (tmp_buf) 1013 cur_arg = raw_args[num_spec]; 1014 nocopy_fmt: 1015 if (tmp_buf) { 1016 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); 1017 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { 1018 err = -ENOSPC; 1019 goto out; 1020 } 1021 1022 if (sizeof_cur_arg == 8) { 1023 *(u32 *)tmp_buf = *(u32 *)&cur_arg; 1024 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); 1025 } else { 1026 *(u32 *)tmp_buf = (u32)(long)cur_arg; 1027 } 1028 tmp_buf += sizeof_cur_arg; 1029 } 1030 num_spec++; 1031 } 1032 1033 err = 0; 1034 out: 1035 if (err) 1036 bpf_bprintf_cleanup(data); 1037 return err; 1038 } 1039 1040 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, 1041 const void *, args, u32, data_len) 1042 { 1043 struct bpf_bprintf_data data = { 1044 .get_bin_args = true, 1045 }; 1046 int err, num_args; 1047 1048 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || 1049 (data_len && !args)) 1050 return -EINVAL; 1051 num_args = data_len / 8; 1052 1053 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we 1054 * can safely give an unbounded size. 1055 */ 1056 err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data); 1057 if (err < 0) 1058 return err; 1059 1060 err = bstr_printf(str, str_size, fmt, data.bin_args); 1061 1062 bpf_bprintf_cleanup(&data); 1063 1064 return err + 1; 1065 } 1066 1067 const struct bpf_func_proto bpf_snprintf_proto = { 1068 .func = bpf_snprintf, 1069 .gpl_only = true, 1070 .ret_type = RET_INTEGER, 1071 .arg1_type = ARG_PTR_TO_MEM_OR_NULL, 1072 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1073 .arg3_type = ARG_PTR_TO_CONST_STR, 1074 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 1075 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1076 }; 1077 1078 /* BPF map elements can contain 'struct bpf_timer'. 1079 * Such map owns all of its BPF timers. 1080 * 'struct bpf_timer' is allocated as part of map element allocation 1081 * and it's zero initialized. 1082 * That space is used to keep 'struct bpf_timer_kern'. 1083 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and 1084 * remembers 'struct bpf_map *' pointer it's part of. 
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * responsible for freeing the timers when the inner map is replaced or
 * deleted by user space.
 */
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->record->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	WRITE_ONCE(timer->timer, t);
	/* Guarantee the order between timer->timer and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(timer->timer, NULL);
		kfree(t);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func = bpf_timer_init,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
1248 */ 1249 prog = bpf_prog_inc_not_zero(prog); 1250 if (IS_ERR(prog)) { 1251 ret = PTR_ERR(prog); 1252 goto out; 1253 } 1254 if (prev) 1255 /* Drop prev prog refcnt when swapping with new prog */ 1256 bpf_prog_put(prev); 1257 t->prog = prog; 1258 } 1259 rcu_assign_pointer(t->callback_fn, callback_fn); 1260 out: 1261 __bpf_spin_unlock_irqrestore(&timer->lock); 1262 return ret; 1263 } 1264 1265 static const struct bpf_func_proto bpf_timer_set_callback_proto = { 1266 .func = bpf_timer_set_callback, 1267 .gpl_only = true, 1268 .ret_type = RET_INTEGER, 1269 .arg1_type = ARG_PTR_TO_TIMER, 1270 .arg2_type = ARG_PTR_TO_FUNC, 1271 }; 1272 1273 BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags) 1274 { 1275 struct bpf_hrtimer *t; 1276 int ret = 0; 1277 enum hrtimer_mode mode; 1278 1279 if (in_nmi()) 1280 return -EOPNOTSUPP; 1281 if (flags > BPF_F_TIMER_ABS) 1282 return -EINVAL; 1283 __bpf_spin_lock_irqsave(&timer->lock); 1284 t = timer->timer; 1285 if (!t || !t->prog) { 1286 ret = -EINVAL; 1287 goto out; 1288 } 1289 1290 if (flags & BPF_F_TIMER_ABS) 1291 mode = HRTIMER_MODE_ABS_SOFT; 1292 else 1293 mode = HRTIMER_MODE_REL_SOFT; 1294 1295 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); 1296 out: 1297 __bpf_spin_unlock_irqrestore(&timer->lock); 1298 return ret; 1299 } 1300 1301 static const struct bpf_func_proto bpf_timer_start_proto = { 1302 .func = bpf_timer_start, 1303 .gpl_only = true, 1304 .ret_type = RET_INTEGER, 1305 .arg1_type = ARG_PTR_TO_TIMER, 1306 .arg2_type = ARG_ANYTHING, 1307 .arg3_type = ARG_ANYTHING, 1308 }; 1309 1310 static void drop_prog_refcnt(struct bpf_hrtimer *t) 1311 { 1312 struct bpf_prog *prog = t->prog; 1313 1314 if (prog) { 1315 bpf_prog_put(prog); 1316 t->prog = NULL; 1317 rcu_assign_pointer(t->callback_fn, NULL); 1318 } 1319 } 1320 1321 BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) 1322 { 1323 struct bpf_hrtimer *t; 1324 int ret = 0; 1325 1326 if (in_nmi()) 1327 return -EOPNOTSUPP; 1328 __bpf_spin_lock_irqsave(&timer->lock); 1329 t = timer->timer; 1330 if (!t) { 1331 ret = -EINVAL; 1332 goto out; 1333 } 1334 if (this_cpu_read(hrtimer_running) == t) { 1335 /* If bpf callback_fn is trying to bpf_timer_cancel() 1336 * its own timer the hrtimer_cancel() will deadlock 1337 * since it waits for callback_fn to finish 1338 */ 1339 ret = -EDEADLK; 1340 goto out; 1341 } 1342 drop_prog_refcnt(t); 1343 out: 1344 __bpf_spin_unlock_irqrestore(&timer->lock); 1345 /* Cancel the timer and wait for associated callback to finish 1346 * if it was running. 1347 */ 1348 ret = ret ?: hrtimer_cancel(&t->timer); 1349 return ret; 1350 } 1351 1352 static const struct bpf_func_proto bpf_timer_cancel_proto = { 1353 .func = bpf_timer_cancel, 1354 .gpl_only = true, 1355 .ret_type = RET_INTEGER, 1356 .arg1_type = ARG_PTR_TO_TIMER, 1357 }; 1358 1359 /* This function is called by map_delete/update_elem for individual element and 1360 * by ops->map_release_uref when the user space reference to a map reaches zero. 1361 */ 1362 void bpf_timer_cancel_and_free(void *val) 1363 { 1364 struct bpf_timer_kern *timer = val; 1365 struct bpf_hrtimer *t; 1366 1367 /* Performance optimization: read timer->timer without lock first. */ 1368 if (!READ_ONCE(timer->timer)) 1369 return; 1370 1371 __bpf_spin_lock_irqsave(&timer->lock); 1372 /* re-read it under lock */ 1373 t = timer->timer; 1374 if (!t) 1375 goto out; 1376 drop_prog_refcnt(t); 1377 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use 1378 * this timer, since it won't be initialized. 
	 */
	WRITE_ONCE(timer->timer, NULL);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such a case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func = bpf_kptr_xchg,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id = BPF_PTR_POISON,
	.arg1_type = ARG_PTR_TO_KPTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id = BPF_PTR_POISON,
};

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT 28
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)

static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	ptr->size = new_size | metadata;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ?
-E2BIG : 0; 1470 } 1471 1472 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 1473 enum bpf_dynptr_type type, u32 offset, u32 size) 1474 { 1475 ptr->data = data; 1476 ptr->offset = offset; 1477 ptr->size = size; 1478 bpf_dynptr_set_type(ptr, type); 1479 } 1480 1481 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) 1482 { 1483 memset(ptr, 0, sizeof(*ptr)); 1484 } 1485 1486 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) 1487 { 1488 u32 size = __bpf_dynptr_size(ptr); 1489 1490 if (len > size || offset > size - len) 1491 return -E2BIG; 1492 1493 return 0; 1494 } 1495 1496 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1497 { 1498 int err; 1499 1500 BTF_TYPE_EMIT(struct bpf_dynptr); 1501 1502 err = bpf_dynptr_check_size(size); 1503 if (err) 1504 goto error; 1505 1506 /* flags is currently unsupported */ 1507 if (flags) { 1508 err = -EINVAL; 1509 goto error; 1510 } 1511 1512 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); 1513 1514 return 0; 1515 1516 error: 1517 bpf_dynptr_set_null(ptr); 1518 return err; 1519 } 1520 1521 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { 1522 .func = bpf_dynptr_from_mem, 1523 .gpl_only = false, 1524 .ret_type = RET_INTEGER, 1525 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1526 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1527 .arg3_type = ARG_ANYTHING, 1528 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, 1529 }; 1530 1531 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, 1532 u32, offset, u64, flags) 1533 { 1534 enum bpf_dynptr_type type; 1535 int err; 1536 1537 if (!src->data || flags) 1538 return -EINVAL; 1539 1540 err = bpf_dynptr_check_off_len(src, offset, len); 1541 if (err) 1542 return err; 1543 1544 type = bpf_dynptr_get_type(src); 1545 1546 switch (type) { 1547 case BPF_DYNPTR_TYPE_LOCAL: 1548 case BPF_DYNPTR_TYPE_RINGBUF: 1549 /* Source and destination may possibly overlap, hence use memmove to 1550 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr 1551 * pointing to overlapping PTR_TO_MAP_VALUE regions. 
1552 */ 1553 memmove(dst, src->data + src->offset + offset, len); 1554 return 0; 1555 case BPF_DYNPTR_TYPE_SKB: 1556 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); 1557 case BPF_DYNPTR_TYPE_XDP: 1558 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); 1559 default: 1560 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); 1561 return -EFAULT; 1562 } 1563 } 1564 1565 static const struct bpf_func_proto bpf_dynptr_read_proto = { 1566 .func = bpf_dynptr_read, 1567 .gpl_only = false, 1568 .ret_type = RET_INTEGER, 1569 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1570 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1571 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1572 .arg4_type = ARG_ANYTHING, 1573 .arg5_type = ARG_ANYTHING, 1574 }; 1575 1576 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, 1577 u32, len, u64, flags) 1578 { 1579 enum bpf_dynptr_type type; 1580 int err; 1581 1582 if (!dst->data || __bpf_dynptr_is_rdonly(dst)) 1583 return -EINVAL; 1584 1585 err = bpf_dynptr_check_off_len(dst, offset, len); 1586 if (err) 1587 return err; 1588 1589 type = bpf_dynptr_get_type(dst); 1590 1591 switch (type) { 1592 case BPF_DYNPTR_TYPE_LOCAL: 1593 case BPF_DYNPTR_TYPE_RINGBUF: 1594 if (flags) 1595 return -EINVAL; 1596 /* Source and destination may possibly overlap, hence use memmove to 1597 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr 1598 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1599 */ 1600 memmove(dst->data + dst->offset + offset, src, len); 1601 return 0; 1602 case BPF_DYNPTR_TYPE_SKB: 1603 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, 1604 flags); 1605 case BPF_DYNPTR_TYPE_XDP: 1606 if (flags) 1607 return -EINVAL; 1608 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); 1609 default: 1610 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); 1611 return -EFAULT; 1612 } 1613 } 1614 1615 static const struct bpf_func_proto bpf_dynptr_write_proto = { 1616 .func = bpf_dynptr_write, 1617 .gpl_only = false, 1618 .ret_type = RET_INTEGER, 1619 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1620 .arg2_type = ARG_ANYTHING, 1621 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1622 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 1623 .arg5_type = ARG_ANYTHING, 1624 }; 1625 1626 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) 1627 { 1628 enum bpf_dynptr_type type; 1629 int err; 1630 1631 if (!ptr->data) 1632 return 0; 1633 1634 err = bpf_dynptr_check_off_len(ptr, offset, len); 1635 if (err) 1636 return 0; 1637 1638 if (__bpf_dynptr_is_rdonly(ptr)) 1639 return 0; 1640 1641 type = bpf_dynptr_get_type(ptr); 1642 1643 switch (type) { 1644 case BPF_DYNPTR_TYPE_LOCAL: 1645 case BPF_DYNPTR_TYPE_RINGBUF: 1646 return (unsigned long)(ptr->data + ptr->offset + offset); 1647 case BPF_DYNPTR_TYPE_SKB: 1648 case BPF_DYNPTR_TYPE_XDP: 1649 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ 1650 return 0; 1651 default: 1652 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); 1653 return 0; 1654 } 1655 } 1656 1657 static const struct bpf_func_proto bpf_dynptr_data_proto = { 1658 .func = bpf_dynptr_data, 1659 .gpl_only = false, 1660 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, 1661 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1662 .arg2_type = ARG_ANYTHING, 1663 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, 1664 }; 1665 1666 const struct bpf_func_proto bpf_get_current_task_proto __weak; 1667 const 
struct bpf_func_proto bpf_get_current_task_btf_proto __weak; 1668 const struct bpf_func_proto bpf_probe_read_user_proto __weak; 1669 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; 1670 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; 1671 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; 1672 const struct bpf_func_proto bpf_task_pt_regs_proto __weak; 1673 1674 const struct bpf_func_proto * 1675 bpf_base_func_proto(enum bpf_func_id func_id) 1676 { 1677 switch (func_id) { 1678 case BPF_FUNC_map_lookup_elem: 1679 return &bpf_map_lookup_elem_proto; 1680 case BPF_FUNC_map_update_elem: 1681 return &bpf_map_update_elem_proto; 1682 case BPF_FUNC_map_delete_elem: 1683 return &bpf_map_delete_elem_proto; 1684 case BPF_FUNC_map_push_elem: 1685 return &bpf_map_push_elem_proto; 1686 case BPF_FUNC_map_pop_elem: 1687 return &bpf_map_pop_elem_proto; 1688 case BPF_FUNC_map_peek_elem: 1689 return &bpf_map_peek_elem_proto; 1690 case BPF_FUNC_map_lookup_percpu_elem: 1691 return &bpf_map_lookup_percpu_elem_proto; 1692 case BPF_FUNC_get_prandom_u32: 1693 return &bpf_get_prandom_u32_proto; 1694 case BPF_FUNC_get_smp_processor_id: 1695 return &bpf_get_raw_smp_processor_id_proto; 1696 case BPF_FUNC_get_numa_node_id: 1697 return &bpf_get_numa_node_id_proto; 1698 case BPF_FUNC_tail_call: 1699 return &bpf_tail_call_proto; 1700 case BPF_FUNC_ktime_get_ns: 1701 return &bpf_ktime_get_ns_proto; 1702 case BPF_FUNC_ktime_get_boot_ns: 1703 return &bpf_ktime_get_boot_ns_proto; 1704 case BPF_FUNC_ktime_get_tai_ns: 1705 return &bpf_ktime_get_tai_ns_proto; 1706 case BPF_FUNC_ringbuf_output: 1707 return &bpf_ringbuf_output_proto; 1708 case BPF_FUNC_ringbuf_reserve: 1709 return &bpf_ringbuf_reserve_proto; 1710 case BPF_FUNC_ringbuf_submit: 1711 return &bpf_ringbuf_submit_proto; 1712 case BPF_FUNC_ringbuf_discard: 1713 return &bpf_ringbuf_discard_proto; 1714 case BPF_FUNC_ringbuf_query: 1715 return &bpf_ringbuf_query_proto; 1716 case BPF_FUNC_strncmp: 1717 return &bpf_strncmp_proto; 1718 case BPF_FUNC_strtol: 1719 return &bpf_strtol_proto; 1720 case BPF_FUNC_strtoul: 1721 return &bpf_strtoul_proto; 1722 default: 1723 break; 1724 } 1725 1726 if (!bpf_capable()) 1727 return NULL; 1728 1729 switch (func_id) { 1730 case BPF_FUNC_spin_lock: 1731 return &bpf_spin_lock_proto; 1732 case BPF_FUNC_spin_unlock: 1733 return &bpf_spin_unlock_proto; 1734 case BPF_FUNC_jiffies64: 1735 return &bpf_jiffies64_proto; 1736 case BPF_FUNC_per_cpu_ptr: 1737 return &bpf_per_cpu_ptr_proto; 1738 case BPF_FUNC_this_cpu_ptr: 1739 return &bpf_this_cpu_ptr_proto; 1740 case BPF_FUNC_timer_init: 1741 return &bpf_timer_init_proto; 1742 case BPF_FUNC_timer_set_callback: 1743 return &bpf_timer_set_callback_proto; 1744 case BPF_FUNC_timer_start: 1745 return &bpf_timer_start_proto; 1746 case BPF_FUNC_timer_cancel: 1747 return &bpf_timer_cancel_proto; 1748 case BPF_FUNC_kptr_xchg: 1749 return &bpf_kptr_xchg_proto; 1750 case BPF_FUNC_for_each_map_elem: 1751 return &bpf_for_each_map_elem_proto; 1752 case BPF_FUNC_loop: 1753 return &bpf_loop_proto; 1754 case BPF_FUNC_user_ringbuf_drain: 1755 return &bpf_user_ringbuf_drain_proto; 1756 case BPF_FUNC_ringbuf_reserve_dynptr: 1757 return &bpf_ringbuf_reserve_dynptr_proto; 1758 case BPF_FUNC_ringbuf_submit_dynptr: 1759 return &bpf_ringbuf_submit_dynptr_proto; 1760 case BPF_FUNC_ringbuf_discard_dynptr: 1761 return &bpf_ringbuf_discard_dynptr_proto; 1762 case BPF_FUNC_dynptr_from_mem: 1763 return &bpf_dynptr_from_mem_proto; 1764 case BPF_FUNC_dynptr_read: 1765 return 
&bpf_dynptr_read_proto; 1766 case BPF_FUNC_dynptr_write: 1767 return &bpf_dynptr_write_proto; 1768 case BPF_FUNC_dynptr_data: 1769 return &bpf_dynptr_data_proto; 1770 #ifdef CONFIG_CGROUPS 1771 case BPF_FUNC_cgrp_storage_get: 1772 return &bpf_cgrp_storage_get_proto; 1773 case BPF_FUNC_cgrp_storage_delete: 1774 return &bpf_cgrp_storage_delete_proto; 1775 case BPF_FUNC_get_current_cgroup_id: 1776 return &bpf_get_current_cgroup_id_proto; 1777 case BPF_FUNC_get_current_ancestor_cgroup_id: 1778 return &bpf_get_current_ancestor_cgroup_id_proto; 1779 #endif 1780 default: 1781 break; 1782 } 1783 1784 if (!perfmon_capable()) 1785 return NULL; 1786 1787 switch (func_id) { 1788 case BPF_FUNC_trace_printk: 1789 return bpf_get_trace_printk_proto(); 1790 case BPF_FUNC_get_current_task: 1791 return &bpf_get_current_task_proto; 1792 case BPF_FUNC_get_current_task_btf: 1793 return &bpf_get_current_task_btf_proto; 1794 case BPF_FUNC_probe_read_user: 1795 return &bpf_probe_read_user_proto; 1796 case BPF_FUNC_probe_read_kernel: 1797 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1798 NULL : &bpf_probe_read_kernel_proto; 1799 case BPF_FUNC_probe_read_user_str: 1800 return &bpf_probe_read_user_str_proto; 1801 case BPF_FUNC_probe_read_kernel_str: 1802 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1803 NULL : &bpf_probe_read_kernel_str_proto; 1804 case BPF_FUNC_snprintf_btf: 1805 return &bpf_snprintf_btf_proto; 1806 case BPF_FUNC_snprintf: 1807 return &bpf_snprintf_proto; 1808 case BPF_FUNC_task_pt_regs: 1809 return &bpf_task_pt_regs_proto; 1810 case BPF_FUNC_trace_vprintk: 1811 return bpf_get_trace_vprintk_proto(); 1812 default: 1813 return NULL; 1814 } 1815 } 1816 1817 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec); 1818 1819 void bpf_list_head_free(const struct btf_field *field, void *list_head, 1820 struct bpf_spin_lock *spin_lock) 1821 { 1822 struct list_head *head = list_head, *orig_head = list_head; 1823 1824 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); 1825 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); 1826 1827 /* Do the actual list draining outside the lock to not hold the lock for 1828 * too long, and also prevent deadlocks if tracing programs end up 1829 * executing on entry/exit of functions called inside the critical 1830 * section, and end up doing map ops that call bpf_list_head_free for 1831 * the same map value again. 1832 */ 1833 __bpf_spin_lock_irqsave(spin_lock); 1834 if (!head->next || list_empty(head)) 1835 goto unlock; 1836 head = head->next; 1837 unlock: 1838 INIT_LIST_HEAD(orig_head); 1839 __bpf_spin_unlock_irqrestore(spin_lock); 1840 1841 while (head != orig_head) { 1842 void *obj = head; 1843 1844 obj -= field->graph_root.node_offset; 1845 head = head->next; 1846 /* The contained type can also have resources, including a 1847 * bpf_list_head which needs to be freed. 1848 */ 1849 migrate_disable(); 1850 __bpf_obj_drop_impl(obj, field->graph_root.value_rec); 1851 migrate_enable(); 1852 } 1853 } 1854 1855 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are 1856 * 'rb_node *', so field name of rb_node within containing struct is not 1857 * needed. 
1858 * 1859 * Since bpf_rb_tree's node type has a corresponding struct btf_field with 1860 * graph_root.node_offset, it's not necessary to know field name 1861 * or type of node struct 1862 */ 1863 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ 1864 for (pos = rb_first_postorder(root); \ 1865 pos && ({ n = rb_next_postorder(pos); 1; }); \ 1866 pos = n) 1867 1868 void bpf_rb_root_free(const struct btf_field *field, void *rb_root, 1869 struct bpf_spin_lock *spin_lock) 1870 { 1871 struct rb_root_cached orig_root, *root = rb_root; 1872 struct rb_node *pos, *n; 1873 void *obj; 1874 1875 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); 1876 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); 1877 1878 __bpf_spin_lock_irqsave(spin_lock); 1879 orig_root = *root; 1880 *root = RB_ROOT_CACHED; 1881 __bpf_spin_unlock_irqrestore(spin_lock); 1882 1883 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { 1884 obj = pos; 1885 obj -= field->graph_root.node_offset; 1886 1887 1888 migrate_disable(); 1889 __bpf_obj_drop_impl(obj, field->graph_root.value_rec); 1890 migrate_enable(); 1891 } 1892 } 1893 1894 __diag_push(); 1895 __diag_ignore_all("-Wmissing-prototypes", 1896 "Global functions as their definitions will be in vmlinux BTF"); 1897 1898 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 1899 { 1900 struct btf_struct_meta *meta = meta__ign; 1901 u64 size = local_type_id__k; 1902 void *p; 1903 1904 p = bpf_mem_alloc(&bpf_global_ma, size); 1905 if (!p) 1906 return NULL; 1907 if (meta) 1908 bpf_obj_init(meta->record, p); 1909 return p; 1910 } 1911 1912 /* Must be called under migrate_disable(), as required by bpf_mem_free */ 1913 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec) 1914 { 1915 if (rec && rec->refcount_off >= 0 && 1916 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { 1917 /* Object is refcounted and refcount_dec didn't result in 0 1918 * refcount. Return without freeing the object 1919 */ 1920 return; 1921 } 1922 1923 if (rec) 1924 bpf_obj_free_fields(rec, p); 1925 1926 if (rec && rec->refcount_off >= 0) 1927 bpf_mem_free_rcu(&bpf_global_ma, p); 1928 else 1929 bpf_mem_free(&bpf_global_ma, p); 1930 } 1931 1932 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) 1933 { 1934 struct btf_struct_meta *meta = meta__ign; 1935 void *p = p__alloc; 1936 1937 __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL); 1938 } 1939 1940 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) 1941 { 1942 struct btf_struct_meta *meta = meta__ign; 1943 struct bpf_refcount *ref; 1944 1945 /* Could just cast directly to refcount_t *, but need some code using 1946 * bpf_refcount type so that it is emitted in vmlinux BTF 1947 */ 1948 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); 1949 if (!refcount_inc_not_zero((refcount_t *)ref)) 1950 return NULL; 1951 1952 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null 1953 * in verifier.c 1954 */ 1955 return (void *)p__refcounted_kptr; 1956 } 1957 1958 static int __bpf_list_add(struct bpf_list_node_kern *node, 1959 struct bpf_list_head *head, 1960 bool tail, struct btf_record *rec, u64 off) 1961 { 1962 struct list_head *n = &node->list_head, *h = (void *)head; 1963 1964 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 1965 * called on its fields, so init here 1966 */ 1967 if (unlikely(!h->next)) 1968 INIT_LIST_HEAD(h); 1969 1970 /* node->owner != NULL implies !list_empty(n), no need to separately 1971 * check the latter 1972 */ 1973 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 1974 /* Only called from BPF prog, no need to migrate_disable */ 1975 __bpf_obj_drop_impl((void *)n - off, rec); 1976 return -EINVAL; 1977 } 1978 1979 tail ? list_add_tail(n, h) : list_add(n, h); 1980 WRITE_ONCE(node->owner, head); 1981 1982 return 0; 1983 } 1984 1985 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, 1986 struct bpf_list_node *node, 1987 void *meta__ign, u64 off) 1988 { 1989 struct bpf_list_node_kern *n = (void *)node; 1990 struct btf_struct_meta *meta = meta__ign; 1991 1992 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); 1993 } 1994 1995 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, 1996 struct bpf_list_node *node, 1997 void *meta__ign, u64 off) 1998 { 1999 struct bpf_list_node_kern *n = (void *)node; 2000 struct btf_struct_meta *meta = meta__ign; 2001 2002 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); 2003 } 2004 2005 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) 2006 { 2007 struct list_head *n, *h = (void *)head; 2008 struct bpf_list_node_kern *node; 2009 2010 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2011 * called on its fields, so init here 2012 */ 2013 if (unlikely(!h->next)) 2014 INIT_LIST_HEAD(h); 2015 if (list_empty(h)) 2016 return NULL; 2017 2018 n = tail ? 

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
						  struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
	struct rb_root_cached *r = (struct rb_root_cached *)root;
	struct rb_node *n = &node_internal->rb_node;

	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
	 */
	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	rb_erase_cached(n, r);
	RB_CLEAR_NODE(n);
	WRITE_ONCE(node_internal->owner, NULL);
	return (struct bpf_rb_node *)n;
}

/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
 * program
 */
static int __bpf_rbtree_add(struct bpf_rb_root *root,
			    struct bpf_rb_node_kern *node,
			    void *less, struct btf_record *rec, u64 off)
{
	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
	struct rb_node *parent = NULL, *n = &node->rb_node;
	bpf_callback_t cb = (bpf_callback_t)less;
	bool leftmost = true;

	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
	 * check the latter
	 */
	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
		/* Only called from BPF prog, no need to migrate_disable */
		__bpf_obj_drop_impl((void *)n - off, rec);
		return -EINVAL;
	}

	while (*link) {
		parent = *link;
		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(n, parent, link);
	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
	WRITE_ONCE(node->owner, root);
	return 0;
}

__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
				    void *meta__ign, u64 off)
{
	struct btf_struct_meta *meta = meta__ign;
	struct bpf_rb_node_kern *n = (void *)node;

	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{
	struct rb_root_cached *r = (struct rb_root_cached *)root;

	return (struct bpf_rb_node *)rb_first_cached(r);
}
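
/* Illustrative only: a sketch of the BPF-side pattern for bpf_rbtree_add().
 * The struct "node_data", the "less()" callback and the "glock"/"groot"
 * globals are hypothetical; the program would normally go through the
 * bpf_rbtree_add() wrapper in bpf_experimental.h, and the add/remove/first
 * operations must all run under the rbtree's bpf_spin_lock.
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);	// must be called under the lock
 *	bpf_spin_unlock(&glock);
 */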

/**
 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
 * kfunc which is not stored in a map as a kptr must be released by calling
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
	if (refcount_inc_not_zero(&p->rcu_users))
		return p;
	return NULL;
}

/**
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
	put_task_struct_rcu_user(p);
}

#ifdef CONFIG_CGROUPS
/**
 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
 * this kfunc which is not stored in a map as a kptr must be released by
 * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
	return cgroup_tryget(cgrp) ? cgrp : NULL;
}

/**
 * bpf_cgroup_release - Release the reference acquired on a cgroup.
 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
 * not be freed until the current grace period has ended, even if its refcount
 * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
	cgroup_put(cgrp);
}

/**
 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
 * array. A cgroup returned by this kfunc which is not subsequently stored in a
 * map must be released by calling bpf_cgroup_release().
 * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
	struct cgroup *ancestor;

	if (level > cgrp->level || level < 0)
		return NULL;

	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
	ancestor = cgrp->ancestors[level];
	if (!cgroup_tryget(ancestor))
		return NULL;
	return ancestor;
}

/**
 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
 * kfunc which is not subsequently stored in a map must be released by calling
 * bpf_cgroup_release().
 * @cgid: cgroup id.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_id(cgid);
	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}

/**
 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc and
 * test a task's membership of cgroup ancestry.
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
				       struct cgroup *ancestor)
{
	long ret;

	rcu_read_lock();
	ret = task_under_cgroup_hierarchy(task, ancestor);
	rcu_read_unlock();
	return ret;
}
#endif /* CONFIG_CGROUPS */
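
/* Illustrative only: a sketch of how a tracing-style BPF program might
 * combine the cgroup kfuncs above. The cgroup id "target_cgid" and the
 * trusted "task" pointer are hypothetical and come from the hypothetical
 * program's context.
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(target_cgid);
 *
 *	if (!cgrp)
 *		return 0;
 *	if (bpf_task_under_cgroup(task, cgrp))
 *		bpf_printk("task %d is under the target cgroup", task->pid);
 *	bpf_cgroup_release(cgrp);	// drop the acquired reference
 */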

/**
 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
 * in the root pid namespace idr. If a task is returned, it must either be
 * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_pid_ns(pid, &init_pid_ns);
	if (p)
		p = bpf_task_acquire(p);
	rcu_read_unlock();

	return p;
}

/**
 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
 * @ptr: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *		 length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
 *
 * If the intention is to write to the data slice, please use
 * bpf_dynptr_slice_rdwr.
 *
 * The user must check that the returned pointer is not null before using it.
 *
 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a read-only
 * data slice (can be either direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
				   void *buffer__opt, u32 buffer__szk)
{
	enum bpf_dynptr_type type;
	u32 len = buffer__szk;
	int err;

	if (!ptr->data)
		return NULL;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return NULL;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return ptr->data + ptr->offset + offset;
	case BPF_DYNPTR_TYPE_SKB:
		if (buffer__opt)
			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
		else
			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
	case BPF_DYNPTR_TYPE_XDP:
	{
		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
		if (!IS_ERR_OR_NULL(xdp_ptr))
			return xdp_ptr;

		if (!buffer__opt)
			return NULL;
		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
		return buffer__opt;
	}
	default:
		WARN_ONCE(true, "unknown dynptr type %d\n", type);
		return NULL;
	}
}
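
/* Illustrative only: a sketch of the typical read path with the BPF-side
 * bpf_dynptr_slice() wrapper in an skb/xdp program. The "dynptr" variable,
 * the stack "buffer" and the struct tcphdr usage are hypothetical; the point
 * is that the returned pointer may be either a direct packet pointer or the
 * supplied buffer, and must be NULL-checked before use.
 *
 *	struct tcphdr *th;
 *	u8 buffer[sizeof(*th)];
 *
 *	th = bpf_dynptr_slice(&dynptr, 0, buffer, sizeof(buffer));
 *	if (!th)
 *		return 0;
 *	// th is read-only here; use bpf_dynptr_slice_rdwr() to mutate
 */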

/**
 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
 * @ptr: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *		 length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
 *
 * The returned pointer is writable and may point either directly to the dynptr
 * data at the requested offset or to the buffer if unable to obtain a direct
 * data pointer (for example: the requested slice is to the paged area of an skb
 * packet). In the case where the returned pointer is to the buffer, the user
 * is responsible for persisting writes through calling bpf_dynptr_write(). This
 * usually looks something like this pattern:
 *
 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
 * if (!eth)
 *	return TC_ACT_SHOT;
 *
 * // mutate eth header //
 *
 * if (eth == buffer)
 *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
 *
 * Please note that, as in the example above, the user must check that the
 * returned pointer is not null before using it.
 *
 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a
 * data slice (can be either direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
					void *buffer__opt, u32 buffer__szk)
{
	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
		return NULL;

	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
	 *
	 * For skb-type dynptrs, it is safe to write into the returned pointer
	 * if the bpf program allows skb data writes. There are two possibilities
	 * that may occur when calling bpf_dynptr_slice_rdwr:
	 *
	 * 1) The requested slice is in the head of the skb. In this case, the
	 * returned pointer is directly to skb data, and if the skb is cloned, the
	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
	 * The pointer can be directly written into.
	 *
	 * 2) Some portion of the requested slice is in the paged buffer area.
	 * In this case, the requested data will be copied out into the buffer
	 * and the returned pointer will be a pointer to the buffer. The skb
	 * will not be pulled. To persist the write, the user will need to call
	 * bpf_dynptr_write(), which will pull the skb and commit the write.
	 *
	 * Similarly for xdp programs, if the requested slice is not across xdp
	 * fragments, then a direct pointer will be returned, otherwise the data
	 * will be copied out into the buffer and the user will need to call
	 * bpf_dynptr_write() to commit changes.
	 */
	return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
}

__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
{
	u32 size;

	if (!ptr->data || start > end)
		return -EINVAL;

	size = __bpf_dynptr_size(ptr);

	if (start > size || end > size)
		return -ERANGE;

	ptr->offset += start;
	bpf_dynptr_set_size(ptr, end - start);

	return 0;
}

__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
{
	return !ptr->data;
}

__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
{
	if (!ptr->data)
		return false;

	return __bpf_dynptr_is_rdonly(ptr);
}

__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	if (!ptr->data)
		return -EINVAL;

	return __bpf_dynptr_size(ptr);
}

__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
				 struct bpf_dynptr_kern *clone__uninit)
{
	if (!ptr->data) {
		bpf_dynptr_set_null(clone__uninit);
		return -EINVAL;
	}

	*clone__uninit = *ptr;

	return 0;
}

__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
	return obj;
}

__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
{
	return obj__ign;
}

__bpf_kfunc void bpf_rcu_read_lock(void)
{
	rcu_read_lock();
}

__bpf_kfunc void bpf_rcu_read_unlock(void)
{
	rcu_read_unlock();
}

__diag_pop();
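
/* Illustrative only: a sketch of the clone/adjust pattern as seen from a BPF
 * program. The dynptr "ptr" and the chosen offsets are hypothetical; the
 * point is that bpf_dynptr_clone() preserves the original view while the
 * clone can be narrowed independently with bpf_dynptr_adjust().
 *
 *	struct bpf_dynptr clone;
 *
 *	if (bpf_dynptr_clone(&ptr, &clone))
 *		return 0;
 *	// restrict the clone to bytes [4, 8) of the original view
 *	if (bpf_dynptr_adjust(&clone, 4, 8))
 *		return 0;
 *	// bpf_dynptr_size(&clone) is now 4; bpf_dynptr_size(&ptr) is unchanged
 */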

BTF_SET8_START(generic_btf_ids)
#ifdef CONFIG_KEXEC_CORE
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)

#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_SET8_END(generic_btf_ids)

static const struct btf_kfunc_id_set generic_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &generic_btf_ids,
};

BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release)
#endif

BTF_SET8_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
BTF_ID_FLAGS(func, bpf_rdonly_cast)
BTF_ID_FLAGS(func, bpf_rcu_read_lock)
BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_dynptr_adjust)
BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
BTF_SET8_END(common_btf_ids)

static const struct btf_kfunc_id_set common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &common_btf_ids,
};

static int __init kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc generic_dtors[] = {
		{
			.btf_id       = generic_dtor_ids[0],
			.kfunc_btf_id = generic_dtor_ids[1]
		},
#ifdef CONFIG_CGROUPS
		{
			.btf_id       = generic_dtor_ids[2],
			.kfunc_btf_id = generic_dtor_ids[3]
		},
#endif
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
						 ARRAY_SIZE(generic_dtors),
						 THIS_MODULE);
	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
}

late_initcall(kfunc_init);
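
/* Illustrative only: a sketch of how another subsystem could expose its own
 * kfuncs with the same registration pattern used by kfunc_init() above. The
 * set name "example_btf_ids" and the kfunc "example_kfunc" are hypothetical.
 *
 *	BTF_SET8_START(example_btf_ids)
 *	BTF_ID_FLAGS(func, example_kfunc, KF_RET_NULL)
 *	BTF_SET8_END(example_btf_ids)
 *
 *	static const struct btf_kfunc_id_set example_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &example_btf_ids,
 *	};
 *
 *	// typically called from that subsystem's own init path:
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &example_kfunc_set);
 */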