1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #ifndef _LINUX_BPF_H 5 #define _LINUX_BPF_H 1 6 7 #include <uapi/linux/bpf.h> 8 #include <uapi/linux/filter.h> 9 10 #include <linux/workqueue.h> 11 #include <linux/file.h> 12 #include <linux/percpu.h> 13 #include <linux/err.h> 14 #include <linux/rbtree_latch.h> 15 #include <linux/numa.h> 16 #include <linux/mm_types.h> 17 #include <linux/wait.h> 18 #include <linux/refcount.h> 19 #include <linux/mutex.h> 20 #include <linux/module.h> 21 #include <linux/kallsyms.h> 22 #include <linux/capability.h> 23 #include <linux/sched/mm.h> 24 #include <linux/slab.h> 25 #include <linux/percpu-refcount.h> 26 #include <linux/stddef.h> 27 #include <linux/bpfptr.h> 28 #include <linux/btf.h> 29 #include <linux/rcupdate_trace.h> 30 #include <linux/init.h> 31 32 struct bpf_verifier_env; 33 struct bpf_verifier_log; 34 struct perf_event; 35 struct bpf_prog; 36 struct bpf_prog_aux; 37 struct bpf_map; 38 struct sock; 39 struct seq_file; 40 struct btf; 41 struct btf_type; 42 struct exception_table_entry; 43 struct seq_operations; 44 struct bpf_iter_aux_info; 45 struct bpf_local_storage; 46 struct bpf_local_storage_map; 47 struct kobject; 48 struct mem_cgroup; 49 struct module; 50 struct bpf_func_state; 51 struct ftrace_ops; 52 struct cgroup; 53 54 extern struct idr btf_idr; 55 extern spinlock_t btf_idr_lock; 56 extern struct kobject *btf_kobj; 57 58 typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); 59 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, 60 struct bpf_iter_aux_info *aux); 61 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); 62 typedef unsigned int (*bpf_func_t)(const void *, 63 const struct bpf_insn *); 64 struct bpf_iter_seq_info { 65 const struct seq_operations *seq_ops; 66 bpf_iter_init_seq_priv_t init_seq_private; 67 bpf_iter_fini_seq_priv_t fini_seq_private; 68 u32 seq_priv_size; 69 }; 70 71 /* map is generic key/value storage optionally accessible by eBPF programs */ 72 struct bpf_map_ops { 73 /* funcs callable from userspace (via syscall) */ 74 int (*map_alloc_check)(union bpf_attr *attr); 75 struct bpf_map *(*map_alloc)(union bpf_attr *attr); 76 void (*map_release)(struct bpf_map *map, struct file *map_file); 77 void (*map_free)(struct bpf_map *map); 78 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 79 void (*map_release_uref)(struct bpf_map *map); 80 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); 81 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, 82 union bpf_attr __user *uattr); 83 int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, 84 void *value, u64 flags); 85 int (*map_lookup_and_delete_batch)(struct bpf_map *map, 86 const union bpf_attr *attr, 87 union bpf_attr __user *uattr); 88 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, 89 union bpf_attr __user *uattr); 90 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, 91 union bpf_attr __user *uattr); 92 93 /* funcs callable from userspace and from eBPF programs */ 94 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 95 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 96 int (*map_delete_elem)(struct bpf_map *map, void *key); 97 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); 98 int (*map_pop_elem)(struct bpf_map *map, void *value); 99 int (*map_peek_elem)(struct bpf_map *map, void *value); 100 void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu); 101 102 /* funcs called by prog_array and perf_event_array map */ 103 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, 104 int fd); 105 void (*map_fd_put_ptr)(void *ptr); 106 int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); 107 u32 (*map_fd_sys_lookup_elem)(void *ptr); 108 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 109 struct seq_file *m); 110 int (*map_check_btf)(const struct bpf_map *map, 111 const struct btf *btf, 112 const struct btf_type *key_type, 113 const struct btf_type *value_type); 114 115 /* Prog poke tracking helpers. */ 116 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); 117 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); 118 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, 119 struct bpf_prog *new); 120 121 /* Direct value access helpers. */ 122 int (*map_direct_value_addr)(const struct bpf_map *map, 123 u64 *imm, u32 off); 124 int (*map_direct_value_meta)(const struct bpf_map *map, 125 u64 imm, u32 *off); 126 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); 127 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, 128 struct poll_table_struct *pts); 129 130 /* Functions called by bpf_local_storage maps */ 131 int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, 132 void *owner, u32 size); 133 void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, 134 void *owner, u32 size); 135 struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); 136 137 /* Misc helpers.*/ 138 int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags); 139 140 /* map_meta_equal must be implemented for maps that can be 141 * used as an inner map. It is a runtime check to ensure 142 * an inner map can be inserted to an outer map. 143 * 144 * Some properties of the inner map has been used during the 145 * verification time. When inserting an inner map at the runtime, 146 * map_meta_equal has to ensure the inserting map has the same 147 * properties that the verifier has used earlier. 148 */ 149 bool (*map_meta_equal)(const struct bpf_map *meta0, 150 const struct bpf_map *meta1); 151 152 153 int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, 154 struct bpf_func_state *caller, 155 struct bpf_func_state *callee); 156 int (*map_for_each_callback)(struct bpf_map *map, 157 bpf_callback_t callback_fn, 158 void *callback_ctx, u64 flags); 159 160 /* BTF id of struct allocated by map_alloc */ 161 int *map_btf_id; 162 163 /* bpf_iter info used to open a seq_file */ 164 const struct bpf_iter_seq_info *iter_seq_info; 165 }; 166 167 enum { 168 /* Support at most 8 pointers in a BPF map value */ 169 BPF_MAP_VALUE_OFF_MAX = 8, 170 BPF_MAP_OFF_ARR_MAX = BPF_MAP_VALUE_OFF_MAX + 171 1 + /* for bpf_spin_lock */ 172 1, /* for bpf_timer */ 173 }; 174 175 enum bpf_kptr_type { 176 BPF_KPTR_UNREF, 177 BPF_KPTR_REF, 178 }; 179 180 struct bpf_map_value_off_desc { 181 u32 offset; 182 enum bpf_kptr_type type; 183 struct { 184 struct btf *btf; 185 struct module *module; 186 btf_dtor_kfunc_t dtor; 187 u32 btf_id; 188 } kptr; 189 }; 190 191 struct bpf_map_value_off { 192 u32 nr_off; 193 struct bpf_map_value_off_desc off[]; 194 }; 195 196 struct bpf_map_off_arr { 197 u32 cnt; 198 u32 field_off[BPF_MAP_OFF_ARR_MAX]; 199 u8 field_sz[BPF_MAP_OFF_ARR_MAX]; 200 }; 201 202 struct bpf_map { 203 /* The first two cachelines with read-mostly members of which some 204 * are also accessed in fast-path (e.g. ops, max_entries). 205 */ 206 const struct bpf_map_ops *ops ____cacheline_aligned; 207 struct bpf_map *inner_map_meta; 208 #ifdef CONFIG_SECURITY 209 void *security; 210 #endif 211 enum bpf_map_type map_type; 212 u32 key_size; 213 u32 value_size; 214 u32 max_entries; 215 u64 map_extra; /* any per-map-type extra fields */ 216 u32 map_flags; 217 int spin_lock_off; /* >=0 valid offset, <0 error */ 218 struct bpf_map_value_off *kptr_off_tab; 219 int timer_off; /* >=0 valid offset, <0 error */ 220 u32 id; 221 int numa_node; 222 u32 btf_key_type_id; 223 u32 btf_value_type_id; 224 u32 btf_vmlinux_value_type_id; 225 struct btf *btf; 226 #ifdef CONFIG_MEMCG_KMEM 227 struct obj_cgroup *objcg; 228 #endif 229 char name[BPF_OBJ_NAME_LEN]; 230 struct bpf_map_off_arr *off_arr; 231 /* The 3rd and 4th cacheline with misc members to avoid false sharing 232 * particularly with refcounting. 233 */ 234 atomic64_t refcnt ____cacheline_aligned; 235 atomic64_t usercnt; 236 struct work_struct work; 237 struct mutex freeze_mutex; 238 atomic64_t writecnt; 239 /* 'Ownership' of program-containing map is claimed by the first program 240 * that is going to use this map or by the first program which FD is 241 * stored in the map to make sure that all callers and callees have the 242 * same prog type, JITed flag and xdp_has_frags flag. 243 */ 244 struct { 245 spinlock_t lock; 246 enum bpf_prog_type type; 247 bool jited; 248 bool xdp_has_frags; 249 } owner; 250 bool bypass_spec_v1; 251 bool frozen; /* write-once; write-protected by freeze_mutex */ 252 }; 253 254 static inline bool map_value_has_spin_lock(const struct bpf_map *map) 255 { 256 return map->spin_lock_off >= 0; 257 } 258 259 static inline bool map_value_has_timer(const struct bpf_map *map) 260 { 261 return map->timer_off >= 0; 262 } 263 264 static inline bool map_value_has_kptrs(const struct bpf_map *map) 265 { 266 return !IS_ERR_OR_NULL(map->kptr_off_tab); 267 } 268 269 static inline void check_and_init_map_value(struct bpf_map *map, void *dst) 270 { 271 if (unlikely(map_value_has_spin_lock(map))) 272 memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock)); 273 if (unlikely(map_value_has_timer(map))) 274 memset(dst + map->timer_off, 0, sizeof(struct bpf_timer)); 275 if (unlikely(map_value_has_kptrs(map))) { 276 struct bpf_map_value_off *tab = map->kptr_off_tab; 277 int i; 278 279 for (i = 0; i < tab->nr_off; i++) 280 *(u64 *)(dst + tab->off[i].offset) = 0; 281 } 282 } 283 284 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and 285 * forced to use 'long' read/writes to try to atomically copy long counters. 286 * Best-effort only. No barriers here, since it _will_ race with concurrent 287 * updates from BPF programs. Called from bpf syscall and mostly used with 288 * size 8 or 16 bytes, so ask compiler to inline it. 289 */ 290 static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) 291 { 292 const long *lsrc = src; 293 long *ldst = dst; 294 295 size /= sizeof(long); 296 while (size--) 297 *ldst++ = *lsrc++; 298 } 299 300 /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */ 301 static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, bool long_memcpy) 302 { 303 u32 curr_off = 0; 304 int i; 305 306 if (likely(!map->off_arr)) { 307 if (long_memcpy) 308 bpf_long_memcpy(dst, src, round_up(map->value_size, 8)); 309 else 310 memcpy(dst, src, map->value_size); 311 return; 312 } 313 314 for (i = 0; i < map->off_arr->cnt; i++) { 315 u32 next_off = map->off_arr->field_off[i]; 316 317 memcpy(dst + curr_off, src + curr_off, next_off - curr_off); 318 curr_off += map->off_arr->field_sz[i]; 319 } 320 memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off); 321 } 322 323 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) 324 { 325 __copy_map_value(map, dst, src, false); 326 } 327 328 static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src) 329 { 330 __copy_map_value(map, dst, src, true); 331 } 332 333 static inline void zero_map_value(struct bpf_map *map, void *dst) 334 { 335 u32 curr_off = 0; 336 int i; 337 338 if (likely(!map->off_arr)) { 339 memset(dst, 0, map->value_size); 340 return; 341 } 342 343 for (i = 0; i < map->off_arr->cnt; i++) { 344 u32 next_off = map->off_arr->field_off[i]; 345 346 memset(dst + curr_off, 0, next_off - curr_off); 347 curr_off += map->off_arr->field_sz[i]; 348 } 349 memset(dst + curr_off, 0, map->value_size - curr_off); 350 } 351 352 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 353 bool lock_src); 354 void bpf_timer_cancel_and_free(void *timer); 355 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); 356 357 struct bpf_offload_dev; 358 struct bpf_offloaded_map; 359 360 struct bpf_map_dev_ops { 361 int (*map_get_next_key)(struct bpf_offloaded_map *map, 362 void *key, void *next_key); 363 int (*map_lookup_elem)(struct bpf_offloaded_map *map, 364 void *key, void *value); 365 int (*map_update_elem)(struct bpf_offloaded_map *map, 366 void *key, void *value, u64 flags); 367 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); 368 }; 369 370 struct bpf_offloaded_map { 371 struct bpf_map map; 372 struct net_device *netdev; 373 const struct bpf_map_dev_ops *dev_ops; 374 void *dev_priv; 375 struct list_head offloads; 376 }; 377 378 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) 379 { 380 return container_of(map, struct bpf_offloaded_map, map); 381 } 382 383 static inline bool bpf_map_offload_neutral(const struct bpf_map *map) 384 { 385 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 386 } 387 388 static inline bool bpf_map_support_seq_show(const struct bpf_map *map) 389 { 390 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && 391 map->ops->map_seq_show_elem; 392 } 393 394 int map_check_no_btf(const struct bpf_map *map, 395 const struct btf *btf, 396 const struct btf_type *key_type, 397 const struct btf_type *value_type); 398 399 bool bpf_map_meta_equal(const struct bpf_map *meta0, 400 const struct bpf_map *meta1); 401 402 extern const struct bpf_map_ops bpf_map_offload_ops; 403 404 /* bpf_type_flag contains a set of flags that are applicable to the values of 405 * arg_type, ret_type and reg_type. For example, a pointer value may be null, 406 * or a memory is read-only. We classify types into two categories: base types 407 * and extended types. Extended types are base types combined with a type flag. 408 * 409 * Currently there are no more than 32 base types in arg_type, ret_type and 410 * reg_types. 411 */ 412 #define BPF_BASE_TYPE_BITS 8 413 414 enum bpf_type_flag { 415 /* PTR may be NULL. */ 416 PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), 417 418 /* MEM is read-only. When applied on bpf_arg, it indicates the arg is 419 * compatible with both mutable and immutable memory. 420 */ 421 MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), 422 423 /* MEM was "allocated" from a different helper, and cannot be mixed 424 * with regular non-MEM_ALLOC'ed MEM types. 425 */ 426 MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS), 427 428 /* MEM is in user address space. */ 429 MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), 430 431 /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged 432 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In 433 * order to drop this tag, it must be passed into bpf_per_cpu_ptr() 434 * or bpf_this_cpu_ptr(), which will return the pointer corresponding 435 * to the specified cpu. 436 */ 437 MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), 438 439 /* Indicates that the argument will be released. */ 440 OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS), 441 442 /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark 443 * unreferenced and referenced kptr loaded from map value using a load 444 * instruction, so that they can only be dereferenced but not escape the 445 * BPF program into the kernel (i.e. cannot be passed as arguments to 446 * kfunc or bpf helpers). 447 */ 448 PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS), 449 450 MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS), 451 452 /* DYNPTR points to memory local to the bpf program. */ 453 DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS), 454 455 /* DYNPTR points to a kernel-produced ringbuf record. */ 456 DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS), 457 458 /* Size is known at compile time. */ 459 MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS), 460 461 __BPF_TYPE_FLAG_MAX, 462 __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, 463 }; 464 465 #define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF) 466 467 /* Max number of base types. */ 468 #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) 469 470 /* Max number of all types. */ 471 #define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) 472 473 /* function argument constraints */ 474 enum bpf_arg_type { 475 ARG_DONTCARE = 0, /* unused argument in helper function */ 476 477 /* the following constraints used to prototype 478 * bpf_map_lookup/update/delete_elem() functions 479 */ 480 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ 481 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ 482 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ 483 484 /* Used to prototype bpf_memcmp() and other functions that access data 485 * on eBPF program stack 486 */ 487 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 488 489 ARG_CONST_SIZE, /* number of bytes accessed from memory */ 490 ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ 491 492 ARG_PTR_TO_CTX, /* pointer to context */ 493 ARG_ANYTHING, /* any (initialized) argument is ok */ 494 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ 495 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ 496 ARG_PTR_TO_INT, /* pointer to int */ 497 ARG_PTR_TO_LONG, /* pointer to long */ 498 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ 499 ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ 500 ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ 501 ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ 502 ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ 503 ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ 504 ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ 505 ARG_PTR_TO_STACK, /* pointer to stack */ 506 ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ 507 ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ 508 ARG_PTR_TO_KPTR, /* pointer to referenced kptr */ 509 ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */ 510 __BPF_ARG_TYPE_MAX, 511 512 /* Extended arg_types. */ 513 ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, 514 ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, 515 ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, 516 ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, 517 ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM, 518 ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, 519 ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID, 520 /* pointer to memory does not need to be initialized, helper function must fill 521 * all bytes or clear them in error case. 522 */ 523 ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM, 524 /* Pointer to valid memory of size known at compile time. */ 525 ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM, 526 527 /* This must be the last entry. Its purpose is to ensure the enum is 528 * wide enough to hold the higher bits reserved for bpf_type_flag. 529 */ 530 __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, 531 }; 532 static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); 533 534 /* type of values returned from helper functions */ 535 enum bpf_return_type { 536 RET_INTEGER, /* function returns integer */ 537 RET_VOID, /* function doesn't return anything */ 538 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ 539 RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ 540 RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ 541 RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ 542 RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */ 543 RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ 544 RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ 545 __BPF_RET_TYPE_MAX, 546 547 /* Extended ret_types. */ 548 RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, 549 RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, 550 RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, 551 RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, 552 RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM, 553 RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM, 554 RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, 555 556 /* This must be the last entry. Its purpose is to ensure the enum is 557 * wide enough to hold the higher bits reserved for bpf_type_flag. 558 */ 559 __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, 560 }; 561 static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); 562 563 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs 564 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL 565 * instructions after verifying 566 */ 567 struct bpf_func_proto { 568 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 569 bool gpl_only; 570 bool pkt_access; 571 enum bpf_return_type ret_type; 572 union { 573 struct { 574 enum bpf_arg_type arg1_type; 575 enum bpf_arg_type arg2_type; 576 enum bpf_arg_type arg3_type; 577 enum bpf_arg_type arg4_type; 578 enum bpf_arg_type arg5_type; 579 }; 580 enum bpf_arg_type arg_type[5]; 581 }; 582 union { 583 struct { 584 u32 *arg1_btf_id; 585 u32 *arg2_btf_id; 586 u32 *arg3_btf_id; 587 u32 *arg4_btf_id; 588 u32 *arg5_btf_id; 589 }; 590 u32 *arg_btf_id[5]; 591 struct { 592 size_t arg1_size; 593 size_t arg2_size; 594 size_t arg3_size; 595 size_t arg4_size; 596 size_t arg5_size; 597 }; 598 size_t arg_size[5]; 599 }; 600 int *ret_btf_id; /* return value btf_id */ 601 bool (*allowed)(const struct bpf_prog *prog); 602 }; 603 604 /* bpf_context is intentionally undefined structure. Pointer to bpf_context is 605 * the first argument to eBPF programs. 606 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' 607 */ 608 struct bpf_context; 609 610 enum bpf_access_type { 611 BPF_READ = 1, 612 BPF_WRITE = 2 613 }; 614 615 /* types of values stored in eBPF registers */ 616 /* Pointer types represent: 617 * pointer 618 * pointer + imm 619 * pointer + (u16) var 620 * pointer + (u16) var + imm 621 * if (range > 0) then [ptr, ptr + range - off) is safe to access 622 * if (id > 0) means that some 'var' was added 623 * if (off > 0) means that 'imm' was added 624 */ 625 enum bpf_reg_type { 626 NOT_INIT = 0, /* nothing was written into register */ 627 SCALAR_VALUE, /* reg doesn't contain a valid pointer */ 628 PTR_TO_CTX, /* reg points to bpf_context */ 629 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ 630 PTR_TO_MAP_VALUE, /* reg points to map element value */ 631 PTR_TO_MAP_KEY, /* reg points to a map element key */ 632 PTR_TO_STACK, /* reg == frame_pointer + offset */ 633 PTR_TO_PACKET_META, /* skb->data - meta_len */ 634 PTR_TO_PACKET, /* reg points to skb->data */ 635 PTR_TO_PACKET_END, /* skb->data + headlen */ 636 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ 637 PTR_TO_SOCKET, /* reg points to struct bpf_sock */ 638 PTR_TO_SOCK_COMMON, /* reg points to sock_common */ 639 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ 640 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ 641 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ 642 /* PTR_TO_BTF_ID points to a kernel struct that does not need 643 * to be null checked by the BPF program. This does not imply the 644 * pointer is _not_ null and in practice this can easily be a null 645 * pointer when reading pointer chains. The assumption is program 646 * context will handle null pointer dereference typically via fault 647 * handling. The verifier must keep this in mind and can make no 648 * assumptions about null or non-null when doing branch analysis. 649 * Further, when passed into helpers the helpers can not, without 650 * additional context, assume the value is non-null. 651 */ 652 PTR_TO_BTF_ID, 653 /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not 654 * been checked for null. Used primarily to inform the verifier 655 * an explicit null check is required for this struct. 656 */ 657 PTR_TO_MEM, /* reg points to valid memory region */ 658 PTR_TO_BUF, /* reg points to a read/write buffer */ 659 PTR_TO_FUNC, /* reg points to a bpf program function */ 660 PTR_TO_DYNPTR, /* reg points to a dynptr */ 661 __BPF_REG_TYPE_MAX, 662 663 /* Extended reg_types. */ 664 PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, 665 PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, 666 PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, 667 PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, 668 PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, 669 670 /* This must be the last entry. Its purpose is to ensure the enum is 671 * wide enough to hold the higher bits reserved for bpf_type_flag. 672 */ 673 __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, 674 }; 675 static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); 676 677 /* The information passed from prog-specific *_is_valid_access 678 * back to the verifier. 679 */ 680 struct bpf_insn_access_aux { 681 enum bpf_reg_type reg_type; 682 union { 683 int ctx_field_size; 684 struct { 685 struct btf *btf; 686 u32 btf_id; 687 }; 688 }; 689 struct bpf_verifier_log *log; /* for verbose logs */ 690 }; 691 692 static inline void 693 bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) 694 { 695 aux->ctx_field_size = size; 696 } 697 698 static inline bool bpf_pseudo_func(const struct bpf_insn *insn) 699 { 700 return insn->code == (BPF_LD | BPF_IMM | BPF_DW) && 701 insn->src_reg == BPF_PSEUDO_FUNC; 702 } 703 704 struct bpf_prog_ops { 705 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, 706 union bpf_attr __user *uattr); 707 }; 708 709 struct bpf_verifier_ops { 710 /* return eBPF function prototype for verification */ 711 const struct bpf_func_proto * 712 (*get_func_proto)(enum bpf_func_id func_id, 713 const struct bpf_prog *prog); 714 715 /* return true if 'size' wide access at offset 'off' within bpf_context 716 * with 'type' (read or write) is allowed 717 */ 718 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, 719 const struct bpf_prog *prog, 720 struct bpf_insn_access_aux *info); 721 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, 722 const struct bpf_prog *prog); 723 int (*gen_ld_abs)(const struct bpf_insn *orig, 724 struct bpf_insn *insn_buf); 725 u32 (*convert_ctx_access)(enum bpf_access_type type, 726 const struct bpf_insn *src, 727 struct bpf_insn *dst, 728 struct bpf_prog *prog, u32 *target_size); 729 int (*btf_struct_access)(struct bpf_verifier_log *log, 730 const struct btf *btf, 731 const struct btf_type *t, int off, int size, 732 enum bpf_access_type atype, 733 u32 *next_btf_id, enum bpf_type_flag *flag); 734 }; 735 736 struct bpf_prog_offload_ops { 737 /* verifier basic callbacks */ 738 int (*insn_hook)(struct bpf_verifier_env *env, 739 int insn_idx, int prev_insn_idx); 740 int (*finalize)(struct bpf_verifier_env *env); 741 /* verifier optimization callbacks (called after .finalize) */ 742 int (*replace_insn)(struct bpf_verifier_env *env, u32 off, 743 struct bpf_insn *insn); 744 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); 745 /* program management callbacks */ 746 int (*prepare)(struct bpf_prog *prog); 747 int (*translate)(struct bpf_prog *prog); 748 void (*destroy)(struct bpf_prog *prog); 749 }; 750 751 struct bpf_prog_offload { 752 struct bpf_prog *prog; 753 struct net_device *netdev; 754 struct bpf_offload_dev *offdev; 755 void *dev_priv; 756 struct list_head offloads; 757 bool dev_state; 758 bool opt_failed; 759 void *jited_image; 760 u32 jited_len; 761 }; 762 763 enum bpf_cgroup_storage_type { 764 BPF_CGROUP_STORAGE_SHARED, 765 BPF_CGROUP_STORAGE_PERCPU, 766 __BPF_CGROUP_STORAGE_MAX 767 }; 768 769 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX 770 771 /* The longest tracepoint has 12 args. 772 * See include/trace/bpf_probe.h 773 */ 774 #define MAX_BPF_FUNC_ARGS 12 775 776 /* The maximum number of arguments passed through registers 777 * a single function may have. 778 */ 779 #define MAX_BPF_FUNC_REG_ARGS 5 780 781 /* The argument is a structure. */ 782 #define BTF_FMODEL_STRUCT_ARG BIT(0) 783 784 struct btf_func_model { 785 u8 ret_size; 786 u8 nr_args; 787 u8 arg_size[MAX_BPF_FUNC_ARGS]; 788 u8 arg_flags[MAX_BPF_FUNC_ARGS]; 789 }; 790 791 /* Restore arguments before returning from trampoline to let original function 792 * continue executing. This flag is used for fentry progs when there are no 793 * fexit progs. 794 */ 795 #define BPF_TRAMP_F_RESTORE_REGS BIT(0) 796 /* Call original function after fentry progs, but before fexit progs. 797 * Makes sense for fentry/fexit, normal calls and indirect calls. 798 */ 799 #define BPF_TRAMP_F_CALL_ORIG BIT(1) 800 /* Skip current frame and return to parent. Makes sense for fentry/fexit 801 * programs only. Should not be used with normal calls and indirect calls. 802 */ 803 #define BPF_TRAMP_F_SKIP_FRAME BIT(2) 804 /* Store IP address of the caller on the trampoline stack, 805 * so it's available for trampoline's programs. 806 */ 807 #define BPF_TRAMP_F_IP_ARG BIT(3) 808 /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ 809 #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) 810 811 /* Get original function from stack instead of from provided direct address. 812 * Makes sense for trampolines with fexit or fmod_ret programs. 813 */ 814 #define BPF_TRAMP_F_ORIG_STACK BIT(5) 815 816 /* This trampoline is on a function with another ftrace_ops with IPMODIFY, 817 * e.g., a live patch. This flag is set and cleared by ftrace call backs, 818 */ 819 #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) 820 821 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 822 * bytes on x86. 823 */ 824 #define BPF_MAX_TRAMP_LINKS 38 825 826 struct bpf_tramp_links { 827 struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS]; 828 int nr_links; 829 }; 830 831 struct bpf_tramp_run_ctx; 832 833 /* Different use cases for BPF trampoline: 834 * 1. replace nop at the function entry (kprobe equivalent) 835 * flags = BPF_TRAMP_F_RESTORE_REGS 836 * fentry = a set of programs to run before returning from trampoline 837 * 838 * 2. replace nop at the function entry (kprobe + kretprobe equivalent) 839 * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME 840 * orig_call = fentry_ip + MCOUNT_INSN_SIZE 841 * fentry = a set of program to run before calling original function 842 * fexit = a set of program to run after original function 843 * 844 * 3. replace direct call instruction anywhere in the function body 845 * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) 846 * With flags = 0 847 * fentry = a set of programs to run before returning from trampoline 848 * With flags = BPF_TRAMP_F_CALL_ORIG 849 * orig_call = original callback addr or direct function addr 850 * fentry = a set of program to run before calling original function 851 * fexit = a set of program to run after original function 852 */ 853 struct bpf_tramp_image; 854 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, 855 const struct btf_func_model *m, u32 flags, 856 struct bpf_tramp_links *tlinks, 857 void *orig_call); 858 /* these two functions are called from generated trampoline */ 859 u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx); 860 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx); 861 u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx); 862 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start, 863 struct bpf_tramp_run_ctx *run_ctx); 864 u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog, 865 struct bpf_tramp_run_ctx *run_ctx); 866 void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start, 867 struct bpf_tramp_run_ctx *run_ctx); 868 u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog, 869 struct bpf_tramp_run_ctx *run_ctx); 870 void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start, 871 struct bpf_tramp_run_ctx *run_ctx); 872 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); 873 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); 874 875 struct bpf_ksym { 876 unsigned long start; 877 unsigned long end; 878 char name[KSYM_NAME_LEN]; 879 struct list_head lnode; 880 struct latch_tree_node tnode; 881 bool prog; 882 }; 883 884 enum bpf_tramp_prog_type { 885 BPF_TRAMP_FENTRY, 886 BPF_TRAMP_FEXIT, 887 BPF_TRAMP_MODIFY_RETURN, 888 BPF_TRAMP_MAX, 889 BPF_TRAMP_REPLACE, /* more than MAX */ 890 }; 891 892 struct bpf_tramp_image { 893 void *image; 894 struct bpf_ksym ksym; 895 struct percpu_ref pcref; 896 void *ip_after_call; 897 void *ip_epilogue; 898 union { 899 struct rcu_head rcu; 900 struct work_struct work; 901 }; 902 }; 903 904 struct bpf_trampoline { 905 /* hlist for trampoline_table */ 906 struct hlist_node hlist; 907 struct ftrace_ops *fops; 908 /* serializes access to fields of this trampoline */ 909 struct mutex mutex; 910 refcount_t refcnt; 911 u32 flags; 912 u64 key; 913 struct { 914 struct btf_func_model model; 915 void *addr; 916 bool ftrace_managed; 917 } func; 918 /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF 919 * program by replacing one of its functions. func.addr is the address 920 * of the function it replaced. 921 */ 922 struct bpf_prog *extension_prog; 923 /* list of BPF programs using this trampoline */ 924 struct hlist_head progs_hlist[BPF_TRAMP_MAX]; 925 /* Number of attached programs. A counter per kind. */ 926 int progs_cnt[BPF_TRAMP_MAX]; 927 /* Executable image of trampoline */ 928 struct bpf_tramp_image *cur_image; 929 u64 selector; 930 struct module *mod; 931 }; 932 933 struct bpf_attach_target_info { 934 struct btf_func_model fmodel; 935 long tgt_addr; 936 const char *tgt_name; 937 const struct btf_type *tgt_type; 938 }; 939 940 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ 941 942 struct bpf_dispatcher_prog { 943 struct bpf_prog *prog; 944 refcount_t users; 945 }; 946 947 struct bpf_dispatcher { 948 /* dispatcher mutex */ 949 struct mutex mutex; 950 void *func; 951 struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; 952 int num_progs; 953 void *image; 954 void *rw_image; 955 u32 image_off; 956 struct bpf_ksym ksym; 957 }; 958 959 static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( 960 const void *ctx, 961 const struct bpf_insn *insnsi, 962 bpf_func_t bpf_func) 963 { 964 return bpf_func(ctx, insnsi); 965 } 966 967 #ifdef CONFIG_BPF_JIT 968 int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); 969 int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); 970 struct bpf_trampoline *bpf_trampoline_get(u64 key, 971 struct bpf_attach_target_info *tgt_info); 972 void bpf_trampoline_put(struct bpf_trampoline *tr); 973 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs); 974 int __init bpf_arch_init_dispatcher_early(void *ip); 975 976 #define BPF_DISPATCHER_INIT(_name) { \ 977 .mutex = __MUTEX_INITIALIZER(_name.mutex), \ 978 .func = &_name##_func, \ 979 .progs = {}, \ 980 .num_progs = 0, \ 981 .image = NULL, \ 982 .image_off = 0, \ 983 .ksym = { \ 984 .name = #_name, \ 985 .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ 986 }, \ 987 } 988 989 #define BPF_DISPATCHER_INIT_CALL(_name) \ 990 static int __init _name##_init(void) \ 991 { \ 992 return bpf_arch_init_dispatcher_early(_name##_func); \ 993 } \ 994 early_initcall(_name##_init) 995 996 #ifdef CONFIG_X86_64 997 #define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5))) 998 #else 999 #define BPF_DISPATCHER_ATTRIBUTES 1000 #endif 1001 1002 #define DEFINE_BPF_DISPATCHER(name) \ 1003 notrace BPF_DISPATCHER_ATTRIBUTES \ 1004 noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ 1005 const void *ctx, \ 1006 const struct bpf_insn *insnsi, \ 1007 bpf_func_t bpf_func) \ 1008 { \ 1009 return bpf_func(ctx, insnsi); \ 1010 } \ 1011 EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ 1012 struct bpf_dispatcher bpf_dispatcher_##name = \ 1013 BPF_DISPATCHER_INIT(bpf_dispatcher_##name); \ 1014 BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name); 1015 1016 #define DECLARE_BPF_DISPATCHER(name) \ 1017 unsigned int bpf_dispatcher_##name##_func( \ 1018 const void *ctx, \ 1019 const struct bpf_insn *insnsi, \ 1020 bpf_func_t bpf_func); \ 1021 extern struct bpf_dispatcher bpf_dispatcher_##name; 1022 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func 1023 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) 1024 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, 1025 struct bpf_prog *to); 1026 /* Called only from JIT-enabled code, so there's no need for stubs. */ 1027 void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); 1028 void bpf_image_ksym_del(struct bpf_ksym *ksym); 1029 void bpf_ksym_add(struct bpf_ksym *ksym); 1030 void bpf_ksym_del(struct bpf_ksym *ksym); 1031 int bpf_jit_charge_modmem(u32 size); 1032 void bpf_jit_uncharge_modmem(u32 size); 1033 bool bpf_prog_has_trampoline(const struct bpf_prog *prog); 1034 #else 1035 static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, 1036 struct bpf_trampoline *tr) 1037 { 1038 return -ENOTSUPP; 1039 } 1040 static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, 1041 struct bpf_trampoline *tr) 1042 { 1043 return -ENOTSUPP; 1044 } 1045 static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, 1046 struct bpf_attach_target_info *tgt_info) 1047 { 1048 return ERR_PTR(-EOPNOTSUPP); 1049 } 1050 static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} 1051 #define DEFINE_BPF_DISPATCHER(name) 1052 #define DECLARE_BPF_DISPATCHER(name) 1053 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func 1054 #define BPF_DISPATCHER_PTR(name) NULL 1055 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, 1056 struct bpf_prog *from, 1057 struct bpf_prog *to) {} 1058 static inline bool is_bpf_image_address(unsigned long address) 1059 { 1060 return false; 1061 } 1062 static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog) 1063 { 1064 return false; 1065 } 1066 #endif 1067 1068 struct bpf_func_info_aux { 1069 u16 linkage; 1070 bool unreliable; 1071 }; 1072 1073 enum bpf_jit_poke_reason { 1074 BPF_POKE_REASON_TAIL_CALL, 1075 }; 1076 1077 /* Descriptor of pokes pointing /into/ the JITed image. */ 1078 struct bpf_jit_poke_descriptor { 1079 void *tailcall_target; 1080 void *tailcall_bypass; 1081 void *bypass_addr; 1082 void *aux; 1083 union { 1084 struct { 1085 struct bpf_map *map; 1086 u32 key; 1087 } tail_call; 1088 }; 1089 bool tailcall_target_stable; 1090 u8 adj_off; 1091 u16 reason; 1092 u32 insn_idx; 1093 }; 1094 1095 /* reg_type info for ctx arguments */ 1096 struct bpf_ctx_arg_aux { 1097 u32 offset; 1098 enum bpf_reg_type reg_type; 1099 u32 btf_id; 1100 }; 1101 1102 struct btf_mod_pair { 1103 struct btf *btf; 1104 struct module *module; 1105 }; 1106 1107 struct bpf_kfunc_desc_tab; 1108 1109 struct bpf_prog_aux { 1110 atomic64_t refcnt; 1111 u32 used_map_cnt; 1112 u32 used_btf_cnt; 1113 u32 max_ctx_offset; 1114 u32 max_pkt_offset; 1115 u32 max_tp_access; 1116 u32 stack_depth; 1117 u32 id; 1118 u32 func_cnt; /* used by non-func prog as the number of func progs */ 1119 u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ 1120 u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 1121 u32 ctx_arg_info_size; 1122 u32 max_rdonly_access; 1123 u32 max_rdwr_access; 1124 struct btf *attach_btf; 1125 const struct bpf_ctx_arg_aux *ctx_arg_info; 1126 struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ 1127 struct bpf_prog *dst_prog; 1128 struct bpf_trampoline *dst_trampoline; 1129 enum bpf_prog_type saved_dst_prog_type; 1130 enum bpf_attach_type saved_dst_attach_type; 1131 bool verifier_zext; /* Zero extensions has been inserted by verifier. */ 1132 bool offload_requested; 1133 bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ 1134 bool func_proto_unreliable; 1135 bool sleepable; 1136 bool tail_call_reachable; 1137 bool xdp_has_frags; 1138 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ 1139 const struct btf_type *attach_func_proto; 1140 /* function name for valid attach_btf_id */ 1141 const char *attach_func_name; 1142 struct bpf_prog **func; 1143 void *jit_data; /* JIT specific data. arch dependent */ 1144 struct bpf_jit_poke_descriptor *poke_tab; 1145 struct bpf_kfunc_desc_tab *kfunc_tab; 1146 struct bpf_kfunc_btf_tab *kfunc_btf_tab; 1147 u32 size_poke_tab; 1148 struct bpf_ksym ksym; 1149 const struct bpf_prog_ops *ops; 1150 struct bpf_map **used_maps; 1151 struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ 1152 struct btf_mod_pair *used_btfs; 1153 struct bpf_prog *prog; 1154 struct user_struct *user; 1155 u64 load_time; /* ns since boottime */ 1156 u32 verified_insns; 1157 int cgroup_atype; /* enum cgroup_bpf_attach_type */ 1158 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 1159 char name[BPF_OBJ_NAME_LEN]; 1160 #ifdef CONFIG_SECURITY 1161 void *security; 1162 #endif 1163 struct bpf_prog_offload *offload; 1164 struct btf *btf; 1165 struct bpf_func_info *func_info; 1166 struct bpf_func_info_aux *func_info_aux; 1167 /* bpf_line_info loaded from userspace. linfo->insn_off 1168 * has the xlated insn offset. 1169 * Both the main and sub prog share the same linfo. 1170 * The subprog can access its first linfo by 1171 * using the linfo_idx. 1172 */ 1173 struct bpf_line_info *linfo; 1174 /* jited_linfo is the jited addr of the linfo. It has a 1175 * one to one mapping to linfo: 1176 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. 1177 * Both the main and sub prog share the same jited_linfo. 1178 * The subprog can access its first jited_linfo by 1179 * using the linfo_idx. 1180 */ 1181 void **jited_linfo; 1182 u32 func_info_cnt; 1183 u32 nr_linfo; 1184 /* subprog can use linfo_idx to access its first linfo and 1185 * jited_linfo. 1186 * main prog always has linfo_idx == 0 1187 */ 1188 u32 linfo_idx; 1189 u32 num_exentries; 1190 struct exception_table_entry *extable; 1191 union { 1192 struct work_struct work; 1193 struct rcu_head rcu; 1194 }; 1195 }; 1196 1197 struct bpf_prog { 1198 u16 pages; /* Number of allocated pages */ 1199 u16 jited:1, /* Is our filter JIT'ed? */ 1200 jit_requested:1,/* archs need to JIT the prog */ 1201 gpl_compatible:1, /* Is filter GPL compatible? */ 1202 cb_access:1, /* Is control block accessed? */ 1203 dst_needed:1, /* Do we need dst entry? */ 1204 blinding_requested:1, /* needs constant blinding */ 1205 blinded:1, /* Was blinded */ 1206 is_func:1, /* program is a bpf function */ 1207 kprobe_override:1, /* Do we override a kprobe? */ 1208 has_callchain_buf:1, /* callchain buffer allocated? */ 1209 enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ 1210 call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ 1211 call_get_func_ip:1, /* Do we call get_func_ip() */ 1212 tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ 1213 enum bpf_prog_type type; /* Type of BPF program */ 1214 enum bpf_attach_type expected_attach_type; /* For some prog types */ 1215 u32 len; /* Number of filter blocks */ 1216 u32 jited_len; /* Size of jited insns in bytes */ 1217 u8 tag[BPF_TAG_SIZE]; 1218 struct bpf_prog_stats __percpu *stats; 1219 int __percpu *active; 1220 unsigned int (*bpf_func)(const void *ctx, 1221 const struct bpf_insn *insn); 1222 struct bpf_prog_aux *aux; /* Auxiliary fields */ 1223 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 1224 /* Instructions for interpreter */ 1225 union { 1226 DECLARE_FLEX_ARRAY(struct sock_filter, insns); 1227 DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); 1228 }; 1229 }; 1230 1231 struct bpf_array_aux { 1232 /* Programs with direct jumps into programs part of this array. */ 1233 struct list_head poke_progs; 1234 struct bpf_map *map; 1235 struct mutex poke_mutex; 1236 struct work_struct work; 1237 }; 1238 1239 struct bpf_link { 1240 atomic64_t refcnt; 1241 u32 id; 1242 enum bpf_link_type type; 1243 const struct bpf_link_ops *ops; 1244 struct bpf_prog *prog; 1245 struct work_struct work; 1246 }; 1247 1248 struct bpf_link_ops { 1249 void (*release)(struct bpf_link *link); 1250 void (*dealloc)(struct bpf_link *link); 1251 int (*detach)(struct bpf_link *link); 1252 int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, 1253 struct bpf_prog *old_prog); 1254 void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); 1255 int (*fill_link_info)(const struct bpf_link *link, 1256 struct bpf_link_info *info); 1257 }; 1258 1259 struct bpf_tramp_link { 1260 struct bpf_link link; 1261 struct hlist_node tramp_hlist; 1262 u64 cookie; 1263 }; 1264 1265 struct bpf_shim_tramp_link { 1266 struct bpf_tramp_link link; 1267 struct bpf_trampoline *trampoline; 1268 }; 1269 1270 struct bpf_tracing_link { 1271 struct bpf_tramp_link link; 1272 enum bpf_attach_type attach_type; 1273 struct bpf_trampoline *trampoline; 1274 struct bpf_prog *tgt_prog; 1275 }; 1276 1277 struct bpf_link_primer { 1278 struct bpf_link *link; 1279 struct file *file; 1280 int fd; 1281 u32 id; 1282 }; 1283 1284 struct bpf_struct_ops_value; 1285 struct btf_member; 1286 1287 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 1288 struct bpf_struct_ops { 1289 const struct bpf_verifier_ops *verifier_ops; 1290 int (*init)(struct btf *btf); 1291 int (*check_member)(const struct btf_type *t, 1292 const struct btf_member *member); 1293 int (*init_member)(const struct btf_type *t, 1294 const struct btf_member *member, 1295 void *kdata, const void *udata); 1296 int (*reg)(void *kdata); 1297 void (*unreg)(void *kdata); 1298 const struct btf_type *type; 1299 const struct btf_type *value_type; 1300 const char *name; 1301 struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; 1302 u32 type_id; 1303 u32 value_id; 1304 }; 1305 1306 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 1307 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 1308 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 1309 void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); 1310 bool bpf_struct_ops_get(const void *kdata); 1311 void bpf_struct_ops_put(const void *kdata); 1312 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, 1313 void *value); 1314 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, 1315 struct bpf_tramp_link *link, 1316 const struct btf_func_model *model, 1317 void *image, void *image_end); 1318 static inline bool bpf_try_module_get(const void *data, struct module *owner) 1319 { 1320 if (owner == BPF_MODULE_OWNER) 1321 return bpf_struct_ops_get(data); 1322 else 1323 return try_module_get(owner); 1324 } 1325 static inline void bpf_module_put(const void *data, struct module *owner) 1326 { 1327 if (owner == BPF_MODULE_OWNER) 1328 bpf_struct_ops_put(data); 1329 else 1330 module_put(owner); 1331 } 1332 1333 #ifdef CONFIG_NET 1334 /* Define it here to avoid the use of forward declaration */ 1335 struct bpf_dummy_ops_state { 1336 int val; 1337 }; 1338 1339 struct bpf_dummy_ops { 1340 int (*test_1)(struct bpf_dummy_ops_state *cb); 1341 int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, 1342 char a3, unsigned long a4); 1343 }; 1344 1345 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, 1346 union bpf_attr __user *uattr); 1347 #endif 1348 #else 1349 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) 1350 { 1351 return NULL; 1352 } 1353 static inline void bpf_struct_ops_init(struct btf *btf, 1354 struct bpf_verifier_log *log) 1355 { 1356 } 1357 static inline bool bpf_try_module_get(const void *data, struct module *owner) 1358 { 1359 return try_module_get(owner); 1360 } 1361 static inline void bpf_module_put(const void *data, struct module *owner) 1362 { 1363 module_put(owner); 1364 } 1365 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, 1366 void *key, 1367 void *value) 1368 { 1369 return -EINVAL; 1370 } 1371 #endif 1372 1373 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) 1374 int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, 1375 int cgroup_atype); 1376 void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); 1377 #else 1378 static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, 1379 int cgroup_atype) 1380 { 1381 return -EOPNOTSUPP; 1382 } 1383 static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) 1384 { 1385 } 1386 #endif 1387 1388 struct bpf_array { 1389 struct bpf_map map; 1390 u32 elem_size; 1391 u32 index_mask; 1392 struct bpf_array_aux *aux; 1393 union { 1394 char value[0] __aligned(8); 1395 void *ptrs[0] __aligned(8); 1396 void __percpu *pptrs[0] __aligned(8); 1397 }; 1398 }; 1399 1400 #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ 1401 #define MAX_TAIL_CALL_CNT 33 1402 1403 /* Maximum number of loops for bpf_loop */ 1404 #define BPF_MAX_LOOPS BIT(23) 1405 1406 #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ 1407 BPF_F_RDONLY_PROG | \ 1408 BPF_F_WRONLY | \ 1409 BPF_F_WRONLY_PROG) 1410 1411 #define BPF_MAP_CAN_READ BIT(0) 1412 #define BPF_MAP_CAN_WRITE BIT(1) 1413 1414 /* Maximum number of user-producer ring buffer samples that can be drained in 1415 * a call to bpf_user_ringbuf_drain(). 1416 */ 1417 #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024) 1418 1419 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) 1420 { 1421 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1422 1423 /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is 1424 * not possible. 1425 */ 1426 if (access_flags & BPF_F_RDONLY_PROG) 1427 return BPF_MAP_CAN_READ; 1428 else if (access_flags & BPF_F_WRONLY_PROG) 1429 return BPF_MAP_CAN_WRITE; 1430 else 1431 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 1432 } 1433 1434 static inline bool bpf_map_flags_access_ok(u32 access_flags) 1435 { 1436 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != 1437 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1438 } 1439 1440 struct bpf_event_entry { 1441 struct perf_event *event; 1442 struct file *perf_file; 1443 struct file *map_file; 1444 struct rcu_head rcu; 1445 }; 1446 1447 static inline bool map_type_contains_progs(struct bpf_map *map) 1448 { 1449 return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || 1450 map->map_type == BPF_MAP_TYPE_DEVMAP || 1451 map->map_type == BPF_MAP_TYPE_CPUMAP; 1452 } 1453 1454 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); 1455 int bpf_prog_calc_tag(struct bpf_prog *fp); 1456 1457 const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 1458 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); 1459 1460 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 1461 unsigned long off, unsigned long len); 1462 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, 1463 const struct bpf_insn *src, 1464 struct bpf_insn *dst, 1465 struct bpf_prog *prog, 1466 u32 *target_size); 1467 1468 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 1469 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 1470 1471 /* an array of programs to be executed under rcu_lock. 1472 * 1473 * Typical usage: 1474 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run); 1475 * 1476 * the structure returned by bpf_prog_array_alloc() should be populated 1477 * with program pointers and the last pointer must be NULL. 1478 * The user has to keep refcnt on the program and make sure the program 1479 * is removed from the array before bpf_prog_put(). 1480 * The 'struct bpf_prog_array *' should only be replaced with xchg() 1481 * since other cpus are walking the array of pointers in parallel. 1482 */ 1483 struct bpf_prog_array_item { 1484 struct bpf_prog *prog; 1485 union { 1486 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 1487 u64 bpf_cookie; 1488 }; 1489 }; 1490 1491 struct bpf_prog_array { 1492 struct rcu_head rcu; 1493 struct bpf_prog_array_item items[]; 1494 }; 1495 1496 struct bpf_empty_prog_array { 1497 struct bpf_prog_array hdr; 1498 struct bpf_prog *null_prog; 1499 }; 1500 1501 /* to avoid allocating empty bpf_prog_array for cgroups that 1502 * don't have bpf program attached use one global 'bpf_empty_prog_array' 1503 * It will not be modified the caller of bpf_prog_array_alloc() 1504 * (since caller requested prog_cnt == 0) 1505 * that pointer should be 'freed' by bpf_prog_array_free() 1506 */ 1507 extern struct bpf_empty_prog_array bpf_empty_prog_array; 1508 1509 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 1510 void bpf_prog_array_free(struct bpf_prog_array *progs); 1511 /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */ 1512 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs); 1513 int bpf_prog_array_length(struct bpf_prog_array *progs); 1514 bool bpf_prog_array_is_empty(struct bpf_prog_array *array); 1515 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, 1516 __u32 __user *prog_ids, u32 cnt); 1517 1518 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, 1519 struct bpf_prog *old_prog); 1520 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); 1521 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 1522 struct bpf_prog *prog); 1523 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 1524 u32 *prog_ids, u32 request_cnt, 1525 u32 *prog_cnt); 1526 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 1527 struct bpf_prog *exclude_prog, 1528 struct bpf_prog *include_prog, 1529 u64 bpf_cookie, 1530 struct bpf_prog_array **new_array); 1531 1532 struct bpf_run_ctx {}; 1533 1534 struct bpf_cg_run_ctx { 1535 struct bpf_run_ctx run_ctx; 1536 const struct bpf_prog_array_item *prog_item; 1537 int retval; 1538 }; 1539 1540 struct bpf_trace_run_ctx { 1541 struct bpf_run_ctx run_ctx; 1542 u64 bpf_cookie; 1543 }; 1544 1545 struct bpf_tramp_run_ctx { 1546 struct bpf_run_ctx run_ctx; 1547 u64 bpf_cookie; 1548 struct bpf_run_ctx *saved_run_ctx; 1549 }; 1550 1551 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) 1552 { 1553 struct bpf_run_ctx *old_ctx = NULL; 1554 1555 #ifdef CONFIG_BPF_SYSCALL 1556 old_ctx = current->bpf_ctx; 1557 current->bpf_ctx = new_ctx; 1558 #endif 1559 return old_ctx; 1560 } 1561 1562 static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) 1563 { 1564 #ifdef CONFIG_BPF_SYSCALL 1565 current->bpf_ctx = old_ctx; 1566 #endif 1567 } 1568 1569 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ 1570 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) 1571 /* BPF program asks to set CN on the packet. */ 1572 #define BPF_RET_SET_CN (1 << 0) 1573 1574 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); 1575 1576 static __always_inline u32 1577 bpf_prog_run_array(const struct bpf_prog_array *array, 1578 const void *ctx, bpf_prog_run_fn run_prog) 1579 { 1580 const struct bpf_prog_array_item *item; 1581 const struct bpf_prog *prog; 1582 struct bpf_run_ctx *old_run_ctx; 1583 struct bpf_trace_run_ctx run_ctx; 1584 u32 ret = 1; 1585 1586 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); 1587 1588 if (unlikely(!array)) 1589 return ret; 1590 1591 migrate_disable(); 1592 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1593 item = &array->items[0]; 1594 while ((prog = READ_ONCE(item->prog))) { 1595 run_ctx.bpf_cookie = item->bpf_cookie; 1596 ret &= run_prog(prog, ctx); 1597 item++; 1598 } 1599 bpf_reset_run_ctx(old_run_ctx); 1600 migrate_enable(); 1601 return ret; 1602 } 1603 1604 /* Notes on RCU design for bpf_prog_arrays containing sleepable programs: 1605 * 1606 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array 1607 * overall. As a result, we must use the bpf_prog_array_free_sleepable 1608 * in order to use the tasks_trace rcu grace period. 1609 * 1610 * When a non-sleepable program is inside the array, we take the rcu read 1611 * section and disable preemption for that program alone, so it can access 1612 * rcu-protected dynamically sized maps. 1613 */ 1614 static __always_inline u32 1615 bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu, 1616 const void *ctx, bpf_prog_run_fn run_prog) 1617 { 1618 const struct bpf_prog_array_item *item; 1619 const struct bpf_prog *prog; 1620 const struct bpf_prog_array *array; 1621 struct bpf_run_ctx *old_run_ctx; 1622 struct bpf_trace_run_ctx run_ctx; 1623 u32 ret = 1; 1624 1625 might_fault(); 1626 1627 rcu_read_lock_trace(); 1628 migrate_disable(); 1629 1630 array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); 1631 if (unlikely(!array)) 1632 goto out; 1633 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1634 item = &array->items[0]; 1635 while ((prog = READ_ONCE(item->prog))) { 1636 if (!prog->aux->sleepable) 1637 rcu_read_lock(); 1638 1639 run_ctx.bpf_cookie = item->bpf_cookie; 1640 ret &= run_prog(prog, ctx); 1641 item++; 1642 1643 if (!prog->aux->sleepable) 1644 rcu_read_unlock(); 1645 } 1646 bpf_reset_run_ctx(old_run_ctx); 1647 out: 1648 migrate_enable(); 1649 rcu_read_unlock_trace(); 1650 return ret; 1651 } 1652 1653 #ifdef CONFIG_BPF_SYSCALL 1654 DECLARE_PER_CPU(int, bpf_prog_active); 1655 extern struct mutex bpf_stats_enabled_mutex; 1656 1657 /* 1658 * Block execution of BPF programs attached to instrumentation (perf, 1659 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 1660 * these events can happen inside a region which holds a map bucket lock 1661 * and can deadlock on it. 1662 */ 1663 static inline void bpf_disable_instrumentation(void) 1664 { 1665 migrate_disable(); 1666 this_cpu_inc(bpf_prog_active); 1667 } 1668 1669 static inline void bpf_enable_instrumentation(void) 1670 { 1671 this_cpu_dec(bpf_prog_active); 1672 migrate_enable(); 1673 } 1674 1675 extern const struct file_operations bpf_map_fops; 1676 extern const struct file_operations bpf_prog_fops; 1677 extern const struct file_operations bpf_iter_fops; 1678 1679 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1680 extern const struct bpf_prog_ops _name ## _prog_ops; \ 1681 extern const struct bpf_verifier_ops _name ## _verifier_ops; 1682 #define BPF_MAP_TYPE(_id, _ops) \ 1683 extern const struct bpf_map_ops _ops; 1684 #define BPF_LINK_TYPE(_id, _name) 1685 #include <linux/bpf_types.h> 1686 #undef BPF_PROG_TYPE 1687 #undef BPF_MAP_TYPE 1688 #undef BPF_LINK_TYPE 1689 1690 extern const struct bpf_prog_ops bpf_offload_prog_ops; 1691 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 1692 extern const struct bpf_verifier_ops xdp_analyzer_ops; 1693 1694 struct bpf_prog *bpf_prog_get(u32 ufd); 1695 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1696 bool attach_drv); 1697 void bpf_prog_add(struct bpf_prog *prog, int i); 1698 void bpf_prog_sub(struct bpf_prog *prog, int i); 1699 void bpf_prog_inc(struct bpf_prog *prog); 1700 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1701 void bpf_prog_put(struct bpf_prog *prog); 1702 1703 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1704 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1705 1706 struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset); 1707 void bpf_map_free_kptr_off_tab(struct bpf_map *map); 1708 struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map); 1709 bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b); 1710 void bpf_map_free_kptrs(struct bpf_map *map, void *map_value); 1711 1712 struct bpf_map *bpf_map_get(u32 ufd); 1713 struct bpf_map *bpf_map_get_with_uref(u32 ufd); 1714 struct bpf_map *__bpf_map_get(struct fd f); 1715 void bpf_map_inc(struct bpf_map *map); 1716 void bpf_map_inc_with_uref(struct bpf_map *map); 1717 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); 1718 void bpf_map_put_with_uref(struct bpf_map *map); 1719 void bpf_map_put(struct bpf_map *map); 1720 void *bpf_map_area_alloc(u64 size, int numa_node); 1721 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 1722 void bpf_map_area_free(void *base); 1723 bool bpf_map_write_active(const struct bpf_map *map); 1724 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 1725 int generic_map_lookup_batch(struct bpf_map *map, 1726 const union bpf_attr *attr, 1727 union bpf_attr __user *uattr); 1728 int generic_map_update_batch(struct bpf_map *map, 1729 const union bpf_attr *attr, 1730 union bpf_attr __user *uattr); 1731 int generic_map_delete_batch(struct bpf_map *map, 1732 const union bpf_attr *attr, 1733 union bpf_attr __user *uattr); 1734 struct bpf_map *bpf_map_get_curr_or_next(u32 *id); 1735 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); 1736 1737 #ifdef CONFIG_MEMCG_KMEM 1738 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 1739 int node); 1740 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); 1741 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 1742 size_t align, gfp_t flags); 1743 #else 1744 static inline void * 1745 bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 1746 int node) 1747 { 1748 return kmalloc_node(size, flags, node); 1749 } 1750 1751 static inline void * 1752 bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 1753 { 1754 return kzalloc(size, flags); 1755 } 1756 1757 static inline void __percpu * 1758 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, 1759 gfp_t flags) 1760 { 1761 return __alloc_percpu_gfp(size, align, flags); 1762 } 1763 #endif 1764 1765 extern int sysctl_unprivileged_bpf_disabled; 1766 1767 static inline bool bpf_allow_ptr_leaks(void) 1768 { 1769 return perfmon_capable(); 1770 } 1771 1772 static inline bool bpf_allow_uninit_stack(void) 1773 { 1774 return perfmon_capable(); 1775 } 1776 1777 static inline bool bpf_allow_ptr_to_map_access(void) 1778 { 1779 return perfmon_capable(); 1780 } 1781 1782 static inline bool bpf_bypass_spec_v1(void) 1783 { 1784 return perfmon_capable(); 1785 } 1786 1787 static inline bool bpf_bypass_spec_v4(void) 1788 { 1789 return perfmon_capable(); 1790 } 1791 1792 int bpf_map_new_fd(struct bpf_map *map, int flags); 1793 int bpf_prog_new_fd(struct bpf_prog *prog); 1794 1795 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 1796 const struct bpf_link_ops *ops, struct bpf_prog *prog); 1797 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); 1798 int bpf_link_settle(struct bpf_link_primer *primer); 1799 void bpf_link_cleanup(struct bpf_link_primer *primer); 1800 void bpf_link_inc(struct bpf_link *link); 1801 void bpf_link_put(struct bpf_link *link); 1802 int bpf_link_new_fd(struct bpf_link *link); 1803 struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); 1804 struct bpf_link *bpf_link_get_from_fd(u32 ufd); 1805 struct bpf_link *bpf_link_get_curr_or_next(u32 *id); 1806 1807 int bpf_obj_pin_user(u32 ufd, const char __user *pathname); 1808 int bpf_obj_get_user(const char __user *pathname, int flags); 1809 1810 #define BPF_ITER_FUNC_PREFIX "bpf_iter_" 1811 #define DEFINE_BPF_ITER_FUNC(target, args...) \ 1812 extern int bpf_iter_ ## target(args); \ 1813 int __init bpf_iter_ ## target(args) { return 0; } 1814 1815 /* 1816 * The task type of iterators. 1817 * 1818 * For BPF task iterators, they can be parameterized with various 1819 * parameters to visit only some of tasks. 1820 * 1821 * BPF_TASK_ITER_ALL (default) 1822 * Iterate over resources of every task. 1823 * 1824 * BPF_TASK_ITER_TID 1825 * Iterate over resources of a task/tid. 1826 * 1827 * BPF_TASK_ITER_TGID 1828 * Iterate over resources of every task of a process / task group. 1829 */ 1830 enum bpf_iter_task_type { 1831 BPF_TASK_ITER_ALL = 0, 1832 BPF_TASK_ITER_TID, 1833 BPF_TASK_ITER_TGID, 1834 }; 1835 1836 struct bpf_iter_aux_info { 1837 /* for map_elem iter */ 1838 struct bpf_map *map; 1839 1840 /* for cgroup iter */ 1841 struct { 1842 struct cgroup *start; /* starting cgroup */ 1843 enum bpf_cgroup_iter_order order; 1844 } cgroup; 1845 struct { 1846 enum bpf_iter_task_type type; 1847 u32 pid; 1848 } task; 1849 }; 1850 1851 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, 1852 union bpf_iter_link_info *linfo, 1853 struct bpf_iter_aux_info *aux); 1854 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); 1855 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, 1856 struct seq_file *seq); 1857 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, 1858 struct bpf_link_info *info); 1859 typedef const struct bpf_func_proto * 1860 (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, 1861 const struct bpf_prog *prog); 1862 1863 enum bpf_iter_feature { 1864 BPF_ITER_RESCHED = BIT(0), 1865 }; 1866 1867 #define BPF_ITER_CTX_ARG_MAX 2 1868 struct bpf_iter_reg { 1869 const char *target; 1870 bpf_iter_attach_target_t attach_target; 1871 bpf_iter_detach_target_t detach_target; 1872 bpf_iter_show_fdinfo_t show_fdinfo; 1873 bpf_iter_fill_link_info_t fill_link_info; 1874 bpf_iter_get_func_proto_t get_func_proto; 1875 u32 ctx_arg_info_size; 1876 u32 feature; 1877 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; 1878 const struct bpf_iter_seq_info *seq_info; 1879 }; 1880 1881 struct bpf_iter_meta { 1882 __bpf_md_ptr(struct seq_file *, seq); 1883 u64 session_id; 1884 u64 seq_num; 1885 }; 1886 1887 struct bpf_iter__bpf_map_elem { 1888 __bpf_md_ptr(struct bpf_iter_meta *, meta); 1889 __bpf_md_ptr(struct bpf_map *, map); 1890 __bpf_md_ptr(void *, key); 1891 __bpf_md_ptr(void *, value); 1892 }; 1893 1894 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); 1895 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); 1896 bool bpf_iter_prog_supported(struct bpf_prog *prog); 1897 const struct bpf_func_proto * 1898 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); 1899 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); 1900 int bpf_iter_new_fd(struct bpf_link *link); 1901 bool bpf_link_is_iter(struct bpf_link *link); 1902 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); 1903 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); 1904 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, 1905 struct seq_file *seq); 1906 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, 1907 struct bpf_link_info *info); 1908 1909 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 1910 struct bpf_func_state *caller, 1911 struct bpf_func_state *callee); 1912 1913 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 1914 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 1915 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, 1916 u64 flags); 1917 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 1918 u64 flags); 1919 1920 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 1921 1922 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 1923 void *key, void *value, u64 map_flags); 1924 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1925 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 1926 void *key, void *value, u64 map_flags); 1927 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1928 1929 int bpf_get_file_flag(int flags); 1930 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, 1931 size_t actual_size); 1932 1933 /* verify correctness of eBPF program */ 1934 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr); 1935 1936 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1937 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); 1938 #endif 1939 1940 struct btf *bpf_get_btf_vmlinux(void); 1941 1942 /* Map specifics */ 1943 struct xdp_frame; 1944 struct sk_buff; 1945 struct bpf_dtab_netdev; 1946 struct bpf_cpu_map_entry; 1947 1948 void __dev_flush(void); 1949 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, 1950 struct net_device *dev_rx); 1951 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, 1952 struct net_device *dev_rx); 1953 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, 1954 struct bpf_map *map, bool exclude_ingress); 1955 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, 1956 struct bpf_prog *xdp_prog); 1957 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 1958 struct bpf_prog *xdp_prog, struct bpf_map *map, 1959 bool exclude_ingress); 1960 1961 void __cpu_map_flush(void); 1962 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, 1963 struct net_device *dev_rx); 1964 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 1965 struct sk_buff *skb); 1966 1967 /* Return map's numa specified by userspace */ 1968 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) 1969 { 1970 return (attr->map_flags & BPF_F_NUMA_NODE) ? 1971 attr->numa_node : NUMA_NO_NODE; 1972 } 1973 1974 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 1975 int array_map_alloc_check(union bpf_attr *attr); 1976 1977 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, 1978 union bpf_attr __user *uattr); 1979 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 1980 union bpf_attr __user *uattr); 1981 int bpf_prog_test_run_tracing(struct bpf_prog *prog, 1982 const union bpf_attr *kattr, 1983 union bpf_attr __user *uattr); 1984 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 1985 const union bpf_attr *kattr, 1986 union bpf_attr __user *uattr); 1987 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, 1988 const union bpf_attr *kattr, 1989 union bpf_attr __user *uattr); 1990 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 1991 const union bpf_attr *kattr, 1992 union bpf_attr __user *uattr); 1993 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 1994 const struct bpf_prog *prog, 1995 struct bpf_insn_access_aux *info); 1996 1997 static inline bool bpf_tracing_ctx_access(int off, int size, 1998 enum bpf_access_type type) 1999 { 2000 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 2001 return false; 2002 if (type != BPF_READ) 2003 return false; 2004 if (off % size != 0) 2005 return false; 2006 return true; 2007 } 2008 2009 static inline bool bpf_tracing_btf_ctx_access(int off, int size, 2010 enum bpf_access_type type, 2011 const struct bpf_prog *prog, 2012 struct bpf_insn_access_aux *info) 2013 { 2014 if (!bpf_tracing_ctx_access(off, size, type)) 2015 return false; 2016 return btf_ctx_access(off, size, type, prog, info); 2017 } 2018 2019 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, 2020 const struct btf_type *t, int off, int size, 2021 enum bpf_access_type atype, 2022 u32 *next_btf_id, enum bpf_type_flag *flag); 2023 bool btf_struct_ids_match(struct bpf_verifier_log *log, 2024 const struct btf *btf, u32 id, int off, 2025 const struct btf *need_btf, u32 need_type_id, 2026 bool strict); 2027 2028 int btf_distill_func_proto(struct bpf_verifier_log *log, 2029 struct btf *btf, 2030 const struct btf_type *func_proto, 2031 const char *func_name, 2032 struct btf_func_model *m); 2033 2034 struct bpf_kfunc_arg_meta { 2035 u64 r0_size; 2036 bool r0_rdonly; 2037 int ref_obj_id; 2038 u32 flags; 2039 }; 2040 2041 struct bpf_reg_state; 2042 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, 2043 struct bpf_reg_state *regs); 2044 int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, 2045 struct bpf_reg_state *regs); 2046 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 2047 const struct btf *btf, u32 func_id, 2048 struct bpf_reg_state *regs, 2049 struct bpf_kfunc_arg_meta *meta); 2050 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 2051 struct bpf_reg_state *reg); 2052 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 2053 struct btf *btf, const struct btf_type *t); 2054 2055 struct bpf_prog *bpf_prog_by_id(u32 id); 2056 struct bpf_link *bpf_link_by_id(u32 id); 2057 2058 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 2059 void bpf_task_storage_free(struct task_struct *task); 2060 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); 2061 const struct btf_func_model * 2062 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2063 const struct bpf_insn *insn); 2064 struct bpf_core_ctx { 2065 struct bpf_verifier_log *log; 2066 const struct btf *btf; 2067 }; 2068 2069 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, 2070 int relo_idx, void *insn); 2071 2072 static inline bool unprivileged_ebpf_enabled(void) 2073 { 2074 return !sysctl_unprivileged_bpf_disabled; 2075 } 2076 2077 /* Not all bpf prog type has the bpf_ctx. 2078 * For the bpf prog type that has initialized the bpf_ctx, 2079 * this function can be used to decide if a kernel function 2080 * is called by a bpf program. 2081 */ 2082 static inline bool has_current_bpf_ctx(void) 2083 { 2084 return !!current->bpf_ctx; 2085 } 2086 2087 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog); 2088 #else /* !CONFIG_BPF_SYSCALL */ 2089 static inline struct bpf_prog *bpf_prog_get(u32 ufd) 2090 { 2091 return ERR_PTR(-EOPNOTSUPP); 2092 } 2093 2094 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 2095 enum bpf_prog_type type, 2096 bool attach_drv) 2097 { 2098 return ERR_PTR(-EOPNOTSUPP); 2099 } 2100 2101 static inline void bpf_prog_add(struct bpf_prog *prog, int i) 2102 { 2103 } 2104 2105 static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 2106 { 2107 } 2108 2109 static inline void bpf_prog_put(struct bpf_prog *prog) 2110 { 2111 } 2112 2113 static inline void bpf_prog_inc(struct bpf_prog *prog) 2114 { 2115 } 2116 2117 static inline struct bpf_prog *__must_check 2118 bpf_prog_inc_not_zero(struct bpf_prog *prog) 2119 { 2120 return ERR_PTR(-EOPNOTSUPP); 2121 } 2122 2123 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2124 const struct bpf_link_ops *ops, 2125 struct bpf_prog *prog) 2126 { 2127 } 2128 2129 static inline int bpf_link_prime(struct bpf_link *link, 2130 struct bpf_link_primer *primer) 2131 { 2132 return -EOPNOTSUPP; 2133 } 2134 2135 static inline int bpf_link_settle(struct bpf_link_primer *primer) 2136 { 2137 return -EOPNOTSUPP; 2138 } 2139 2140 static inline void bpf_link_cleanup(struct bpf_link_primer *primer) 2141 { 2142 } 2143 2144 static inline void bpf_link_inc(struct bpf_link *link) 2145 { 2146 } 2147 2148 static inline void bpf_link_put(struct bpf_link *link) 2149 { 2150 } 2151 2152 static inline int bpf_obj_get_user(const char __user *pathname, int flags) 2153 { 2154 return -EOPNOTSUPP; 2155 } 2156 2157 static inline void __dev_flush(void) 2158 { 2159 } 2160 2161 struct xdp_frame; 2162 struct bpf_dtab_netdev; 2163 struct bpf_cpu_map_entry; 2164 2165 static inline 2166 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, 2167 struct net_device *dev_rx) 2168 { 2169 return 0; 2170 } 2171 2172 static inline 2173 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, 2174 struct net_device *dev_rx) 2175 { 2176 return 0; 2177 } 2178 2179 static inline 2180 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, 2181 struct bpf_map *map, bool exclude_ingress) 2182 { 2183 return 0; 2184 } 2185 2186 struct sk_buff; 2187 2188 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, 2189 struct sk_buff *skb, 2190 struct bpf_prog *xdp_prog) 2191 { 2192 return 0; 2193 } 2194 2195 static inline 2196 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, 2197 struct bpf_prog *xdp_prog, struct bpf_map *map, 2198 bool exclude_ingress) 2199 { 2200 return 0; 2201 } 2202 2203 static inline void __cpu_map_flush(void) 2204 { 2205 } 2206 2207 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, 2208 struct xdp_frame *xdpf, 2209 struct net_device *dev_rx) 2210 { 2211 return 0; 2212 } 2213 2214 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 2215 struct sk_buff *skb) 2216 { 2217 return -EOPNOTSUPP; 2218 } 2219 2220 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 2221 enum bpf_prog_type type) 2222 { 2223 return ERR_PTR(-EOPNOTSUPP); 2224 } 2225 2226 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, 2227 const union bpf_attr *kattr, 2228 union bpf_attr __user *uattr) 2229 { 2230 return -ENOTSUPP; 2231 } 2232 2233 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, 2234 const union bpf_attr *kattr, 2235 union bpf_attr __user *uattr) 2236 { 2237 return -ENOTSUPP; 2238 } 2239 2240 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, 2241 const union bpf_attr *kattr, 2242 union bpf_attr __user *uattr) 2243 { 2244 return -ENOTSUPP; 2245 } 2246 2247 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 2248 const union bpf_attr *kattr, 2249 union bpf_attr __user *uattr) 2250 { 2251 return -ENOTSUPP; 2252 } 2253 2254 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 2255 const union bpf_attr *kattr, 2256 union bpf_attr __user *uattr) 2257 { 2258 return -ENOTSUPP; 2259 } 2260 2261 static inline void bpf_map_put(struct bpf_map *map) 2262 { 2263 } 2264 2265 static inline struct bpf_prog *bpf_prog_by_id(u32 id) 2266 { 2267 return ERR_PTR(-ENOTSUPP); 2268 } 2269 2270 static inline int btf_struct_access(struct bpf_verifier_log *log, 2271 const struct btf *btf, 2272 const struct btf_type *t, int off, int size, 2273 enum bpf_access_type atype, 2274 u32 *next_btf_id, enum bpf_type_flag *flag) 2275 { 2276 return -EACCES; 2277 } 2278 2279 static inline const struct bpf_func_proto * 2280 bpf_base_func_proto(enum bpf_func_id func_id) 2281 { 2282 return NULL; 2283 } 2284 2285 static inline void bpf_task_storage_free(struct task_struct *task) 2286 { 2287 } 2288 2289 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 2290 { 2291 return false; 2292 } 2293 2294 static inline const struct btf_func_model * 2295 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2296 const struct bpf_insn *insn) 2297 { 2298 return NULL; 2299 } 2300 2301 static inline bool unprivileged_ebpf_enabled(void) 2302 { 2303 return false; 2304 } 2305 2306 static inline bool has_current_bpf_ctx(void) 2307 { 2308 return false; 2309 } 2310 2311 static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2312 { 2313 } 2314 #endif /* CONFIG_BPF_SYSCALL */ 2315 2316 void __bpf_free_used_btfs(struct bpf_prog_aux *aux, 2317 struct btf_mod_pair *used_btfs, u32 len); 2318 2319 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 2320 enum bpf_prog_type type) 2321 { 2322 return bpf_prog_get_type_dev(ufd, type, false); 2323 } 2324 2325 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2326 struct bpf_map **used_maps, u32 len); 2327 2328 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 2329 2330 int bpf_prog_offload_compile(struct bpf_prog *prog); 2331 void bpf_prog_offload_destroy(struct bpf_prog *prog); 2332 int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 2333 struct bpf_prog *prog); 2334 2335 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); 2336 2337 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); 2338 int bpf_map_offload_update_elem(struct bpf_map *map, 2339 void *key, void *value, u64 flags); 2340 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); 2341 int bpf_map_offload_get_next_key(struct bpf_map *map, 2342 void *key, void *next_key); 2343 2344 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); 2345 2346 struct bpf_offload_dev * 2347 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); 2348 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); 2349 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); 2350 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 2351 struct net_device *netdev); 2352 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 2353 struct net_device *netdev); 2354 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); 2355 2356 void unpriv_ebpf_notify(int new_state); 2357 2358 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 2359 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 2360 2361 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) 2362 { 2363 return aux->offload_requested; 2364 } 2365 2366 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 2367 { 2368 return unlikely(map->ops == &bpf_map_offload_ops); 2369 } 2370 2371 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); 2372 void bpf_map_offload_map_free(struct bpf_map *map); 2373 int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2374 const union bpf_attr *kattr, 2375 union bpf_attr __user *uattr); 2376 2377 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); 2378 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); 2379 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); 2380 int sock_map_bpf_prog_query(const union bpf_attr *attr, 2381 union bpf_attr __user *uattr); 2382 2383 void sock_map_unhash(struct sock *sk); 2384 void sock_map_destroy(struct sock *sk); 2385 void sock_map_close(struct sock *sk, long timeout); 2386 #else 2387 static inline int bpf_prog_offload_init(struct bpf_prog *prog, 2388 union bpf_attr *attr) 2389 { 2390 return -EOPNOTSUPP; 2391 } 2392 2393 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) 2394 { 2395 return false; 2396 } 2397 2398 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) 2399 { 2400 return false; 2401 } 2402 2403 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 2404 { 2405 return ERR_PTR(-EOPNOTSUPP); 2406 } 2407 2408 static inline void bpf_map_offload_map_free(struct bpf_map *map) 2409 { 2410 } 2411 2412 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2413 const union bpf_attr *kattr, 2414 union bpf_attr __user *uattr) 2415 { 2416 return -ENOTSUPP; 2417 } 2418 2419 #ifdef CONFIG_BPF_SYSCALL 2420 static inline int sock_map_get_from_fd(const union bpf_attr *attr, 2421 struct bpf_prog *prog) 2422 { 2423 return -EINVAL; 2424 } 2425 2426 static inline int sock_map_prog_detach(const union bpf_attr *attr, 2427 enum bpf_prog_type ptype) 2428 { 2429 return -EOPNOTSUPP; 2430 } 2431 2432 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, 2433 u64 flags) 2434 { 2435 return -EOPNOTSUPP; 2436 } 2437 2438 static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, 2439 union bpf_attr __user *uattr) 2440 { 2441 return -EINVAL; 2442 } 2443 #endif /* CONFIG_BPF_SYSCALL */ 2444 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 2445 2446 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 2447 void bpf_sk_reuseport_detach(struct sock *sk); 2448 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, 2449 void *value); 2450 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, 2451 void *value, u64 map_flags); 2452 #else 2453 static inline void bpf_sk_reuseport_detach(struct sock *sk) 2454 { 2455 } 2456 2457 #ifdef CONFIG_BPF_SYSCALL 2458 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, 2459 void *key, void *value) 2460 { 2461 return -EOPNOTSUPP; 2462 } 2463 2464 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, 2465 void *key, void *value, 2466 u64 map_flags) 2467 { 2468 return -EOPNOTSUPP; 2469 } 2470 #endif /* CONFIG_BPF_SYSCALL */ 2471 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ 2472 2473 /* verifier prototypes for helper functions called from eBPF programs */ 2474 extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 2475 extern const struct bpf_func_proto bpf_map_update_elem_proto; 2476 extern const struct bpf_func_proto bpf_map_delete_elem_proto; 2477 extern const struct bpf_func_proto bpf_map_push_elem_proto; 2478 extern const struct bpf_func_proto bpf_map_pop_elem_proto; 2479 extern const struct bpf_func_proto bpf_map_peek_elem_proto; 2480 extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; 2481 2482 extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 2483 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 2484 extern const struct bpf_func_proto bpf_get_numa_node_id_proto; 2485 extern const struct bpf_func_proto bpf_tail_call_proto; 2486 extern const struct bpf_func_proto bpf_ktime_get_ns_proto; 2487 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; 2488 extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto; 2489 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 2490 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 2491 extern const struct bpf_func_proto bpf_get_current_comm_proto; 2492 extern const struct bpf_func_proto bpf_get_stackid_proto; 2493 extern const struct bpf_func_proto bpf_get_stack_proto; 2494 extern const struct bpf_func_proto bpf_get_task_stack_proto; 2495 extern const struct bpf_func_proto bpf_get_stackid_proto_pe; 2496 extern const struct bpf_func_proto bpf_get_stack_proto_pe; 2497 extern const struct bpf_func_proto bpf_sock_map_update_proto; 2498 extern const struct bpf_func_proto bpf_sock_hash_update_proto; 2499 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 2500 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; 2501 extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; 2502 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; 2503 extern const struct bpf_func_proto bpf_msg_redirect_map_proto; 2504 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; 2505 extern const struct bpf_func_proto bpf_sk_redirect_map_proto; 2506 extern const struct bpf_func_proto bpf_spin_lock_proto; 2507 extern const struct bpf_func_proto bpf_spin_unlock_proto; 2508 extern const struct bpf_func_proto bpf_get_local_storage_proto; 2509 extern const struct bpf_func_proto bpf_strtol_proto; 2510 extern const struct bpf_func_proto bpf_strtoul_proto; 2511 extern const struct bpf_func_proto bpf_tcp_sock_proto; 2512 extern const struct bpf_func_proto bpf_jiffies64_proto; 2513 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; 2514 extern const struct bpf_func_proto bpf_event_output_data_proto; 2515 extern const struct bpf_func_proto bpf_ringbuf_output_proto; 2516 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; 2517 extern const struct bpf_func_proto bpf_ringbuf_submit_proto; 2518 extern const struct bpf_func_proto bpf_ringbuf_discard_proto; 2519 extern const struct bpf_func_proto bpf_ringbuf_query_proto; 2520 extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; 2521 extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; 2522 extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto; 2523 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; 2524 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; 2525 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; 2526 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; 2527 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; 2528 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; 2529 extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto; 2530 extern const struct bpf_func_proto bpf_copy_from_user_proto; 2531 extern const struct bpf_func_proto bpf_snprintf_btf_proto; 2532 extern const struct bpf_func_proto bpf_snprintf_proto; 2533 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; 2534 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; 2535 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; 2536 extern const struct bpf_func_proto bpf_sock_from_file_proto; 2537 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; 2538 extern const struct bpf_func_proto bpf_task_storage_get_proto; 2539 extern const struct bpf_func_proto bpf_task_storage_delete_proto; 2540 extern const struct bpf_func_proto bpf_for_each_map_elem_proto; 2541 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; 2542 extern const struct bpf_func_proto bpf_sk_setsockopt_proto; 2543 extern const struct bpf_func_proto bpf_sk_getsockopt_proto; 2544 extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto; 2545 extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; 2546 extern const struct bpf_func_proto bpf_find_vma_proto; 2547 extern const struct bpf_func_proto bpf_loop_proto; 2548 extern const struct bpf_func_proto bpf_copy_from_user_task_proto; 2549 extern const struct bpf_func_proto bpf_set_retval_proto; 2550 extern const struct bpf_func_proto bpf_get_retval_proto; 2551 extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto; 2552 2553 const struct bpf_func_proto *tracing_prog_func_proto( 2554 enum bpf_func_id func_id, const struct bpf_prog *prog); 2555 2556 /* Shared helpers among cBPF and eBPF. */ 2557 void bpf_user_rnd_init_once(void); 2558 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 2559 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 2560 2561 #if defined(CONFIG_NET) 2562 bool bpf_sock_common_is_valid_access(int off, int size, 2563 enum bpf_access_type type, 2564 struct bpf_insn_access_aux *info); 2565 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2566 struct bpf_insn_access_aux *info); 2567 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 2568 const struct bpf_insn *si, 2569 struct bpf_insn *insn_buf, 2570 struct bpf_prog *prog, 2571 u32 *target_size); 2572 #else 2573 static inline bool bpf_sock_common_is_valid_access(int off, int size, 2574 enum bpf_access_type type, 2575 struct bpf_insn_access_aux *info) 2576 { 2577 return false; 2578 } 2579 static inline bool bpf_sock_is_valid_access(int off, int size, 2580 enum bpf_access_type type, 2581 struct bpf_insn_access_aux *info) 2582 { 2583 return false; 2584 } 2585 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 2586 const struct bpf_insn *si, 2587 struct bpf_insn *insn_buf, 2588 struct bpf_prog *prog, 2589 u32 *target_size) 2590 { 2591 return 0; 2592 } 2593 #endif 2594 2595 #ifdef CONFIG_INET 2596 struct sk_reuseport_kern { 2597 struct sk_buff *skb; 2598 struct sock *sk; 2599 struct sock *selected_sk; 2600 struct sock *migrating_sk; 2601 void *data_end; 2602 u32 hash; 2603 u32 reuseport_id; 2604 bool bind_inany; 2605 }; 2606 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2607 struct bpf_insn_access_aux *info); 2608 2609 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 2610 const struct bpf_insn *si, 2611 struct bpf_insn *insn_buf, 2612 struct bpf_prog *prog, 2613 u32 *target_size); 2614 2615 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2616 struct bpf_insn_access_aux *info); 2617 2618 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 2619 const struct bpf_insn *si, 2620 struct bpf_insn *insn_buf, 2621 struct bpf_prog *prog, 2622 u32 *target_size); 2623 #else 2624 static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 2625 enum bpf_access_type type, 2626 struct bpf_insn_access_aux *info) 2627 { 2628 return false; 2629 } 2630 2631 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 2632 const struct bpf_insn *si, 2633 struct bpf_insn *insn_buf, 2634 struct bpf_prog *prog, 2635 u32 *target_size) 2636 { 2637 return 0; 2638 } 2639 static inline bool bpf_xdp_sock_is_valid_access(int off, int size, 2640 enum bpf_access_type type, 2641 struct bpf_insn_access_aux *info) 2642 { 2643 return false; 2644 } 2645 2646 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 2647 const struct bpf_insn *si, 2648 struct bpf_insn *insn_buf, 2649 struct bpf_prog *prog, 2650 u32 *target_size) 2651 { 2652 return 0; 2653 } 2654 #endif /* CONFIG_INET */ 2655 2656 enum bpf_text_poke_type { 2657 BPF_MOD_CALL, 2658 BPF_MOD_JUMP, 2659 }; 2660 2661 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 2662 void *addr1, void *addr2); 2663 2664 void *bpf_arch_text_copy(void *dst, void *src, size_t len); 2665 int bpf_arch_text_invalidate(void *dst, size_t len); 2666 2667 struct btf_id_set; 2668 bool btf_id_set_contains(const struct btf_id_set *set, u32 id); 2669 2670 #define MAX_BPRINTF_VARARGS 12 2671 2672 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, 2673 u32 **bin_buf, u32 num_args); 2674 void bpf_bprintf_cleanup(void); 2675 2676 /* the implementation of the opaque uapi struct bpf_dynptr */ 2677 struct bpf_dynptr_kern { 2678 void *data; 2679 /* Size represents the number of usable bytes of dynptr data. 2680 * If for example the offset is at 4 for a local dynptr whose data is 2681 * of type u64, the number of usable bytes is 4. 2682 * 2683 * The upper 8 bits are reserved. It is as follows: 2684 * Bits 0 - 23 = size 2685 * Bits 24 - 30 = dynptr type 2686 * Bit 31 = whether dynptr is read-only 2687 */ 2688 u32 size; 2689 u32 offset; 2690 } __aligned(8); 2691 2692 enum bpf_dynptr_type { 2693 BPF_DYNPTR_TYPE_INVALID, 2694 /* Points to memory that is local to the bpf program */ 2695 BPF_DYNPTR_TYPE_LOCAL, 2696 /* Underlying data is a kernel-produced ringbuf record */ 2697 BPF_DYNPTR_TYPE_RINGBUF, 2698 }; 2699 2700 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 2701 enum bpf_dynptr_type type, u32 offset, u32 size); 2702 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); 2703 int bpf_dynptr_check_size(u32 size); 2704 u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr); 2705 2706 #ifdef CONFIG_BPF_LSM 2707 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); 2708 void bpf_cgroup_atype_put(int cgroup_atype); 2709 #else 2710 static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {} 2711 static inline void bpf_cgroup_atype_put(int cgroup_atype) {} 2712 #endif /* CONFIG_BPF_LSM */ 2713 2714 struct key; 2715 2716 #ifdef CONFIG_KEYS 2717 struct bpf_key { 2718 struct key *key; 2719 bool has_ref; 2720 }; 2721 #endif /* CONFIG_KEYS */ 2722 #endif /* _LINUX_BPF_H */ 2723