// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if a program is allowed to access maps, so check rcu_read_lock_held in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};
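
/* Usage sketch (illustrative, not part of this file): a subsystem wires
 * these protos up from its verifier_ops->get_func_proto() callback roughly
 * as below. The "example_" names are hypothetical; the callback signature
 * and bpf_base_func_proto() (defined at the end of this file) are real.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */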

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
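
/* Usage sketch (illustrative): the push/pop/peek helpers above back
 * BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK. Assuming a program-side
 * queue map named "queue" (a hypothetical name):
 *
 *	__u64 val = 42;
 *
 *	bpf_map_push_elem(&queue, &val, 0);	// enqueue (BPF_EXIST as
 *						// flags overwrites when full)
 *	if (!bpf_map_peek_elem(&queue, &val))	// read head, leave it queued
 *		...
 *	if (!bpf_map_pop_elem(&queue, &val))	// dequeue head into val
 *		...
 */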

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
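
/* Note (illustrative): both helpers above pack two 32-bit values into one
 * u64 -- tgid (or gid) in the upper half, pid (or uid) in the lower half.
 * A BPF program typically unpacks the result like this:
 *
 *	__u64 id   = bpf_get_current_pid_tgid();
 *	__u32 tgid = id >> 32;		// userspace's notion of "pid"
 *	__u32 pid  = (__u32)id;		// kernel task id, i.e. the thread
 */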

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}
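
/* Note on the fallback above (descriptive, not from the original source):
 * this is a test-and-test-and-set spinlock. atomic_cond_read_relaxed()
 * spins with plain reads until the word reads 0 (VAL names the loaded
 * value inside that macro), and only then attempts the atomic_xchg();
 * a non-zero xchg return means another CPU won the race, so spin again.
 * atomic_xchg() is fully ordered, which gives the acquire semantics a
 * lock needs.
 */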

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}
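
/* Usage sketch (illustrative): program-side use of the two lock helpers
 * above. A map value embeds a struct bpf_spin_lock, and the critical
 * section must stay short with no helper calls inside it. The struct and
 * map names below are hypothetical:
 *
 *	struct val { struct bpf_spin_lock lock; int cnt; };
 *
 *	struct val *v = bpf_map_lookup_elem(&counters, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */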

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
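
/* Note (illustrative): ancestor_level counts from the root of the default
 * (cgroup v2) hierarchy, with the root cgroup at level zero; out-of-range
 * levels make cgroup_ancestor() return NULL, so the helper returns 0.
 * A sketch comparing against a precomputed id (target_cgid is a
 * hypothetical value supplied by userspace):
 *
 *	if (bpf_get_current_ancestor_cgroup_id(1) == target_cgid)
 *		...	// task runs somewhere under that level-1 subtree
 */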

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* flags argument is not used now,
	 * but provides an ability to extend the API.
	 * verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	void *ptr;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}
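
/* Worked example for the parser above (descriptive addition): with
 * buf = "  0x2a" and flags = 0 (auto-detect base), the two spaces are
 * skipped (consumed = 2), "0x2a" is copied into str,
 * _parse_integer_fixup_radix() picks base 16 and steps past "0x",
 * _parse_integer() consumes "2a" yielding *res = 42, and the function
 * returns 2 + 4 = 6, the total number of bytes consumed.
 */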

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
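
/* Usage sketch (illustrative, buffer handling simplified): bpf_strtol()
 * returns the number of bytes consumed on success, so a cgroup-sysctl
 * program validating a write can parse the new value like this:
 *
 *	char in[16];
 *	long v;
 *	int len = bpf_sysctl_get_new_value(ctx, in, sizeof(in));
 *
 *	if (len > 0 && bpf_strtol(in, len, 0, &v) > 0 && v >= 0 && v < 100)
 *		return 1;	// accept the write
 *	return 0;		// reject
 */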
#endif

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t)size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
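
/* Usage sketch (illustrative): the dev/ino pair identifies a pid
 * namespace; userspace typically obtains it by stat(2)-ing
 * /proc/self/ns/pid and hands it to the program, which then reads
 * namespace-local ids. DEV and INO below stand in for those stat results
 * and are assumptions of this sketch:
 *
 *	struct bpf_pidns_info ns;
 *
 *	if (!bpf_get_ns_current_pid_tgid(DEV, INO, &ns, sizeof(ns)))
 *		...	// ns.pid / ns.tgid as seen inside that pid ns
 */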

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};
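
/* Usage sketch (illustrative): bpf_per_cpu_ptr() mirrors the kernel's
 * per_cpu_ptr() for BPF programs. Program-side, the percpu symbol is
 * declared as a BTF ksym (libbpf's __ksym attribute); the NULL check is
 * mandatory since the helper returns NULL for out-of-range cpu values:
 *
 *	extern const struct rq runqueues __ksym;
 *
 *	const struct rq *rq = bpf_per_cpu_ptr(&runqueues, 0);
 *
 *	if (rq)
 *		...	// read CPU 0's runqueue fields via BTF
 */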

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_trace_printk:
		if (!perfmon_capable())
			return NULL;
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_snprintf_btf:
		if (!perfmon_capable())
			return NULL;
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_bpf_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
	default:
		return NULL;
	}
}
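
/* Note (descriptive addition): the three switch blocks above form a
 * capability ladder -- the first tier is available to any program type
 * routed here, the second requires bpf_capable() (CAP_BPF or
 * CAP_SYS_ADMIN), and the tracing-flavored helpers at the bottom
 * additionally require perfmon_capable() (CAP_PERFMON or CAP_SYS_ADMIN),
 * since they can read arbitrary kernel or user memory.
 */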