/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * map methods, therefore eBPF programs must run under rcu_read_lock() if
 * they are allowed to access maps, so check rcu_read_lock_held() in all
 * three functions.
 */
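/* Illustrative sketch only (not part of this file): a subsystem's
 * get_func_proto() callback would typically hand these protos back per
 * helper ID, roughly as below. The name example_func_proto is made up;
 * the exact callback signature depends on kernel version and program type.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */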
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
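/* Usage sketch from the BPF program side (illustrative, not part of this
 * file; my_map and key are hypothetical, and the calls assume the usual
 * bpf_helpers.h-style program wrappers). Because the lookup proto returns
 * RET_PTR_TO_MAP_VALUE_OR_NULL, the verifier forces the program to
 * NULL-check the result before dereferencing it:
 *
 *	u64 init = 1, *value;
 *
 *	value = bpf_map_lookup_elem(&my_map, &key);
 *	if (value)
 *		(*value)++;	the pointer refers directly to map memory
 *	else
 *		bpf_map_update_elem(&my_map, &key, &init, BPF_ANY);
 */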
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
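/* Unpacking sketch (illustrative, program side): both helpers above pack
 * two 32-bit values into one u64, mirroring the shifts in the code, with
 * tgid/gid in the upper 32 bits and pid/uid in the lower 32 bits:
 *
 *	u64 id   = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;	upper half: thread group id
 *	u32 pid  = (u32) id;	lower half: task pid
 *
 *	u64 ug  = bpf_get_current_uid_gid();
 *	u32 gid = ug >> 32;
 *	u32 uid = (u32) ug;
 */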
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* The verifier guarantees that size > 0. If task->comm exceeds
	 * size, make sure that buf is %NUL-terminated. This is done
	 * unconditionally here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
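/* Usage sketch (illustrative, program side, not part of this file): since
 * arg1 is ARG_PTR_TO_UNINIT_MEM, the destination buffer does not need to
 * be initialized by the program; the helper either fills it with the
 * %NUL-terminated comm or zeroes it on failure. Roughly:
 *
 *	char comm[TASK_COMM_LEN];	TASK_COMM_LEN is 16
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 */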