/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If a kernel subsystem allows eBPF programs to call this function,
 * it should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete; therefore eBPF programs must run under the RCU
 * read lock if they are allowed to access maps, so rcu_read_lock_held()
 * is checked in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
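/* Illustrative sketch, not part of the original file: per the comment above,
 * a subsystem makes the map helpers callable by returning their protos from
 * its bpf_verifier_ops->get_func_proto() callback, and returns NULL for any
 * helper it does not allow. The function name "sample_map_func_proto" is
 * hypothetical.
 */
static const struct bpf_func_proto * __maybe_unused
sample_map_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		/* helper not allowed by this hypothetical subsystem */
		return NULL;
	}
}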
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
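/* Illustrative sketch, not part of the original file: callers split the
 * packed 64-bit values returned by bpf_get_current_pid_tgid() and
 * bpf_get_current_uid_gid() above as shown below. The helper names are
 * hypothetical.
 */
static inline u32 sample_unpack_upper(u64 packed)
{
	return packed >> 32;	/* tgid or gid lives in the upper 32 bits */
}

static inline u32 sample_unpack_lower(u64 packed)
{
	return (u32) packed;	/* pid or uid lives in the lower 32 bits */
}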