/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>

/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held in
 * all three functions.
 */
static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* verifier checked that R1 contains a valid pointer to bpf_map
	 * and R2 points to a program stack and map->key_size bytes were
	 * initialized
	 */
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value;

	WARN_ON_ONCE(!rcu_read_lock_held());

	value = map->ops->map_lookup_elem(map, key);

	/* lookup() returns either pointer to element value or NULL
	 * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type
	 */
	return (unsigned long) value;
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value = (void *) (unsigned long) r3;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_update_elem(map, key, value, r4);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
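/* Illustrative sketch, not part of the original file: per the comment above
 * bpf_map_lookup_elem(), a kernel subsystem that wants its eBPF programs to
 * call these map helpers would hand the protos back from its
 * verifier_ops->get_func_proto() callback, roughly as below. The function
 * name is a hypothetical placeholder; only the BPF_FUNC_* ids and the protos
 * themselves are taken from the surrounding code.
 */
#if 0
static const struct bpf_func_proto *
example_get_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		return NULL;
	}
}
#endif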
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;

	if (!task)
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (!task)
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;
	char *buf = (char *) (long) r1;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
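/* Illustrative sketch, not part of the original file: how an eBPF program
 * (built against the program-side helper wrappers, e.g. bpf_helpers.h in
 * samples/bpf) would typically unpack the packed return values of the
 * current-task helpers above. Section and function names are placeholders.
 */
#if 0
SEC("kprobe/example")
int example_prog(struct pt_regs *ctx)
{
	u64 pid_tgid = bpf_get_current_pid_tgid();
	u32 tgid = pid_tgid >> 32;	/* thread group id, upper 32 bits */
	u32 pid  = (u32) pid_tgid;	/* kernel pid (thread id), lower 32 bits */

	u64 uid_gid = bpf_get_current_uid_gid();
	u32 gid = uid_gid >> 32;
	u32 uid = (u32) uid_gid;

	char comm[16];			/* TASK_COMM_LEN */

	bpf_get_current_comm(comm, sizeof(comm));
	return 0;
}
#endif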