/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

static DEFINE_PER_CPU(int, bpf_prog_active);

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	return probe_kernel_read(dst, unsafe_ptr, size);
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_event *event;

	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	event = (struct perf_event *)array->ptrs[index];
	if (!event)
		return -ENOENT;

	/* make sure event is local and doesn't have pmu::count */
	if (event->oncpu != smp_processor_id() ||
	    event->pmu->count)
		return -EINVAL;

	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF programs.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *data = (void *) (long) r4;
	struct perf_sample_data sample_data;
	struct perf_event *event;
	struct perf_raw_record raw = {
		.size = size,
		.data = data,
	};

	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	event = (struct perf_event *)array->ptrs[index];
	if (unlikely(!event))
		return -ENOENT;

	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != smp_processor_id()))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = &raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	default:
		return NULL;
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;

	/* only read is allowed */
	if (type != BPF_READ)
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	return true;
}

static struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto	 = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);
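
/*
 * Illustrative sketch, not part of this file: a minimal BPF_PROG_TYPE_KPROBE
 * program in the restricted C used under samples/bpf/, showing how the
 * helpers exposed by kprobe_prog_func_proto() above are consumed. The attach
 * point "do_sys_open", the function name trace_open, and the "bpf_helpers.h"
 * wrappers are assumptions borrowed from the samples; such a program is built
 * with clang targeting BPF and loaded via bpf(BPF_PROG_LOAD), not compiled
 * into the kernel. Note that fmt must live on the BPF stack, since
 * bpf_trace_printk_proto declares arg1 as ARG_PTR_TO_STACK, and the GPL
 * license string is required because the helpers above are gpl_only.
 *
 *	#include <linux/ptrace.h>
 *	#include <linux/version.h>
 *	#include <uapi/linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	SEC("kprobe/do_sys_open")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		// format string on the BPF stack, NUL-terminated
 *		char fmt[] = "open() called by pid %d\n";
 *		// lower 32 bits of the helper's return value hold the pid
 *		int pid = bpf_get_current_pid_tgid();
 *
 *		bpf_trace_printk(fmt, sizeof(fmt), pid);
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 *	u32 _version SEC("version") = LINUX_VERSION_CODE;
 *
 * Once attached to a kprobe perf event, the output shows up in
 * /sys/kernel/debug/tracing/trace_pipe.
 */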