/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed the parent to be created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because the code below is called only via the perf_event_open
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler, and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
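
/*
 * Illustrative sketch, not part of this file: the checks above run when user
 * space opens a tracepoint event through perf_event_open().  A raw-sample
 * request like the one below only gets past perf_trace_event_perm() if the
 * caller has CAP_SYS_ADMIN, the perf_event_paranoid sysctl permits raw
 * tracepoint data, or the event is a per-task event on a class flagged
 * TRACE_EVENT_FL_CAP_ANY (event_id here stands for the value read from the
 * event's tracefs "id" file):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_TRACEPOINT,
 *		.size		= sizeof(attr),
 *		.config		= event_id,
 *		.sample_type	= PERF_SAMPLE_RAW,
 *	};
 *	fd = perf_event_open(&attr, pid, cpu, -1, 0);
 */
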
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
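
/*
 * Note on the buffer scheme above (a summary, not a new interface): the
 * scratch buffers in perf_trace_buf[] are shared by every perf trace event
 * in the system.  There is one per-CPU buffer of PERF_MAX_TRACE_SIZE bytes
 * for each of the PERF_NR_CONTEXTS recursion contexts (task, softirq,
 * hardirq, NMI), so an event firing in interrupt context does not scribble
 * over a record still being assembled in task context on the same CPU.
 * total_ref_count ensures the buffers are allocated by the first registered
 * event id and freed only after the last one is gone.
 */
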
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;
	path = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!path)
		return -ENOMEM;
	ret = strncpy_from_user(
		path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
	if (ret < 0)
		goto out;
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(
		path, p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */
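
/*
 * Illustrative sketch, not part of this file: perf_kprobe_init() and
 * perf_uprobe_init() above are reached when user space opens an event on the
 * "kprobe" or "uprobe" PMU rather than on a pre-defined tracepoint.  Roughly
 * (the PMU type value comes from /sys/bus/event_source/devices/kprobe/type,
 * and "vfs_read" is just an example symbol):
 *
 *	struct perf_event_attr attr = {
 *		.type		= kprobe_pmu_type,
 *		.size		= sizeof(attr),
 *		.kprobe_func	= (__u64)(unsigned long)"vfs_read",
 *		.probe_offset	= 0,
 *	};
 *	fd = perf_event_open(&attr, -1, 0, -1, 0);
 *
 * The uprobe variant uses .uprobe_path plus .probe_offset instead.
 */
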
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from the alignment to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
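
/*
 * Typical caller pattern (a sketch mirroring perf_ftrace_function_call()
 * below; real tracepoint handlers are generated from the TRACE_EVENT()
 * macros):
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the trace entry ...
 *	perf_trace_buf_submit(entry, size, rctx, type, count, regs, head, task);
 *
 * The memset() above zeroes the trailing, alignment-induced padding of the
 * requested record so no stale stack bytes are handed to user space.
 */
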
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags = FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */
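
/*
 * Note on perf_ftrace_event_register() (a summary, not a new interface):
 * returning 1 from TRACE_REG_PERF_ADD/DEL tells perf_trace_add()/del() that
 * the event handled itself, so it is never put on the per-cpu hlist.
 * Instead, ADD stores the current CPU in ftrace_ops.private and DEL resets
 * it to nr_cpu_ids (an id no CPU can have), so perf_ftrace_function_call()
 * only fires on the CPU the perf event is currently scheduled on.
 */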