1 /* 2 * Performance events core code: 3 * 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 8 * 9 * For licensing details see kernel-base/COPYING 10 */ 11 12 #include <linux/fs.h> 13 #include <linux/mm.h> 14 #include <linux/cpu.h> 15 #include <linux/smp.h> 16 #include <linux/idr.h> 17 #include <linux/file.h> 18 #include <linux/poll.h> 19 #include <linux/slab.h> 20 #include <linux/hash.h> 21 #include <linux/tick.h> 22 #include <linux/sysfs.h> 23 #include <linux/dcache.h> 24 #include <linux/percpu.h> 25 #include <linux/ptrace.h> 26 #include <linux/reboot.h> 27 #include <linux/vmstat.h> 28 #include <linux/device.h> 29 #include <linux/export.h> 30 #include <linux/vmalloc.h> 31 #include <linux/hardirq.h> 32 #include <linux/rculist.h> 33 #include <linux/uaccess.h> 34 #include <linux/syscalls.h> 35 #include <linux/anon_inodes.h> 36 #include <linux/kernel_stat.h> 37 #include <linux/cgroup.h> 38 #include <linux/perf_event.h> 39 #include <linux/trace_events.h> 40 #include <linux/hw_breakpoint.h> 41 #include <linux/mm_types.h> 42 #include <linux/module.h> 43 #include <linux/mman.h> 44 #include <linux/compat.h> 45 #include <linux/bpf.h> 46 #include <linux/filter.h> 47 48 #include "internal.h" 49 50 #include <asm/irq_regs.h> 51 52 static struct workqueue_struct *perf_wq; 53 54 typedef int (*remote_function_f)(void *); 55 56 struct remote_function_call { 57 struct task_struct *p; 58 remote_function_f func; 59 void *info; 60 int ret; 61 }; 62 63 static void remote_function(void *data) 64 { 65 struct remote_function_call *tfc = data; 66 struct task_struct *p = tfc->p; 67 68 if (p) { 69 tfc->ret = -EAGAIN; 70 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 71 return; 72 } 73 74 tfc->ret = tfc->func(tfc->info); 75 } 76 77 /** 78 * task_function_call - call a function on the cpu on which a task runs 79 * @p: the task to evaluate 80 * @func: the function to be called 81 * @info: the function call argument 82 * 83 * Calls the function @func when the task is currently running. This might 84 * be on the current CPU, which just calls the function directly 85 * 86 * returns: @func return value, or 87 * -ESRCH - when the process isn't running 88 * -EAGAIN - when the process moved away 89 */ 90 static int 91 task_function_call(struct task_struct *p, remote_function_f func, void *info) 92 { 93 struct remote_function_call data = { 94 .p = p, 95 .func = func, 96 .info = info, 97 .ret = -ESRCH, /* No such (running) process */ 98 }; 99 100 if (task_curr(p)) 101 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 102 103 return data.ret; 104 } 105 106 /** 107 * cpu_function_call - call a function on the cpu 108 * @func: the function to be called 109 * @info: the function call argument 110 * 111 * Calls the function @func on the remote cpu. 112 * 113 * returns: @func return value or -ENXIO when the cpu is offline 114 */ 115 static int cpu_function_call(int cpu, remote_function_f func, void *info) 116 { 117 struct remote_function_call data = { 118 .p = NULL, 119 .func = func, 120 .info = info, 121 .ret = -ENXIO, /* No such CPU */ 122 }; 123 124 smp_call_function_single(cpu, remote_function, &data, 1); 125 126 return data.ret; 127 } 128 129 #define EVENT_OWNER_KERNEL ((void *) -1) 130 131 static bool is_kernel_event(struct perf_event *event) 132 { 133 return event->owner == EVENT_OWNER_KERNEL; 134 } 135 136 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ 137 PERF_FLAG_FD_OUTPUT |\ 138 PERF_FLAG_PID_CGROUP |\ 139 PERF_FLAG_FD_CLOEXEC) 140 141 /* 142 * branch priv levels that need permission checks 143 */ 144 #define PERF_SAMPLE_BRANCH_PERM_PLM \ 145 (PERF_SAMPLE_BRANCH_KERNEL |\ 146 PERF_SAMPLE_BRANCH_HV) 147 148 enum event_type_t { 149 EVENT_FLEXIBLE = 0x1, 150 EVENT_PINNED = 0x2, 151 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 152 }; 153 154 /* 155 * perf_sched_events : >0 events exist 156 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 157 */ 158 struct static_key_deferred perf_sched_events __read_mostly; 159 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 160 static DEFINE_PER_CPU(int, perf_sched_cb_usages); 161 162 static atomic_t nr_mmap_events __read_mostly; 163 static atomic_t nr_comm_events __read_mostly; 164 static atomic_t nr_task_events __read_mostly; 165 static atomic_t nr_freq_events __read_mostly; 166 static atomic_t nr_switch_events __read_mostly; 167 168 static LIST_HEAD(pmus); 169 static DEFINE_MUTEX(pmus_lock); 170 static struct srcu_struct pmus_srcu; 171 172 /* 173 * perf event paranoia level: 174 * -1 - not paranoid at all 175 * 0 - disallow raw tracepoint access for unpriv 176 * 1 - disallow cpu events for unpriv 177 * 2 - disallow kernel profiling for unpriv 178 */ 179 int sysctl_perf_event_paranoid __read_mostly = 1; 180 181 /* Minimum for 512 kiB + 1 user control page */ 182 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ 183 184 /* 185 * max perf event sample rate 186 */ 187 #define DEFAULT_MAX_SAMPLE_RATE 100000 188 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) 189 #define DEFAULT_CPU_TIME_MAX_PERCENT 25 190 191 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; 192 193 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); 194 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; 195 196 static int perf_sample_allowed_ns __read_mostly = 197 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; 198 199 static void update_perf_cpu_limits(void) 200 { 201 u64 tmp = perf_sample_period_ns; 202 203 tmp *= sysctl_perf_cpu_time_max_percent; 204 do_div(tmp, 100); 205 ACCESS_ONCE(perf_sample_allowed_ns) = tmp; 206 } 207 208 static int perf_rotate_context(struct perf_cpu_context *cpuctx); 209 210 int perf_proc_update_handler(struct ctl_table *table, int write, 211 void __user *buffer, size_t *lenp, 212 loff_t *ppos) 213 { 214 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 215 216 if (ret || !write) 217 return ret; 218 219 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 220 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 221 update_perf_cpu_limits(); 222 223 return 0; 224 } 225 226 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; 227 228 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, 229 void __user *buffer, size_t *lenp, 230 loff_t *ppos) 231 { 232 int ret = proc_dointvec(table, write, buffer, lenp, ppos); 233 234 if (ret || !write) 235 return ret; 236 237 update_perf_cpu_limits(); 238 239 return 0; 240 } 241 242 /* 243 * perf samples are done in some very critical code paths (NMIs). 244 * If they take too much CPU time, the system can lock up and not 245 * get any real work done. This will drop the sample rate when 246 * we detect that events are taking too long. 247 */ 248 #define NR_ACCUMULATED_SAMPLES 128 249 static DEFINE_PER_CPU(u64, running_sample_length); 250 251 static void perf_duration_warn(struct irq_work *w) 252 { 253 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); 254 u64 avg_local_sample_len; 255 u64 local_samples_len; 256 257 local_samples_len = __this_cpu_read(running_sample_length); 258 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 259 260 printk_ratelimited(KERN_WARNING 261 "perf interrupt took too long (%lld > %lld), lowering " 262 "kernel.perf_event_max_sample_rate to %d\n", 263 avg_local_sample_len, allowed_ns >> 1, 264 sysctl_perf_event_sample_rate); 265 } 266 267 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn); 268 269 void perf_sample_event_took(u64 sample_len_ns) 270 { 271 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); 272 u64 avg_local_sample_len; 273 u64 local_samples_len; 274 275 if (allowed_ns == 0) 276 return; 277 278 /* decay the counter by 1 average sample */ 279 local_samples_len = __this_cpu_read(running_sample_length); 280 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; 281 local_samples_len += sample_len_ns; 282 __this_cpu_write(running_sample_length, local_samples_len); 283 284 /* 285 * note: this will be biased artifically low until we have 286 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us 287 * from having to maintain a count. 288 */ 289 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 290 291 if (avg_local_sample_len <= allowed_ns) 292 return; 293 294 if (max_samples_per_tick <= 1) 295 return; 296 297 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2); 298 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; 299 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 300 301 update_perf_cpu_limits(); 302 303 if (!irq_work_queue(&perf_duration_work)) { 304 early_printk("perf interrupt took too long (%lld > %lld), lowering " 305 "kernel.perf_event_max_sample_rate to %d\n", 306 avg_local_sample_len, allowed_ns >> 1, 307 sysctl_perf_event_sample_rate); 308 } 309 } 310 311 static atomic64_t perf_event_id; 312 313 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 314 enum event_type_t event_type); 315 316 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 317 enum event_type_t event_type, 318 struct task_struct *task); 319 320 static void update_context_time(struct perf_event_context *ctx); 321 static u64 perf_event_time(struct perf_event *event); 322 323 void __weak perf_event_print_debug(void) { } 324 325 extern __weak const char *perf_pmu_name(void) 326 { 327 return "pmu"; 328 } 329 330 static inline u64 perf_clock(void) 331 { 332 return local_clock(); 333 } 334 335 static inline u64 perf_event_clock(struct perf_event *event) 336 { 337 return event->clock(); 338 } 339 340 static inline struct perf_cpu_context * 341 __get_cpu_context(struct perf_event_context *ctx) 342 { 343 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); 344 } 345 346 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, 347 struct perf_event_context *ctx) 348 { 349 raw_spin_lock(&cpuctx->ctx.lock); 350 if (ctx) 351 raw_spin_lock(&ctx->lock); 352 } 353 354 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, 355 struct perf_event_context *ctx) 356 { 357 if (ctx) 358 raw_spin_unlock(&ctx->lock); 359 raw_spin_unlock(&cpuctx->ctx.lock); 360 } 361 362 #ifdef CONFIG_CGROUP_PERF 363 364 static inline bool 365 perf_cgroup_match(struct perf_event *event) 366 { 367 struct perf_event_context *ctx = event->ctx; 368 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 369 370 /* @event doesn't care about cgroup */ 371 if (!event->cgrp) 372 return true; 373 374 /* wants specific cgroup scope but @cpuctx isn't associated with any */ 375 if (!cpuctx->cgrp) 376 return false; 377 378 /* 379 * Cgroup scoping is recursive. An event enabled for a cgroup is 380 * also enabled for all its descendant cgroups. If @cpuctx's 381 * cgroup is a descendant of @event's (the test covers identity 382 * case), it's a match. 383 */ 384 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, 385 event->cgrp->css.cgroup); 386 } 387 388 static inline void perf_detach_cgroup(struct perf_event *event) 389 { 390 css_put(&event->cgrp->css); 391 event->cgrp = NULL; 392 } 393 394 static inline int is_cgroup_event(struct perf_event *event) 395 { 396 return event->cgrp != NULL; 397 } 398 399 static inline u64 perf_cgroup_event_time(struct perf_event *event) 400 { 401 struct perf_cgroup_info *t; 402 403 t = per_cpu_ptr(event->cgrp->info, event->cpu); 404 return t->time; 405 } 406 407 static inline void __update_cgrp_time(struct perf_cgroup *cgrp) 408 { 409 struct perf_cgroup_info *info; 410 u64 now; 411 412 now = perf_clock(); 413 414 info = this_cpu_ptr(cgrp->info); 415 416 info->time += now - info->timestamp; 417 info->timestamp = now; 418 } 419 420 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 421 { 422 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 423 if (cgrp_out) 424 __update_cgrp_time(cgrp_out); 425 } 426 427 static inline void update_cgrp_time_from_event(struct perf_event *event) 428 { 429 struct perf_cgroup *cgrp; 430 431 /* 432 * ensure we access cgroup data only when needed and 433 * when we know the cgroup is pinned (css_get) 434 */ 435 if (!is_cgroup_event(event)) 436 return; 437 438 cgrp = perf_cgroup_from_task(current, event->ctx); 439 /* 440 * Do not update time when cgroup is not active 441 */ 442 if (cgrp == event->cgrp) 443 __update_cgrp_time(event->cgrp); 444 } 445 446 static inline void 447 perf_cgroup_set_timestamp(struct task_struct *task, 448 struct perf_event_context *ctx) 449 { 450 struct perf_cgroup *cgrp; 451 struct perf_cgroup_info *info; 452 453 /* 454 * ctx->lock held by caller 455 * ensure we do not access cgroup data 456 * unless we have the cgroup pinned (css_get) 457 */ 458 if (!task || !ctx->nr_cgroups) 459 return; 460 461 cgrp = perf_cgroup_from_task(task, ctx); 462 info = this_cpu_ptr(cgrp->info); 463 info->timestamp = ctx->timestamp; 464 } 465 466 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ 467 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ 468 469 /* 470 * reschedule events based on the cgroup constraint of task. 471 * 472 * mode SWOUT : schedule out everything 473 * mode SWIN : schedule in based on cgroup for next 474 */ 475 static void perf_cgroup_switch(struct task_struct *task, int mode) 476 { 477 struct perf_cpu_context *cpuctx; 478 struct pmu *pmu; 479 unsigned long flags; 480 481 /* 482 * disable interrupts to avoid geting nr_cgroup 483 * changes via __perf_event_disable(). Also 484 * avoids preemption. 485 */ 486 local_irq_save(flags); 487 488 /* 489 * we reschedule only in the presence of cgroup 490 * constrained events. 491 */ 492 493 list_for_each_entry_rcu(pmu, &pmus, entry) { 494 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 495 if (cpuctx->unique_pmu != pmu) 496 continue; /* ensure we process each cpuctx once */ 497 498 /* 499 * perf_cgroup_events says at least one 500 * context on this CPU has cgroup events. 501 * 502 * ctx->nr_cgroups reports the number of cgroup 503 * events for a context. 504 */ 505 if (cpuctx->ctx.nr_cgroups > 0) { 506 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 507 perf_pmu_disable(cpuctx->ctx.pmu); 508 509 if (mode & PERF_CGROUP_SWOUT) { 510 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 511 /* 512 * must not be done before ctxswout due 513 * to event_filter_match() in event_sched_out() 514 */ 515 cpuctx->cgrp = NULL; 516 } 517 518 if (mode & PERF_CGROUP_SWIN) { 519 WARN_ON_ONCE(cpuctx->cgrp); 520 /* 521 * set cgrp before ctxsw in to allow 522 * event_filter_match() to not have to pass 523 * task around 524 * we pass the cpuctx->ctx to perf_cgroup_from_task() 525 * because cgorup events are only per-cpu 526 */ 527 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx); 528 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); 529 } 530 perf_pmu_enable(cpuctx->ctx.pmu); 531 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 532 } 533 } 534 535 local_irq_restore(flags); 536 } 537 538 static inline void perf_cgroup_sched_out(struct task_struct *task, 539 struct task_struct *next) 540 { 541 struct perf_cgroup *cgrp1; 542 struct perf_cgroup *cgrp2 = NULL; 543 544 rcu_read_lock(); 545 /* 546 * we come here when we know perf_cgroup_events > 0 547 * we do not need to pass the ctx here because we know 548 * we are holding the rcu lock 549 */ 550 cgrp1 = perf_cgroup_from_task(task, NULL); 551 552 /* 553 * next is NULL when called from perf_event_enable_on_exec() 554 * that will systematically cause a cgroup_switch() 555 */ 556 if (next) 557 cgrp2 = perf_cgroup_from_task(next, NULL); 558 559 /* 560 * only schedule out current cgroup events if we know 561 * that we are switching to a different cgroup. Otherwise, 562 * do no touch the cgroup events. 563 */ 564 if (cgrp1 != cgrp2) 565 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 566 567 rcu_read_unlock(); 568 } 569 570 static inline void perf_cgroup_sched_in(struct task_struct *prev, 571 struct task_struct *task) 572 { 573 struct perf_cgroup *cgrp1; 574 struct perf_cgroup *cgrp2 = NULL; 575 576 rcu_read_lock(); 577 /* 578 * we come here when we know perf_cgroup_events > 0 579 * we do not need to pass the ctx here because we know 580 * we are holding the rcu lock 581 */ 582 cgrp1 = perf_cgroup_from_task(task, NULL); 583 584 /* prev can never be NULL */ 585 cgrp2 = perf_cgroup_from_task(prev, NULL); 586 587 /* 588 * only need to schedule in cgroup events if we are changing 589 * cgroup during ctxsw. Cgroup events were not scheduled 590 * out of ctxsw out if that was not the case. 591 */ 592 if (cgrp1 != cgrp2) 593 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 594 595 rcu_read_unlock(); 596 } 597 598 static inline int perf_cgroup_connect(int fd, struct perf_event *event, 599 struct perf_event_attr *attr, 600 struct perf_event *group_leader) 601 { 602 struct perf_cgroup *cgrp; 603 struct cgroup_subsys_state *css; 604 struct fd f = fdget(fd); 605 int ret = 0; 606 607 if (!f.file) 608 return -EBADF; 609 610 css = css_tryget_online_from_dir(f.file->f_path.dentry, 611 &perf_event_cgrp_subsys); 612 if (IS_ERR(css)) { 613 ret = PTR_ERR(css); 614 goto out; 615 } 616 617 cgrp = container_of(css, struct perf_cgroup, css); 618 event->cgrp = cgrp; 619 620 /* 621 * all events in a group must monitor 622 * the same cgroup because a task belongs 623 * to only one perf cgroup at a time 624 */ 625 if (group_leader && group_leader->cgrp != cgrp) { 626 perf_detach_cgroup(event); 627 ret = -EINVAL; 628 } 629 out: 630 fdput(f); 631 return ret; 632 } 633 634 static inline void 635 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 636 { 637 struct perf_cgroup_info *t; 638 t = per_cpu_ptr(event->cgrp->info, event->cpu); 639 event->shadow_ctx_time = now - t->timestamp; 640 } 641 642 static inline void 643 perf_cgroup_defer_enabled(struct perf_event *event) 644 { 645 /* 646 * when the current task's perf cgroup does not match 647 * the event's, we need to remember to call the 648 * perf_mark_enable() function the first time a task with 649 * a matching perf cgroup is scheduled in. 650 */ 651 if (is_cgroup_event(event) && !perf_cgroup_match(event)) 652 event->cgrp_defer_enabled = 1; 653 } 654 655 static inline void 656 perf_cgroup_mark_enabled(struct perf_event *event, 657 struct perf_event_context *ctx) 658 { 659 struct perf_event *sub; 660 u64 tstamp = perf_event_time(event); 661 662 if (!event->cgrp_defer_enabled) 663 return; 664 665 event->cgrp_defer_enabled = 0; 666 667 event->tstamp_enabled = tstamp - event->total_time_enabled; 668 list_for_each_entry(sub, &event->sibling_list, group_entry) { 669 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { 670 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 671 sub->cgrp_defer_enabled = 0; 672 } 673 } 674 } 675 #else /* !CONFIG_CGROUP_PERF */ 676 677 static inline bool 678 perf_cgroup_match(struct perf_event *event) 679 { 680 return true; 681 } 682 683 static inline void perf_detach_cgroup(struct perf_event *event) 684 {} 685 686 static inline int is_cgroup_event(struct perf_event *event) 687 { 688 return 0; 689 } 690 691 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) 692 { 693 return 0; 694 } 695 696 static inline void update_cgrp_time_from_event(struct perf_event *event) 697 { 698 } 699 700 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 701 { 702 } 703 704 static inline void perf_cgroup_sched_out(struct task_struct *task, 705 struct task_struct *next) 706 { 707 } 708 709 static inline void perf_cgroup_sched_in(struct task_struct *prev, 710 struct task_struct *task) 711 { 712 } 713 714 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, 715 struct perf_event_attr *attr, 716 struct perf_event *group_leader) 717 { 718 return -EINVAL; 719 } 720 721 static inline void 722 perf_cgroup_set_timestamp(struct task_struct *task, 723 struct perf_event_context *ctx) 724 { 725 } 726 727 void 728 perf_cgroup_switch(struct task_struct *task, struct task_struct *next) 729 { 730 } 731 732 static inline void 733 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 734 { 735 } 736 737 static inline u64 perf_cgroup_event_time(struct perf_event *event) 738 { 739 return 0; 740 } 741 742 static inline void 743 perf_cgroup_defer_enabled(struct perf_event *event) 744 { 745 } 746 747 static inline void 748 perf_cgroup_mark_enabled(struct perf_event *event, 749 struct perf_event_context *ctx) 750 { 751 } 752 #endif 753 754 /* 755 * set default to be dependent on timer tick just 756 * like original code 757 */ 758 #define PERF_CPU_HRTIMER (1000 / HZ) 759 /* 760 * function must be called with interrupts disbled 761 */ 762 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) 763 { 764 struct perf_cpu_context *cpuctx; 765 int rotations = 0; 766 767 WARN_ON(!irqs_disabled()); 768 769 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); 770 rotations = perf_rotate_context(cpuctx); 771 772 raw_spin_lock(&cpuctx->hrtimer_lock); 773 if (rotations) 774 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); 775 else 776 cpuctx->hrtimer_active = 0; 777 raw_spin_unlock(&cpuctx->hrtimer_lock); 778 779 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; 780 } 781 782 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) 783 { 784 struct hrtimer *timer = &cpuctx->hrtimer; 785 struct pmu *pmu = cpuctx->ctx.pmu; 786 u64 interval; 787 788 /* no multiplexing needed for SW PMU */ 789 if (pmu->task_ctx_nr == perf_sw_context) 790 return; 791 792 /* 793 * check default is sane, if not set then force to 794 * default interval (1/tick) 795 */ 796 interval = pmu->hrtimer_interval_ms; 797 if (interval < 1) 798 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; 799 800 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); 801 802 raw_spin_lock_init(&cpuctx->hrtimer_lock); 803 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 804 timer->function = perf_mux_hrtimer_handler; 805 } 806 807 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) 808 { 809 struct hrtimer *timer = &cpuctx->hrtimer; 810 struct pmu *pmu = cpuctx->ctx.pmu; 811 unsigned long flags; 812 813 /* not for SW PMU */ 814 if (pmu->task_ctx_nr == perf_sw_context) 815 return 0; 816 817 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); 818 if (!cpuctx->hrtimer_active) { 819 cpuctx->hrtimer_active = 1; 820 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); 821 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 822 } 823 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); 824 825 return 0; 826 } 827 828 void perf_pmu_disable(struct pmu *pmu) 829 { 830 int *count = this_cpu_ptr(pmu->pmu_disable_count); 831 if (!(*count)++) 832 pmu->pmu_disable(pmu); 833 } 834 835 void perf_pmu_enable(struct pmu *pmu) 836 { 837 int *count = this_cpu_ptr(pmu->pmu_disable_count); 838 if (!--(*count)) 839 pmu->pmu_enable(pmu); 840 } 841 842 static DEFINE_PER_CPU(struct list_head, active_ctx_list); 843 844 /* 845 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and 846 * perf_event_task_tick() are fully serialized because they're strictly cpu 847 * affine and perf_event_ctx{activate,deactivate} are called with IRQs 848 * disabled, while perf_event_task_tick is called from IRQ context. 849 */ 850 static void perf_event_ctx_activate(struct perf_event_context *ctx) 851 { 852 struct list_head *head = this_cpu_ptr(&active_ctx_list); 853 854 WARN_ON(!irqs_disabled()); 855 856 WARN_ON(!list_empty(&ctx->active_ctx_list)); 857 858 list_add(&ctx->active_ctx_list, head); 859 } 860 861 static void perf_event_ctx_deactivate(struct perf_event_context *ctx) 862 { 863 WARN_ON(!irqs_disabled()); 864 865 WARN_ON(list_empty(&ctx->active_ctx_list)); 866 867 list_del_init(&ctx->active_ctx_list); 868 } 869 870 static void get_ctx(struct perf_event_context *ctx) 871 { 872 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); 873 } 874 875 static void free_ctx(struct rcu_head *head) 876 { 877 struct perf_event_context *ctx; 878 879 ctx = container_of(head, struct perf_event_context, rcu_head); 880 kfree(ctx->task_ctx_data); 881 kfree(ctx); 882 } 883 884 static void put_ctx(struct perf_event_context *ctx) 885 { 886 if (atomic_dec_and_test(&ctx->refcount)) { 887 if (ctx->parent_ctx) 888 put_ctx(ctx->parent_ctx); 889 if (ctx->task) 890 put_task_struct(ctx->task); 891 call_rcu(&ctx->rcu_head, free_ctx); 892 } 893 } 894 895 /* 896 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and 897 * perf_pmu_migrate_context() we need some magic. 898 * 899 * Those places that change perf_event::ctx will hold both 900 * perf_event_ctx::mutex of the 'old' and 'new' ctx value. 901 * 902 * Lock ordering is by mutex address. There are two other sites where 903 * perf_event_context::mutex nests and those are: 904 * 905 * - perf_event_exit_task_context() [ child , 0 ] 906 * __perf_event_exit_task() 907 * sync_child_event() 908 * put_event() [ parent, 1 ] 909 * 910 * - perf_event_init_context() [ parent, 0 ] 911 * inherit_task_group() 912 * inherit_group() 913 * inherit_event() 914 * perf_event_alloc() 915 * perf_init_event() 916 * perf_try_init_event() [ child , 1 ] 917 * 918 * While it appears there is an obvious deadlock here -- the parent and child 919 * nesting levels are inverted between the two. This is in fact safe because 920 * life-time rules separate them. That is an exiting task cannot fork, and a 921 * spawning task cannot (yet) exit. 922 * 923 * But remember that that these are parent<->child context relations, and 924 * migration does not affect children, therefore these two orderings should not 925 * interact. 926 * 927 * The change in perf_event::ctx does not affect children (as claimed above) 928 * because the sys_perf_event_open() case will install a new event and break 929 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only 930 * concerned with cpuctx and that doesn't have children. 931 * 932 * The places that change perf_event::ctx will issue: 933 * 934 * perf_remove_from_context(); 935 * synchronize_rcu(); 936 * perf_install_in_context(); 937 * 938 * to affect the change. The remove_from_context() + synchronize_rcu() should 939 * quiesce the event, after which we can install it in the new location. This 940 * means that only external vectors (perf_fops, prctl) can perturb the event 941 * while in transit. Therefore all such accessors should also acquire 942 * perf_event_context::mutex to serialize against this. 943 * 944 * However; because event->ctx can change while we're waiting to acquire 945 * ctx->mutex we must be careful and use the below perf_event_ctx_lock() 946 * function. 947 * 948 * Lock order: 949 * task_struct::perf_event_mutex 950 * perf_event_context::mutex 951 * perf_event_context::lock 952 * perf_event::child_mutex; 953 * perf_event::mmap_mutex 954 * mmap_sem 955 */ 956 static struct perf_event_context * 957 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) 958 { 959 struct perf_event_context *ctx; 960 961 again: 962 rcu_read_lock(); 963 ctx = ACCESS_ONCE(event->ctx); 964 if (!atomic_inc_not_zero(&ctx->refcount)) { 965 rcu_read_unlock(); 966 goto again; 967 } 968 rcu_read_unlock(); 969 970 mutex_lock_nested(&ctx->mutex, nesting); 971 if (event->ctx != ctx) { 972 mutex_unlock(&ctx->mutex); 973 put_ctx(ctx); 974 goto again; 975 } 976 977 return ctx; 978 } 979 980 static inline struct perf_event_context * 981 perf_event_ctx_lock(struct perf_event *event) 982 { 983 return perf_event_ctx_lock_nested(event, 0); 984 } 985 986 static void perf_event_ctx_unlock(struct perf_event *event, 987 struct perf_event_context *ctx) 988 { 989 mutex_unlock(&ctx->mutex); 990 put_ctx(ctx); 991 } 992 993 /* 994 * This must be done under the ctx->lock, such as to serialize against 995 * context_equiv(), therefore we cannot call put_ctx() since that might end up 996 * calling scheduler related locks and ctx->lock nests inside those. 997 */ 998 static __must_check struct perf_event_context * 999 unclone_ctx(struct perf_event_context *ctx) 1000 { 1001 struct perf_event_context *parent_ctx = ctx->parent_ctx; 1002 1003 lockdep_assert_held(&ctx->lock); 1004 1005 if (parent_ctx) 1006 ctx->parent_ctx = NULL; 1007 ctx->generation++; 1008 1009 return parent_ctx; 1010 } 1011 1012 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) 1013 { 1014 /* 1015 * only top level events have the pid namespace they were created in 1016 */ 1017 if (event->parent) 1018 event = event->parent; 1019 1020 return task_tgid_nr_ns(p, event->ns); 1021 } 1022 1023 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) 1024 { 1025 /* 1026 * only top level events have the pid namespace they were created in 1027 */ 1028 if (event->parent) 1029 event = event->parent; 1030 1031 return task_pid_nr_ns(p, event->ns); 1032 } 1033 1034 /* 1035 * If we inherit events we want to return the parent event id 1036 * to userspace. 1037 */ 1038 static u64 primary_event_id(struct perf_event *event) 1039 { 1040 u64 id = event->id; 1041 1042 if (event->parent) 1043 id = event->parent->id; 1044 1045 return id; 1046 } 1047 1048 /* 1049 * Get the perf_event_context for a task and lock it. 1050 * This has to cope with with the fact that until it is locked, 1051 * the context could get moved to another task. 1052 */ 1053 static struct perf_event_context * 1054 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) 1055 { 1056 struct perf_event_context *ctx; 1057 1058 retry: 1059 /* 1060 * One of the few rules of preemptible RCU is that one cannot do 1061 * rcu_read_unlock() while holding a scheduler (or nested) lock when 1062 * part of the read side critical section was irqs-enabled -- see 1063 * rcu_read_unlock_special(). 1064 * 1065 * Since ctx->lock nests under rq->lock we must ensure the entire read 1066 * side critical section has interrupts disabled. 1067 */ 1068 local_irq_save(*flags); 1069 rcu_read_lock(); 1070 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); 1071 if (ctx) { 1072 /* 1073 * If this context is a clone of another, it might 1074 * get swapped for another underneath us by 1075 * perf_event_task_sched_out, though the 1076 * rcu_read_lock() protects us from any context 1077 * getting freed. Lock the context and check if it 1078 * got swapped before we could get the lock, and retry 1079 * if so. If we locked the right context, then it 1080 * can't get swapped on us any more. 1081 */ 1082 raw_spin_lock(&ctx->lock); 1083 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { 1084 raw_spin_unlock(&ctx->lock); 1085 rcu_read_unlock(); 1086 local_irq_restore(*flags); 1087 goto retry; 1088 } 1089 1090 if (!atomic_inc_not_zero(&ctx->refcount)) { 1091 raw_spin_unlock(&ctx->lock); 1092 ctx = NULL; 1093 } 1094 } 1095 rcu_read_unlock(); 1096 if (!ctx) 1097 local_irq_restore(*flags); 1098 return ctx; 1099 } 1100 1101 /* 1102 * Get the context for a task and increment its pin_count so it 1103 * can't get swapped to another task. This also increments its 1104 * reference count so that the context can't get freed. 1105 */ 1106 static struct perf_event_context * 1107 perf_pin_task_context(struct task_struct *task, int ctxn) 1108 { 1109 struct perf_event_context *ctx; 1110 unsigned long flags; 1111 1112 ctx = perf_lock_task_context(task, ctxn, &flags); 1113 if (ctx) { 1114 ++ctx->pin_count; 1115 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1116 } 1117 return ctx; 1118 } 1119 1120 static void perf_unpin_context(struct perf_event_context *ctx) 1121 { 1122 unsigned long flags; 1123 1124 raw_spin_lock_irqsave(&ctx->lock, flags); 1125 --ctx->pin_count; 1126 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1127 } 1128 1129 /* 1130 * Update the record of the current time in a context. 1131 */ 1132 static void update_context_time(struct perf_event_context *ctx) 1133 { 1134 u64 now = perf_clock(); 1135 1136 ctx->time += now - ctx->timestamp; 1137 ctx->timestamp = now; 1138 } 1139 1140 static u64 perf_event_time(struct perf_event *event) 1141 { 1142 struct perf_event_context *ctx = event->ctx; 1143 1144 if (is_cgroup_event(event)) 1145 return perf_cgroup_event_time(event); 1146 1147 return ctx ? ctx->time : 0; 1148 } 1149 1150 /* 1151 * Update the total_time_enabled and total_time_running fields for a event. 1152 * The caller of this function needs to hold the ctx->lock. 1153 */ 1154 static void update_event_times(struct perf_event *event) 1155 { 1156 struct perf_event_context *ctx = event->ctx; 1157 u64 run_end; 1158 1159 if (event->state < PERF_EVENT_STATE_INACTIVE || 1160 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 1161 return; 1162 /* 1163 * in cgroup mode, time_enabled represents 1164 * the time the event was enabled AND active 1165 * tasks were in the monitored cgroup. This is 1166 * independent of the activity of the context as 1167 * there may be a mix of cgroup and non-cgroup events. 1168 * 1169 * That is why we treat cgroup events differently 1170 * here. 1171 */ 1172 if (is_cgroup_event(event)) 1173 run_end = perf_cgroup_event_time(event); 1174 else if (ctx->is_active) 1175 run_end = ctx->time; 1176 else 1177 run_end = event->tstamp_stopped; 1178 1179 event->total_time_enabled = run_end - event->tstamp_enabled; 1180 1181 if (event->state == PERF_EVENT_STATE_INACTIVE) 1182 run_end = event->tstamp_stopped; 1183 else 1184 run_end = perf_event_time(event); 1185 1186 event->total_time_running = run_end - event->tstamp_running; 1187 1188 } 1189 1190 /* 1191 * Update total_time_enabled and total_time_running for all events in a group. 1192 */ 1193 static void update_group_times(struct perf_event *leader) 1194 { 1195 struct perf_event *event; 1196 1197 update_event_times(leader); 1198 list_for_each_entry(event, &leader->sibling_list, group_entry) 1199 update_event_times(event); 1200 } 1201 1202 static struct list_head * 1203 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) 1204 { 1205 if (event->attr.pinned) 1206 return &ctx->pinned_groups; 1207 else 1208 return &ctx->flexible_groups; 1209 } 1210 1211 /* 1212 * Add a event from the lists for its context. 1213 * Must be called with ctx->mutex and ctx->lock held. 1214 */ 1215 static void 1216 list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1217 { 1218 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1219 event->attach_state |= PERF_ATTACH_CONTEXT; 1220 1221 /* 1222 * If we're a stand alone event or group leader, we go to the context 1223 * list, group events are kept attached to the group so that 1224 * perf_group_detach can, at all times, locate all siblings. 1225 */ 1226 if (event->group_leader == event) { 1227 struct list_head *list; 1228 1229 if (is_software_event(event)) 1230 event->group_flags |= PERF_GROUP_SOFTWARE; 1231 1232 list = ctx_group_list(event, ctx); 1233 list_add_tail(&event->group_entry, list); 1234 } 1235 1236 if (is_cgroup_event(event)) 1237 ctx->nr_cgroups++; 1238 1239 list_add_rcu(&event->event_entry, &ctx->event_list); 1240 ctx->nr_events++; 1241 if (event->attr.inherit_stat) 1242 ctx->nr_stat++; 1243 1244 ctx->generation++; 1245 } 1246 1247 /* 1248 * Initialize event state based on the perf_event_attr::disabled. 1249 */ 1250 static inline void perf_event__state_init(struct perf_event *event) 1251 { 1252 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : 1253 PERF_EVENT_STATE_INACTIVE; 1254 } 1255 1256 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) 1257 { 1258 int entry = sizeof(u64); /* value */ 1259 int size = 0; 1260 int nr = 1; 1261 1262 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1263 size += sizeof(u64); 1264 1265 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1266 size += sizeof(u64); 1267 1268 if (event->attr.read_format & PERF_FORMAT_ID) 1269 entry += sizeof(u64); 1270 1271 if (event->attr.read_format & PERF_FORMAT_GROUP) { 1272 nr += nr_siblings; 1273 size += sizeof(u64); 1274 } 1275 1276 size += entry * nr; 1277 event->read_size = size; 1278 } 1279 1280 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) 1281 { 1282 struct perf_sample_data *data; 1283 u16 size = 0; 1284 1285 if (sample_type & PERF_SAMPLE_IP) 1286 size += sizeof(data->ip); 1287 1288 if (sample_type & PERF_SAMPLE_ADDR) 1289 size += sizeof(data->addr); 1290 1291 if (sample_type & PERF_SAMPLE_PERIOD) 1292 size += sizeof(data->period); 1293 1294 if (sample_type & PERF_SAMPLE_WEIGHT) 1295 size += sizeof(data->weight); 1296 1297 if (sample_type & PERF_SAMPLE_READ) 1298 size += event->read_size; 1299 1300 if (sample_type & PERF_SAMPLE_DATA_SRC) 1301 size += sizeof(data->data_src.val); 1302 1303 if (sample_type & PERF_SAMPLE_TRANSACTION) 1304 size += sizeof(data->txn); 1305 1306 event->header_size = size; 1307 } 1308 1309 /* 1310 * Called at perf_event creation and when events are attached/detached from a 1311 * group. 1312 */ 1313 static void perf_event__header_size(struct perf_event *event) 1314 { 1315 __perf_event_read_size(event, 1316 event->group_leader->nr_siblings); 1317 __perf_event_header_size(event, event->attr.sample_type); 1318 } 1319 1320 static void perf_event__id_header_size(struct perf_event *event) 1321 { 1322 struct perf_sample_data *data; 1323 u64 sample_type = event->attr.sample_type; 1324 u16 size = 0; 1325 1326 if (sample_type & PERF_SAMPLE_TID) 1327 size += sizeof(data->tid_entry); 1328 1329 if (sample_type & PERF_SAMPLE_TIME) 1330 size += sizeof(data->time); 1331 1332 if (sample_type & PERF_SAMPLE_IDENTIFIER) 1333 size += sizeof(data->id); 1334 1335 if (sample_type & PERF_SAMPLE_ID) 1336 size += sizeof(data->id); 1337 1338 if (sample_type & PERF_SAMPLE_STREAM_ID) 1339 size += sizeof(data->stream_id); 1340 1341 if (sample_type & PERF_SAMPLE_CPU) 1342 size += sizeof(data->cpu_entry); 1343 1344 event->id_header_size = size; 1345 } 1346 1347 static bool perf_event_validate_size(struct perf_event *event) 1348 { 1349 /* 1350 * The values computed here will be over-written when we actually 1351 * attach the event. 1352 */ 1353 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); 1354 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); 1355 perf_event__id_header_size(event); 1356 1357 /* 1358 * Sum the lot; should not exceed the 64k limit we have on records. 1359 * Conservative limit to allow for callchains and other variable fields. 1360 */ 1361 if (event->read_size + event->header_size + 1362 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) 1363 return false; 1364 1365 return true; 1366 } 1367 1368 static void perf_group_attach(struct perf_event *event) 1369 { 1370 struct perf_event *group_leader = event->group_leader, *pos; 1371 1372 /* 1373 * We can have double attach due to group movement in perf_event_open. 1374 */ 1375 if (event->attach_state & PERF_ATTACH_GROUP) 1376 return; 1377 1378 event->attach_state |= PERF_ATTACH_GROUP; 1379 1380 if (group_leader == event) 1381 return; 1382 1383 WARN_ON_ONCE(group_leader->ctx != event->ctx); 1384 1385 if (group_leader->group_flags & PERF_GROUP_SOFTWARE && 1386 !is_software_event(event)) 1387 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; 1388 1389 list_add_tail(&event->group_entry, &group_leader->sibling_list); 1390 group_leader->nr_siblings++; 1391 1392 perf_event__header_size(group_leader); 1393 1394 list_for_each_entry(pos, &group_leader->sibling_list, group_entry) 1395 perf_event__header_size(pos); 1396 } 1397 1398 /* 1399 * Remove a event from the lists for its context. 1400 * Must be called with ctx->mutex and ctx->lock held. 1401 */ 1402 static void 1403 list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1404 { 1405 struct perf_cpu_context *cpuctx; 1406 1407 WARN_ON_ONCE(event->ctx != ctx); 1408 lockdep_assert_held(&ctx->lock); 1409 1410 /* 1411 * We can have double detach due to exit/hot-unplug + close. 1412 */ 1413 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) 1414 return; 1415 1416 event->attach_state &= ~PERF_ATTACH_CONTEXT; 1417 1418 if (is_cgroup_event(event)) { 1419 ctx->nr_cgroups--; 1420 cpuctx = __get_cpu_context(ctx); 1421 /* 1422 * if there are no more cgroup events 1423 * then cler cgrp to avoid stale pointer 1424 * in update_cgrp_time_from_cpuctx() 1425 */ 1426 if (!ctx->nr_cgroups) 1427 cpuctx->cgrp = NULL; 1428 } 1429 1430 ctx->nr_events--; 1431 if (event->attr.inherit_stat) 1432 ctx->nr_stat--; 1433 1434 list_del_rcu(&event->event_entry); 1435 1436 if (event->group_leader == event) 1437 list_del_init(&event->group_entry); 1438 1439 update_group_times(event); 1440 1441 /* 1442 * If event was in error state, then keep it 1443 * that way, otherwise bogus counts will be 1444 * returned on read(). The only way to get out 1445 * of error state is by explicit re-enabling 1446 * of the event 1447 */ 1448 if (event->state > PERF_EVENT_STATE_OFF) 1449 event->state = PERF_EVENT_STATE_OFF; 1450 1451 ctx->generation++; 1452 } 1453 1454 static void perf_group_detach(struct perf_event *event) 1455 { 1456 struct perf_event *sibling, *tmp; 1457 struct list_head *list = NULL; 1458 1459 /* 1460 * We can have double detach due to exit/hot-unplug + close. 1461 */ 1462 if (!(event->attach_state & PERF_ATTACH_GROUP)) 1463 return; 1464 1465 event->attach_state &= ~PERF_ATTACH_GROUP; 1466 1467 /* 1468 * If this is a sibling, remove it from its group. 1469 */ 1470 if (event->group_leader != event) { 1471 list_del_init(&event->group_entry); 1472 event->group_leader->nr_siblings--; 1473 goto out; 1474 } 1475 1476 if (!list_empty(&event->group_entry)) 1477 list = &event->group_entry; 1478 1479 /* 1480 * If this was a group event with sibling events then 1481 * upgrade the siblings to singleton events by adding them 1482 * to whatever list we are on. 1483 */ 1484 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { 1485 if (list) 1486 list_move_tail(&sibling->group_entry, list); 1487 sibling->group_leader = sibling; 1488 1489 /* Inherit group flags from the previous leader */ 1490 sibling->group_flags = event->group_flags; 1491 1492 WARN_ON_ONCE(sibling->ctx != event->ctx); 1493 } 1494 1495 out: 1496 perf_event__header_size(event->group_leader); 1497 1498 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) 1499 perf_event__header_size(tmp); 1500 } 1501 1502 /* 1503 * User event without the task. 1504 */ 1505 static bool is_orphaned_event(struct perf_event *event) 1506 { 1507 return event && !is_kernel_event(event) && !event->owner; 1508 } 1509 1510 /* 1511 * Event has a parent but parent's task finished and it's 1512 * alive only because of children holding refference. 1513 */ 1514 static bool is_orphaned_child(struct perf_event *event) 1515 { 1516 return is_orphaned_event(event->parent); 1517 } 1518 1519 static void orphans_remove_work(struct work_struct *work); 1520 1521 static void schedule_orphans_remove(struct perf_event_context *ctx) 1522 { 1523 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) 1524 return; 1525 1526 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { 1527 get_ctx(ctx); 1528 ctx->orphans_remove_sched = true; 1529 } 1530 } 1531 1532 static int __init perf_workqueue_init(void) 1533 { 1534 perf_wq = create_singlethread_workqueue("perf"); 1535 WARN(!perf_wq, "failed to create perf workqueue\n"); 1536 return perf_wq ? 0 : -1; 1537 } 1538 1539 core_initcall(perf_workqueue_init); 1540 1541 static inline int pmu_filter_match(struct perf_event *event) 1542 { 1543 struct pmu *pmu = event->pmu; 1544 return pmu->filter_match ? pmu->filter_match(event) : 1; 1545 } 1546 1547 static inline int 1548 event_filter_match(struct perf_event *event) 1549 { 1550 return (event->cpu == -1 || event->cpu == smp_processor_id()) 1551 && perf_cgroup_match(event) && pmu_filter_match(event); 1552 } 1553 1554 static void 1555 event_sched_out(struct perf_event *event, 1556 struct perf_cpu_context *cpuctx, 1557 struct perf_event_context *ctx) 1558 { 1559 u64 tstamp = perf_event_time(event); 1560 u64 delta; 1561 1562 WARN_ON_ONCE(event->ctx != ctx); 1563 lockdep_assert_held(&ctx->lock); 1564 1565 /* 1566 * An event which could not be activated because of 1567 * filter mismatch still needs to have its timings 1568 * maintained, otherwise bogus information is return 1569 * via read() for time_enabled, time_running: 1570 */ 1571 if (event->state == PERF_EVENT_STATE_INACTIVE 1572 && !event_filter_match(event)) { 1573 delta = tstamp - event->tstamp_stopped; 1574 event->tstamp_running += delta; 1575 event->tstamp_stopped = tstamp; 1576 } 1577 1578 if (event->state != PERF_EVENT_STATE_ACTIVE) 1579 return; 1580 1581 perf_pmu_disable(event->pmu); 1582 1583 event->state = PERF_EVENT_STATE_INACTIVE; 1584 if (event->pending_disable) { 1585 event->pending_disable = 0; 1586 event->state = PERF_EVENT_STATE_OFF; 1587 } 1588 event->tstamp_stopped = tstamp; 1589 event->pmu->del(event, 0); 1590 event->oncpu = -1; 1591 1592 if (!is_software_event(event)) 1593 cpuctx->active_oncpu--; 1594 if (!--ctx->nr_active) 1595 perf_event_ctx_deactivate(ctx); 1596 if (event->attr.freq && event->attr.sample_freq) 1597 ctx->nr_freq--; 1598 if (event->attr.exclusive || !cpuctx->active_oncpu) 1599 cpuctx->exclusive = 0; 1600 1601 if (is_orphaned_child(event)) 1602 schedule_orphans_remove(ctx); 1603 1604 perf_pmu_enable(event->pmu); 1605 } 1606 1607 static void 1608 group_sched_out(struct perf_event *group_event, 1609 struct perf_cpu_context *cpuctx, 1610 struct perf_event_context *ctx) 1611 { 1612 struct perf_event *event; 1613 int state = group_event->state; 1614 1615 event_sched_out(group_event, cpuctx, ctx); 1616 1617 /* 1618 * Schedule out siblings (if any): 1619 */ 1620 list_for_each_entry(event, &group_event->sibling_list, group_entry) 1621 event_sched_out(event, cpuctx, ctx); 1622 1623 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) 1624 cpuctx->exclusive = 0; 1625 } 1626 1627 struct remove_event { 1628 struct perf_event *event; 1629 bool detach_group; 1630 }; 1631 1632 /* 1633 * Cross CPU call to remove a performance event 1634 * 1635 * We disable the event on the hardware level first. After that we 1636 * remove it from the context list. 1637 */ 1638 static int __perf_remove_from_context(void *info) 1639 { 1640 struct remove_event *re = info; 1641 struct perf_event *event = re->event; 1642 struct perf_event_context *ctx = event->ctx; 1643 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1644 1645 raw_spin_lock(&ctx->lock); 1646 event_sched_out(event, cpuctx, ctx); 1647 if (re->detach_group) 1648 perf_group_detach(event); 1649 list_del_event(event, ctx); 1650 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1651 ctx->is_active = 0; 1652 cpuctx->task_ctx = NULL; 1653 } 1654 raw_spin_unlock(&ctx->lock); 1655 1656 return 0; 1657 } 1658 1659 1660 /* 1661 * Remove the event from a task's (or a CPU's) list of events. 1662 * 1663 * CPU events are removed with a smp call. For task events we only 1664 * call when the task is on a CPU. 1665 * 1666 * If event->ctx is a cloned context, callers must make sure that 1667 * every task struct that event->ctx->task could possibly point to 1668 * remains valid. This is OK when called from perf_release since 1669 * that only calls us on the top-level context, which can't be a clone. 1670 * When called from perf_event_exit_task, it's OK because the 1671 * context has been detached from its task. 1672 */ 1673 static void perf_remove_from_context(struct perf_event *event, bool detach_group) 1674 { 1675 struct perf_event_context *ctx = event->ctx; 1676 struct task_struct *task = ctx->task; 1677 struct remove_event re = { 1678 .event = event, 1679 .detach_group = detach_group, 1680 }; 1681 1682 lockdep_assert_held(&ctx->mutex); 1683 1684 if (!task) { 1685 /* 1686 * Per cpu events are removed via an smp call. The removal can 1687 * fail if the CPU is currently offline, but in that case we 1688 * already called __perf_remove_from_context from 1689 * perf_event_exit_cpu. 1690 */ 1691 cpu_function_call(event->cpu, __perf_remove_from_context, &re); 1692 return; 1693 } 1694 1695 retry: 1696 if (!task_function_call(task, __perf_remove_from_context, &re)) 1697 return; 1698 1699 raw_spin_lock_irq(&ctx->lock); 1700 /* 1701 * If we failed to find a running task, but find the context active now 1702 * that we've acquired the ctx->lock, retry. 1703 */ 1704 if (ctx->is_active) { 1705 raw_spin_unlock_irq(&ctx->lock); 1706 /* 1707 * Reload the task pointer, it might have been changed by 1708 * a concurrent perf_event_context_sched_out(). 1709 */ 1710 task = ctx->task; 1711 goto retry; 1712 } 1713 1714 /* 1715 * Since the task isn't running, its safe to remove the event, us 1716 * holding the ctx->lock ensures the task won't get scheduled in. 1717 */ 1718 if (detach_group) 1719 perf_group_detach(event); 1720 list_del_event(event, ctx); 1721 raw_spin_unlock_irq(&ctx->lock); 1722 } 1723 1724 /* 1725 * Cross CPU call to disable a performance event 1726 */ 1727 int __perf_event_disable(void *info) 1728 { 1729 struct perf_event *event = info; 1730 struct perf_event_context *ctx = event->ctx; 1731 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1732 1733 /* 1734 * If this is a per-task event, need to check whether this 1735 * event's task is the current task on this cpu. 1736 * 1737 * Can trigger due to concurrent perf_event_context_sched_out() 1738 * flipping contexts around. 1739 */ 1740 if (ctx->task && cpuctx->task_ctx != ctx) 1741 return -EINVAL; 1742 1743 raw_spin_lock(&ctx->lock); 1744 1745 /* 1746 * If the event is on, turn it off. 1747 * If it is in error state, leave it in error state. 1748 */ 1749 if (event->state >= PERF_EVENT_STATE_INACTIVE) { 1750 update_context_time(ctx); 1751 update_cgrp_time_from_event(event); 1752 update_group_times(event); 1753 if (event == event->group_leader) 1754 group_sched_out(event, cpuctx, ctx); 1755 else 1756 event_sched_out(event, cpuctx, ctx); 1757 event->state = PERF_EVENT_STATE_OFF; 1758 } 1759 1760 raw_spin_unlock(&ctx->lock); 1761 1762 return 0; 1763 } 1764 1765 /* 1766 * Disable a event. 1767 * 1768 * If event->ctx is a cloned context, callers must make sure that 1769 * every task struct that event->ctx->task could possibly point to 1770 * remains valid. This condition is satisifed when called through 1771 * perf_event_for_each_child or perf_event_for_each because they 1772 * hold the top-level event's child_mutex, so any descendant that 1773 * goes to exit will block in sync_child_event. 1774 * When called from perf_pending_event it's OK because event->ctx 1775 * is the current context on this CPU and preemption is disabled, 1776 * hence we can't get into perf_event_task_sched_out for this context. 1777 */ 1778 static void _perf_event_disable(struct perf_event *event) 1779 { 1780 struct perf_event_context *ctx = event->ctx; 1781 struct task_struct *task = ctx->task; 1782 1783 if (!task) { 1784 /* 1785 * Disable the event on the cpu that it's on 1786 */ 1787 cpu_function_call(event->cpu, __perf_event_disable, event); 1788 return; 1789 } 1790 1791 retry: 1792 if (!task_function_call(task, __perf_event_disable, event)) 1793 return; 1794 1795 raw_spin_lock_irq(&ctx->lock); 1796 /* 1797 * If the event is still active, we need to retry the cross-call. 1798 */ 1799 if (event->state == PERF_EVENT_STATE_ACTIVE) { 1800 raw_spin_unlock_irq(&ctx->lock); 1801 /* 1802 * Reload the task pointer, it might have been changed by 1803 * a concurrent perf_event_context_sched_out(). 1804 */ 1805 task = ctx->task; 1806 goto retry; 1807 } 1808 1809 /* 1810 * Since we have the lock this context can't be scheduled 1811 * in, so we can change the state safely. 1812 */ 1813 if (event->state == PERF_EVENT_STATE_INACTIVE) { 1814 update_group_times(event); 1815 event->state = PERF_EVENT_STATE_OFF; 1816 } 1817 raw_spin_unlock_irq(&ctx->lock); 1818 } 1819 1820 /* 1821 * Strictly speaking kernel users cannot create groups and therefore this 1822 * interface does not need the perf_event_ctx_lock() magic. 1823 */ 1824 void perf_event_disable(struct perf_event *event) 1825 { 1826 struct perf_event_context *ctx; 1827 1828 ctx = perf_event_ctx_lock(event); 1829 _perf_event_disable(event); 1830 perf_event_ctx_unlock(event, ctx); 1831 } 1832 EXPORT_SYMBOL_GPL(perf_event_disable); 1833 1834 static void perf_set_shadow_time(struct perf_event *event, 1835 struct perf_event_context *ctx, 1836 u64 tstamp) 1837 { 1838 /* 1839 * use the correct time source for the time snapshot 1840 * 1841 * We could get by without this by leveraging the 1842 * fact that to get to this function, the caller 1843 * has most likely already called update_context_time() 1844 * and update_cgrp_time_xx() and thus both timestamp 1845 * are identical (or very close). Given that tstamp is, 1846 * already adjusted for cgroup, we could say that: 1847 * tstamp - ctx->timestamp 1848 * is equivalent to 1849 * tstamp - cgrp->timestamp. 1850 * 1851 * Then, in perf_output_read(), the calculation would 1852 * work with no changes because: 1853 * - event is guaranteed scheduled in 1854 * - no scheduled out in between 1855 * - thus the timestamp would be the same 1856 * 1857 * But this is a bit hairy. 1858 * 1859 * So instead, we have an explicit cgroup call to remain 1860 * within the time time source all along. We believe it 1861 * is cleaner and simpler to understand. 1862 */ 1863 if (is_cgroup_event(event)) 1864 perf_cgroup_set_shadow_time(event, tstamp); 1865 else 1866 event->shadow_ctx_time = tstamp - ctx->timestamp; 1867 } 1868 1869 #define MAX_INTERRUPTS (~0ULL) 1870 1871 static void perf_log_throttle(struct perf_event *event, int enable); 1872 static void perf_log_itrace_start(struct perf_event *event); 1873 1874 static int 1875 event_sched_in(struct perf_event *event, 1876 struct perf_cpu_context *cpuctx, 1877 struct perf_event_context *ctx) 1878 { 1879 u64 tstamp = perf_event_time(event); 1880 int ret = 0; 1881 1882 lockdep_assert_held(&ctx->lock); 1883 1884 if (event->state <= PERF_EVENT_STATE_OFF) 1885 return 0; 1886 1887 event->state = PERF_EVENT_STATE_ACTIVE; 1888 event->oncpu = smp_processor_id(); 1889 1890 /* 1891 * Unthrottle events, since we scheduled we might have missed several 1892 * ticks already, also for a heavily scheduling task there is little 1893 * guarantee it'll get a tick in a timely manner. 1894 */ 1895 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { 1896 perf_log_throttle(event, 1); 1897 event->hw.interrupts = 0; 1898 } 1899 1900 /* 1901 * The new state must be visible before we turn it on in the hardware: 1902 */ 1903 smp_wmb(); 1904 1905 perf_pmu_disable(event->pmu); 1906 1907 perf_set_shadow_time(event, ctx, tstamp); 1908 1909 perf_log_itrace_start(event); 1910 1911 if (event->pmu->add(event, PERF_EF_START)) { 1912 event->state = PERF_EVENT_STATE_INACTIVE; 1913 event->oncpu = -1; 1914 ret = -EAGAIN; 1915 goto out; 1916 } 1917 1918 event->tstamp_running += tstamp - event->tstamp_stopped; 1919 1920 if (!is_software_event(event)) 1921 cpuctx->active_oncpu++; 1922 if (!ctx->nr_active++) 1923 perf_event_ctx_activate(ctx); 1924 if (event->attr.freq && event->attr.sample_freq) 1925 ctx->nr_freq++; 1926 1927 if (event->attr.exclusive) 1928 cpuctx->exclusive = 1; 1929 1930 if (is_orphaned_child(event)) 1931 schedule_orphans_remove(ctx); 1932 1933 out: 1934 perf_pmu_enable(event->pmu); 1935 1936 return ret; 1937 } 1938 1939 static int 1940 group_sched_in(struct perf_event *group_event, 1941 struct perf_cpu_context *cpuctx, 1942 struct perf_event_context *ctx) 1943 { 1944 struct perf_event *event, *partial_group = NULL; 1945 struct pmu *pmu = ctx->pmu; 1946 u64 now = ctx->time; 1947 bool simulate = false; 1948 1949 if (group_event->state == PERF_EVENT_STATE_OFF) 1950 return 0; 1951 1952 pmu->start_txn(pmu, PERF_PMU_TXN_ADD); 1953 1954 if (event_sched_in(group_event, cpuctx, ctx)) { 1955 pmu->cancel_txn(pmu); 1956 perf_mux_hrtimer_restart(cpuctx); 1957 return -EAGAIN; 1958 } 1959 1960 /* 1961 * Schedule in siblings as one group (if any): 1962 */ 1963 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1964 if (event_sched_in(event, cpuctx, ctx)) { 1965 partial_group = event; 1966 goto group_error; 1967 } 1968 } 1969 1970 if (!pmu->commit_txn(pmu)) 1971 return 0; 1972 1973 group_error: 1974 /* 1975 * Groups can be scheduled in as one unit only, so undo any 1976 * partial group before returning: 1977 * The events up to the failed event are scheduled out normally, 1978 * tstamp_stopped will be updated. 1979 * 1980 * The failed events and the remaining siblings need to have 1981 * their timings updated as if they had gone thru event_sched_in() 1982 * and event_sched_out(). This is required to get consistent timings 1983 * across the group. This also takes care of the case where the group 1984 * could never be scheduled by ensuring tstamp_stopped is set to mark 1985 * the time the event was actually stopped, such that time delta 1986 * calculation in update_event_times() is correct. 1987 */ 1988 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1989 if (event == partial_group) 1990 simulate = true; 1991 1992 if (simulate) { 1993 event->tstamp_running += now - event->tstamp_stopped; 1994 event->tstamp_stopped = now; 1995 } else { 1996 event_sched_out(event, cpuctx, ctx); 1997 } 1998 } 1999 event_sched_out(group_event, cpuctx, ctx); 2000 2001 pmu->cancel_txn(pmu); 2002 2003 perf_mux_hrtimer_restart(cpuctx); 2004 2005 return -EAGAIN; 2006 } 2007 2008 /* 2009 * Work out whether we can put this event group on the CPU now. 2010 */ 2011 static int group_can_go_on(struct perf_event *event, 2012 struct perf_cpu_context *cpuctx, 2013 int can_add_hw) 2014 { 2015 /* 2016 * Groups consisting entirely of software events can always go on. 2017 */ 2018 if (event->group_flags & PERF_GROUP_SOFTWARE) 2019 return 1; 2020 /* 2021 * If an exclusive group is already on, no other hardware 2022 * events can go on. 2023 */ 2024 if (cpuctx->exclusive) 2025 return 0; 2026 /* 2027 * If this group is exclusive and there are already 2028 * events on the CPU, it can't go on. 2029 */ 2030 if (event->attr.exclusive && cpuctx->active_oncpu) 2031 return 0; 2032 /* 2033 * Otherwise, try to add it if all previous groups were able 2034 * to go on. 2035 */ 2036 return can_add_hw; 2037 } 2038 2039 static void add_event_to_ctx(struct perf_event *event, 2040 struct perf_event_context *ctx) 2041 { 2042 u64 tstamp = perf_event_time(event); 2043 2044 list_add_event(event, ctx); 2045 perf_group_attach(event); 2046 event->tstamp_enabled = tstamp; 2047 event->tstamp_running = tstamp; 2048 event->tstamp_stopped = tstamp; 2049 } 2050 2051 static void task_ctx_sched_out(struct perf_event_context *ctx); 2052 static void 2053 ctx_sched_in(struct perf_event_context *ctx, 2054 struct perf_cpu_context *cpuctx, 2055 enum event_type_t event_type, 2056 struct task_struct *task); 2057 2058 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2059 struct perf_event_context *ctx, 2060 struct task_struct *task) 2061 { 2062 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); 2063 if (ctx) 2064 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); 2065 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); 2066 if (ctx) 2067 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); 2068 } 2069 2070 /* 2071 * Cross CPU call to install and enable a performance event 2072 * 2073 * Must be called with ctx->mutex held 2074 */ 2075 static int __perf_install_in_context(void *info) 2076 { 2077 struct perf_event *event = info; 2078 struct perf_event_context *ctx = event->ctx; 2079 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2080 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2081 struct task_struct *task = current; 2082 2083 perf_ctx_lock(cpuctx, task_ctx); 2084 perf_pmu_disable(cpuctx->ctx.pmu); 2085 2086 /* 2087 * If there was an active task_ctx schedule it out. 2088 */ 2089 if (task_ctx) 2090 task_ctx_sched_out(task_ctx); 2091 2092 /* 2093 * If the context we're installing events in is not the 2094 * active task_ctx, flip them. 2095 */ 2096 if (ctx->task && task_ctx != ctx) { 2097 if (task_ctx) 2098 raw_spin_unlock(&task_ctx->lock); 2099 raw_spin_lock(&ctx->lock); 2100 task_ctx = ctx; 2101 } 2102 2103 if (task_ctx) { 2104 cpuctx->task_ctx = task_ctx; 2105 task = task_ctx->task; 2106 } 2107 2108 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2109 2110 update_context_time(ctx); 2111 /* 2112 * update cgrp time only if current cgrp 2113 * matches event->cgrp. Must be done before 2114 * calling add_event_to_ctx() 2115 */ 2116 update_cgrp_time_from_event(event); 2117 2118 add_event_to_ctx(event, ctx); 2119 2120 /* 2121 * Schedule everything back in 2122 */ 2123 perf_event_sched_in(cpuctx, task_ctx, task); 2124 2125 perf_pmu_enable(cpuctx->ctx.pmu); 2126 perf_ctx_unlock(cpuctx, task_ctx); 2127 2128 return 0; 2129 } 2130 2131 /* 2132 * Attach a performance event to a context 2133 * 2134 * First we add the event to the list with the hardware enable bit 2135 * in event->hw_config cleared. 2136 * 2137 * If the event is attached to a task which is on a CPU we use a smp 2138 * call to enable it in the task context. The task might have been 2139 * scheduled away, but we check this in the smp call again. 2140 */ 2141 static void 2142 perf_install_in_context(struct perf_event_context *ctx, 2143 struct perf_event *event, 2144 int cpu) 2145 { 2146 struct task_struct *task = ctx->task; 2147 2148 lockdep_assert_held(&ctx->mutex); 2149 2150 event->ctx = ctx; 2151 if (event->cpu != -1) 2152 event->cpu = cpu; 2153 2154 if (!task) { 2155 /* 2156 * Per cpu events are installed via an smp call and 2157 * the install is always successful. 2158 */ 2159 cpu_function_call(cpu, __perf_install_in_context, event); 2160 return; 2161 } 2162 2163 retry: 2164 if (!task_function_call(task, __perf_install_in_context, event)) 2165 return; 2166 2167 raw_spin_lock_irq(&ctx->lock); 2168 /* 2169 * If we failed to find a running task, but find the context active now 2170 * that we've acquired the ctx->lock, retry. 2171 */ 2172 if (ctx->is_active) { 2173 raw_spin_unlock_irq(&ctx->lock); 2174 /* 2175 * Reload the task pointer, it might have been changed by 2176 * a concurrent perf_event_context_sched_out(). 2177 */ 2178 task = ctx->task; 2179 goto retry; 2180 } 2181 2182 /* 2183 * Since the task isn't running, its safe to add the event, us holding 2184 * the ctx->lock ensures the task won't get scheduled in. 2185 */ 2186 add_event_to_ctx(event, ctx); 2187 raw_spin_unlock_irq(&ctx->lock); 2188 } 2189 2190 /* 2191 * Put a event into inactive state and update time fields. 2192 * Enabling the leader of a group effectively enables all 2193 * the group members that aren't explicitly disabled, so we 2194 * have to update their ->tstamp_enabled also. 2195 * Note: this works for group members as well as group leaders 2196 * since the non-leader members' sibling_lists will be empty. 2197 */ 2198 static void __perf_event_mark_enabled(struct perf_event *event) 2199 { 2200 struct perf_event *sub; 2201 u64 tstamp = perf_event_time(event); 2202 2203 event->state = PERF_EVENT_STATE_INACTIVE; 2204 event->tstamp_enabled = tstamp - event->total_time_enabled; 2205 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2206 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2207 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2208 } 2209 } 2210 2211 /* 2212 * Cross CPU call to enable a performance event 2213 */ 2214 static int __perf_event_enable(void *info) 2215 { 2216 struct perf_event *event = info; 2217 struct perf_event_context *ctx = event->ctx; 2218 struct perf_event *leader = event->group_leader; 2219 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2220 int err; 2221 2222 /* 2223 * There's a time window between 'ctx->is_active' check 2224 * in perf_event_enable function and this place having: 2225 * - IRQs on 2226 * - ctx->lock unlocked 2227 * 2228 * where the task could be killed and 'ctx' deactivated 2229 * by perf_event_exit_task. 2230 */ 2231 if (!ctx->is_active) 2232 return -EINVAL; 2233 2234 raw_spin_lock(&ctx->lock); 2235 update_context_time(ctx); 2236 2237 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2238 goto unlock; 2239 2240 /* 2241 * set current task's cgroup time reference point 2242 */ 2243 perf_cgroup_set_timestamp(current, ctx); 2244 2245 __perf_event_mark_enabled(event); 2246 2247 if (!event_filter_match(event)) { 2248 if (is_cgroup_event(event)) 2249 perf_cgroup_defer_enabled(event); 2250 goto unlock; 2251 } 2252 2253 /* 2254 * If the event is in a group and isn't the group leader, 2255 * then don't put it on unless the group is on. 2256 */ 2257 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2258 goto unlock; 2259 2260 if (!group_can_go_on(event, cpuctx, 1)) { 2261 err = -EEXIST; 2262 } else { 2263 if (event == leader) 2264 err = group_sched_in(event, cpuctx, ctx); 2265 else 2266 err = event_sched_in(event, cpuctx, ctx); 2267 } 2268 2269 if (err) { 2270 /* 2271 * If this event can't go on and it's part of a 2272 * group, then the whole group has to come off. 2273 */ 2274 if (leader != event) { 2275 group_sched_out(leader, cpuctx, ctx); 2276 perf_mux_hrtimer_restart(cpuctx); 2277 } 2278 if (leader->attr.pinned) { 2279 update_group_times(leader); 2280 leader->state = PERF_EVENT_STATE_ERROR; 2281 } 2282 } 2283 2284 unlock: 2285 raw_spin_unlock(&ctx->lock); 2286 2287 return 0; 2288 } 2289 2290 /* 2291 * Enable a event. 2292 * 2293 * If event->ctx is a cloned context, callers must make sure that 2294 * every task struct that event->ctx->task could possibly point to 2295 * remains valid. This condition is satisfied when called through 2296 * perf_event_for_each_child or perf_event_for_each as described 2297 * for perf_event_disable. 2298 */ 2299 static void _perf_event_enable(struct perf_event *event) 2300 { 2301 struct perf_event_context *ctx = event->ctx; 2302 struct task_struct *task = ctx->task; 2303 2304 if (!task) { 2305 /* 2306 * Enable the event on the cpu that it's on 2307 */ 2308 cpu_function_call(event->cpu, __perf_event_enable, event); 2309 return; 2310 } 2311 2312 raw_spin_lock_irq(&ctx->lock); 2313 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2314 goto out; 2315 2316 /* 2317 * If the event is in error state, clear that first. 2318 * That way, if we see the event in error state below, we 2319 * know that it has gone back into error state, as distinct 2320 * from the task having been scheduled away before the 2321 * cross-call arrived. 2322 */ 2323 if (event->state == PERF_EVENT_STATE_ERROR) 2324 event->state = PERF_EVENT_STATE_OFF; 2325 2326 retry: 2327 if (!ctx->is_active) { 2328 __perf_event_mark_enabled(event); 2329 goto out; 2330 } 2331 2332 raw_spin_unlock_irq(&ctx->lock); 2333 2334 if (!task_function_call(task, __perf_event_enable, event)) 2335 return; 2336 2337 raw_spin_lock_irq(&ctx->lock); 2338 2339 /* 2340 * If the context is active and the event is still off, 2341 * we need to retry the cross-call. 2342 */ 2343 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2344 /* 2345 * task could have been flipped by a concurrent 2346 * perf_event_context_sched_out() 2347 */ 2348 task = ctx->task; 2349 goto retry; 2350 } 2351 2352 out: 2353 raw_spin_unlock_irq(&ctx->lock); 2354 } 2355 2356 /* 2357 * See perf_event_disable(); 2358 */ 2359 void perf_event_enable(struct perf_event *event) 2360 { 2361 struct perf_event_context *ctx; 2362 2363 ctx = perf_event_ctx_lock(event); 2364 _perf_event_enable(event); 2365 perf_event_ctx_unlock(event, ctx); 2366 } 2367 EXPORT_SYMBOL_GPL(perf_event_enable); 2368 2369 static int _perf_event_refresh(struct perf_event *event, int refresh) 2370 { 2371 /* 2372 * not supported on inherited events 2373 */ 2374 if (event->attr.inherit || !is_sampling_event(event)) 2375 return -EINVAL; 2376 2377 atomic_add(refresh, &event->event_limit); 2378 _perf_event_enable(event); 2379 2380 return 0; 2381 } 2382 2383 /* 2384 * See perf_event_disable() 2385 */ 2386 int perf_event_refresh(struct perf_event *event, int refresh) 2387 { 2388 struct perf_event_context *ctx; 2389 int ret; 2390 2391 ctx = perf_event_ctx_lock(event); 2392 ret = _perf_event_refresh(event, refresh); 2393 perf_event_ctx_unlock(event, ctx); 2394 2395 return ret; 2396 } 2397 EXPORT_SYMBOL_GPL(perf_event_refresh); 2398 2399 static void ctx_sched_out(struct perf_event_context *ctx, 2400 struct perf_cpu_context *cpuctx, 2401 enum event_type_t event_type) 2402 { 2403 struct perf_event *event; 2404 int is_active = ctx->is_active; 2405 2406 ctx->is_active &= ~event_type; 2407 if (likely(!ctx->nr_events)) 2408 return; 2409 2410 update_context_time(ctx); 2411 update_cgrp_time_from_cpuctx(cpuctx); 2412 if (!ctx->nr_active) 2413 return; 2414 2415 perf_pmu_disable(ctx->pmu); 2416 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2417 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2418 group_sched_out(event, cpuctx, ctx); 2419 } 2420 2421 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2422 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2423 group_sched_out(event, cpuctx, ctx); 2424 } 2425 perf_pmu_enable(ctx->pmu); 2426 } 2427 2428 /* 2429 * Test whether two contexts are equivalent, i.e. whether they have both been 2430 * cloned from the same version of the same context. 2431 * 2432 * Equivalence is measured using a generation number in the context that is 2433 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2434 * and list_del_event(). 2435 */ 2436 static int context_equiv(struct perf_event_context *ctx1, 2437 struct perf_event_context *ctx2) 2438 { 2439 lockdep_assert_held(&ctx1->lock); 2440 lockdep_assert_held(&ctx2->lock); 2441 2442 /* Pinning disables the swap optimization */ 2443 if (ctx1->pin_count || ctx2->pin_count) 2444 return 0; 2445 2446 /* If ctx1 is the parent of ctx2 */ 2447 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2448 return 1; 2449 2450 /* If ctx2 is the parent of ctx1 */ 2451 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2452 return 1; 2453 2454 /* 2455 * If ctx1 and ctx2 have the same parent; we flatten the parent 2456 * hierarchy, see perf_event_init_context(). 2457 */ 2458 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2459 ctx1->parent_gen == ctx2->parent_gen) 2460 return 1; 2461 2462 /* Unmatched */ 2463 return 0; 2464 } 2465 2466 static void __perf_event_sync_stat(struct perf_event *event, 2467 struct perf_event *next_event) 2468 { 2469 u64 value; 2470 2471 if (!event->attr.inherit_stat) 2472 return; 2473 2474 /* 2475 * Update the event value, we cannot use perf_event_read() 2476 * because we're in the middle of a context switch and have IRQs 2477 * disabled, which upsets smp_call_function_single(), however 2478 * we know the event must be on the current CPU, therefore we 2479 * don't need to use it. 2480 */ 2481 switch (event->state) { 2482 case PERF_EVENT_STATE_ACTIVE: 2483 event->pmu->read(event); 2484 /* fall-through */ 2485 2486 case PERF_EVENT_STATE_INACTIVE: 2487 update_event_times(event); 2488 break; 2489 2490 default: 2491 break; 2492 } 2493 2494 /* 2495 * In order to keep per-task stats reliable we need to flip the event 2496 * values when we flip the contexts. 2497 */ 2498 value = local64_read(&next_event->count); 2499 value = local64_xchg(&event->count, value); 2500 local64_set(&next_event->count, value); 2501 2502 swap(event->total_time_enabled, next_event->total_time_enabled); 2503 swap(event->total_time_running, next_event->total_time_running); 2504 2505 /* 2506 * Since we swizzled the values, update the user visible data too. 2507 */ 2508 perf_event_update_userpage(event); 2509 perf_event_update_userpage(next_event); 2510 } 2511 2512 static void perf_event_sync_stat(struct perf_event_context *ctx, 2513 struct perf_event_context *next_ctx) 2514 { 2515 struct perf_event *event, *next_event; 2516 2517 if (!ctx->nr_stat) 2518 return; 2519 2520 update_context_time(ctx); 2521 2522 event = list_first_entry(&ctx->event_list, 2523 struct perf_event, event_entry); 2524 2525 next_event = list_first_entry(&next_ctx->event_list, 2526 struct perf_event, event_entry); 2527 2528 while (&event->event_entry != &ctx->event_list && 2529 &next_event->event_entry != &next_ctx->event_list) { 2530 2531 __perf_event_sync_stat(event, next_event); 2532 2533 event = list_next_entry(event, event_entry); 2534 next_event = list_next_entry(next_event, event_entry); 2535 } 2536 } 2537 2538 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2539 struct task_struct *next) 2540 { 2541 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2542 struct perf_event_context *next_ctx; 2543 struct perf_event_context *parent, *next_parent; 2544 struct perf_cpu_context *cpuctx; 2545 int do_switch = 1; 2546 2547 if (likely(!ctx)) 2548 return; 2549 2550 cpuctx = __get_cpu_context(ctx); 2551 if (!cpuctx->task_ctx) 2552 return; 2553 2554 rcu_read_lock(); 2555 next_ctx = next->perf_event_ctxp[ctxn]; 2556 if (!next_ctx) 2557 goto unlock; 2558 2559 parent = rcu_dereference(ctx->parent_ctx); 2560 next_parent = rcu_dereference(next_ctx->parent_ctx); 2561 2562 /* If neither context have a parent context; they cannot be clones. */ 2563 if (!parent && !next_parent) 2564 goto unlock; 2565 2566 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2567 /* 2568 * Looks like the two contexts are clones, so we might be 2569 * able to optimize the context switch. We lock both 2570 * contexts and check that they are clones under the 2571 * lock (including re-checking that neither has been 2572 * uncloned in the meantime). It doesn't matter which 2573 * order we take the locks because no other cpu could 2574 * be trying to lock both of these tasks. 2575 */ 2576 raw_spin_lock(&ctx->lock); 2577 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2578 if (context_equiv(ctx, next_ctx)) { 2579 /* 2580 * XXX do we need a memory barrier of sorts 2581 * wrt to rcu_dereference() of perf_event_ctxp 2582 */ 2583 task->perf_event_ctxp[ctxn] = next_ctx; 2584 next->perf_event_ctxp[ctxn] = ctx; 2585 ctx->task = next; 2586 next_ctx->task = task; 2587 2588 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2589 2590 do_switch = 0; 2591 2592 perf_event_sync_stat(ctx, next_ctx); 2593 } 2594 raw_spin_unlock(&next_ctx->lock); 2595 raw_spin_unlock(&ctx->lock); 2596 } 2597 unlock: 2598 rcu_read_unlock(); 2599 2600 if (do_switch) { 2601 raw_spin_lock(&ctx->lock); 2602 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2603 cpuctx->task_ctx = NULL; 2604 raw_spin_unlock(&ctx->lock); 2605 } 2606 } 2607 2608 void perf_sched_cb_dec(struct pmu *pmu) 2609 { 2610 this_cpu_dec(perf_sched_cb_usages); 2611 } 2612 2613 void perf_sched_cb_inc(struct pmu *pmu) 2614 { 2615 this_cpu_inc(perf_sched_cb_usages); 2616 } 2617 2618 /* 2619 * This function provides the context switch callback to the lower code 2620 * layer. It is invoked ONLY when the context switch callback is enabled. 2621 */ 2622 static void perf_pmu_sched_task(struct task_struct *prev, 2623 struct task_struct *next, 2624 bool sched_in) 2625 { 2626 struct perf_cpu_context *cpuctx; 2627 struct pmu *pmu; 2628 unsigned long flags; 2629 2630 if (prev == next) 2631 return; 2632 2633 local_irq_save(flags); 2634 2635 rcu_read_lock(); 2636 2637 list_for_each_entry_rcu(pmu, &pmus, entry) { 2638 if (pmu->sched_task) { 2639 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2640 2641 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2642 2643 perf_pmu_disable(pmu); 2644 2645 pmu->sched_task(cpuctx->task_ctx, sched_in); 2646 2647 perf_pmu_enable(pmu); 2648 2649 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2650 } 2651 } 2652 2653 rcu_read_unlock(); 2654 2655 local_irq_restore(flags); 2656 } 2657 2658 static void perf_event_switch(struct task_struct *task, 2659 struct task_struct *next_prev, bool sched_in); 2660 2661 #define for_each_task_context_nr(ctxn) \ 2662 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2663 2664 /* 2665 * Called from scheduler to remove the events of the current task, 2666 * with interrupts disabled. 2667 * 2668 * We stop each event and update the event value in event->count. 2669 * 2670 * This does not protect us against NMI, but disable() 2671 * sets the disabled bit in the control field of event _before_ 2672 * accessing the event control register. If a NMI hits, then it will 2673 * not restart the event. 2674 */ 2675 void __perf_event_task_sched_out(struct task_struct *task, 2676 struct task_struct *next) 2677 { 2678 int ctxn; 2679 2680 if (__this_cpu_read(perf_sched_cb_usages)) 2681 perf_pmu_sched_task(task, next, false); 2682 2683 if (atomic_read(&nr_switch_events)) 2684 perf_event_switch(task, next, false); 2685 2686 for_each_task_context_nr(ctxn) 2687 perf_event_context_sched_out(task, ctxn, next); 2688 2689 /* 2690 * if cgroup events exist on this CPU, then we need 2691 * to check if we have to switch out PMU state. 2692 * cgroup event are system-wide mode only 2693 */ 2694 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2695 perf_cgroup_sched_out(task, next); 2696 } 2697 2698 static void task_ctx_sched_out(struct perf_event_context *ctx) 2699 { 2700 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2701 2702 if (!cpuctx->task_ctx) 2703 return; 2704 2705 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2706 return; 2707 2708 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2709 cpuctx->task_ctx = NULL; 2710 } 2711 2712 /* 2713 * Called with IRQs disabled 2714 */ 2715 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2716 enum event_type_t event_type) 2717 { 2718 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2719 } 2720 2721 static void 2722 ctx_pinned_sched_in(struct perf_event_context *ctx, 2723 struct perf_cpu_context *cpuctx) 2724 { 2725 struct perf_event *event; 2726 2727 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2728 if (event->state <= PERF_EVENT_STATE_OFF) 2729 continue; 2730 if (!event_filter_match(event)) 2731 continue; 2732 2733 /* may need to reset tstamp_enabled */ 2734 if (is_cgroup_event(event)) 2735 perf_cgroup_mark_enabled(event, ctx); 2736 2737 if (group_can_go_on(event, cpuctx, 1)) 2738 group_sched_in(event, cpuctx, ctx); 2739 2740 /* 2741 * If this pinned group hasn't been scheduled, 2742 * put it in error state. 2743 */ 2744 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2745 update_group_times(event); 2746 event->state = PERF_EVENT_STATE_ERROR; 2747 } 2748 } 2749 } 2750 2751 static void 2752 ctx_flexible_sched_in(struct perf_event_context *ctx, 2753 struct perf_cpu_context *cpuctx) 2754 { 2755 struct perf_event *event; 2756 int can_add_hw = 1; 2757 2758 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2759 /* Ignore events in OFF or ERROR state */ 2760 if (event->state <= PERF_EVENT_STATE_OFF) 2761 continue; 2762 /* 2763 * Listen to the 'cpu' scheduling filter constraint 2764 * of events: 2765 */ 2766 if (!event_filter_match(event)) 2767 continue; 2768 2769 /* may need to reset tstamp_enabled */ 2770 if (is_cgroup_event(event)) 2771 perf_cgroup_mark_enabled(event, ctx); 2772 2773 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2774 if (group_sched_in(event, cpuctx, ctx)) 2775 can_add_hw = 0; 2776 } 2777 } 2778 } 2779 2780 static void 2781 ctx_sched_in(struct perf_event_context *ctx, 2782 struct perf_cpu_context *cpuctx, 2783 enum event_type_t event_type, 2784 struct task_struct *task) 2785 { 2786 u64 now; 2787 int is_active = ctx->is_active; 2788 2789 ctx->is_active |= event_type; 2790 if (likely(!ctx->nr_events)) 2791 return; 2792 2793 now = perf_clock(); 2794 ctx->timestamp = now; 2795 perf_cgroup_set_timestamp(task, ctx); 2796 /* 2797 * First go through the list and put on any pinned groups 2798 * in order to give them the best chance of going on. 2799 */ 2800 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2801 ctx_pinned_sched_in(ctx, cpuctx); 2802 2803 /* Then walk through the lower prio flexible groups */ 2804 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2805 ctx_flexible_sched_in(ctx, cpuctx); 2806 } 2807 2808 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2809 enum event_type_t event_type, 2810 struct task_struct *task) 2811 { 2812 struct perf_event_context *ctx = &cpuctx->ctx; 2813 2814 ctx_sched_in(ctx, cpuctx, event_type, task); 2815 } 2816 2817 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2818 struct task_struct *task) 2819 { 2820 struct perf_cpu_context *cpuctx; 2821 2822 cpuctx = __get_cpu_context(ctx); 2823 if (cpuctx->task_ctx == ctx) 2824 return; 2825 2826 perf_ctx_lock(cpuctx, ctx); 2827 perf_pmu_disable(ctx->pmu); 2828 /* 2829 * We want to keep the following priority order: 2830 * cpu pinned (that don't need to move), task pinned, 2831 * cpu flexible, task flexible. 2832 */ 2833 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2834 2835 if (ctx->nr_events) 2836 cpuctx->task_ctx = ctx; 2837 2838 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2839 2840 perf_pmu_enable(ctx->pmu); 2841 perf_ctx_unlock(cpuctx, ctx); 2842 } 2843 2844 /* 2845 * Called from scheduler to add the events of the current task 2846 * with interrupts disabled. 2847 * 2848 * We restore the event value and then enable it. 2849 * 2850 * This does not protect us against NMI, but enable() 2851 * sets the enabled bit in the control field of event _before_ 2852 * accessing the event control register. If a NMI hits, then it will 2853 * keep the event running. 2854 */ 2855 void __perf_event_task_sched_in(struct task_struct *prev, 2856 struct task_struct *task) 2857 { 2858 struct perf_event_context *ctx; 2859 int ctxn; 2860 2861 for_each_task_context_nr(ctxn) { 2862 ctx = task->perf_event_ctxp[ctxn]; 2863 if (likely(!ctx)) 2864 continue; 2865 2866 perf_event_context_sched_in(ctx, task); 2867 } 2868 /* 2869 * if cgroup events exist on this CPU, then we need 2870 * to check if we have to switch in PMU state. 2871 * cgroup event are system-wide mode only 2872 */ 2873 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2874 perf_cgroup_sched_in(prev, task); 2875 2876 if (atomic_read(&nr_switch_events)) 2877 perf_event_switch(task, prev, true); 2878 2879 if (__this_cpu_read(perf_sched_cb_usages)) 2880 perf_pmu_sched_task(prev, task, true); 2881 } 2882 2883 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2884 { 2885 u64 frequency = event->attr.sample_freq; 2886 u64 sec = NSEC_PER_SEC; 2887 u64 divisor, dividend; 2888 2889 int count_fls, nsec_fls, frequency_fls, sec_fls; 2890 2891 count_fls = fls64(count); 2892 nsec_fls = fls64(nsec); 2893 frequency_fls = fls64(frequency); 2894 sec_fls = 30; 2895 2896 /* 2897 * We got @count in @nsec, with a target of sample_freq HZ 2898 * the target period becomes: 2899 * 2900 * @count * 10^9 2901 * period = ------------------- 2902 * @nsec * sample_freq 2903 * 2904 */ 2905 2906 /* 2907 * Reduce accuracy by one bit such that @a and @b converge 2908 * to a similar magnitude. 2909 */ 2910 #define REDUCE_FLS(a, b) \ 2911 do { \ 2912 if (a##_fls > b##_fls) { \ 2913 a >>= 1; \ 2914 a##_fls--; \ 2915 } else { \ 2916 b >>= 1; \ 2917 b##_fls--; \ 2918 } \ 2919 } while (0) 2920 2921 /* 2922 * Reduce accuracy until either term fits in a u64, then proceed with 2923 * the other, so that finally we can do a u64/u64 division. 2924 */ 2925 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2926 REDUCE_FLS(nsec, frequency); 2927 REDUCE_FLS(sec, count); 2928 } 2929 2930 if (count_fls + sec_fls > 64) { 2931 divisor = nsec * frequency; 2932 2933 while (count_fls + sec_fls > 64) { 2934 REDUCE_FLS(count, sec); 2935 divisor >>= 1; 2936 } 2937 2938 dividend = count * sec; 2939 } else { 2940 dividend = count * sec; 2941 2942 while (nsec_fls + frequency_fls > 64) { 2943 REDUCE_FLS(nsec, frequency); 2944 dividend >>= 1; 2945 } 2946 2947 divisor = nsec * frequency; 2948 } 2949 2950 if (!divisor) 2951 return dividend; 2952 2953 return div64_u64(dividend, divisor); 2954 } 2955 2956 static DEFINE_PER_CPU(int, perf_throttled_count); 2957 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2958 2959 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2960 { 2961 struct hw_perf_event *hwc = &event->hw; 2962 s64 period, sample_period; 2963 s64 delta; 2964 2965 period = perf_calculate_period(event, nsec, count); 2966 2967 delta = (s64)(period - hwc->sample_period); 2968 delta = (delta + 7) / 8; /* low pass filter */ 2969 2970 sample_period = hwc->sample_period + delta; 2971 2972 if (!sample_period) 2973 sample_period = 1; 2974 2975 hwc->sample_period = sample_period; 2976 2977 if (local64_read(&hwc->period_left) > 8*sample_period) { 2978 if (disable) 2979 event->pmu->stop(event, PERF_EF_UPDATE); 2980 2981 local64_set(&hwc->period_left, 0); 2982 2983 if (disable) 2984 event->pmu->start(event, PERF_EF_RELOAD); 2985 } 2986 } 2987 2988 /* 2989 * combine freq adjustment with unthrottling to avoid two passes over the 2990 * events. At the same time, make sure, having freq events does not change 2991 * the rate of unthrottling as that would introduce bias. 2992 */ 2993 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2994 int needs_unthr) 2995 { 2996 struct perf_event *event; 2997 struct hw_perf_event *hwc; 2998 u64 now, period = TICK_NSEC; 2999 s64 delta; 3000 3001 /* 3002 * only need to iterate over all events iff: 3003 * - context have events in frequency mode (needs freq adjust) 3004 * - there are events to unthrottle on this cpu 3005 */ 3006 if (!(ctx->nr_freq || needs_unthr)) 3007 return; 3008 3009 raw_spin_lock(&ctx->lock); 3010 perf_pmu_disable(ctx->pmu); 3011 3012 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3013 if (event->state != PERF_EVENT_STATE_ACTIVE) 3014 continue; 3015 3016 if (!event_filter_match(event)) 3017 continue; 3018 3019 perf_pmu_disable(event->pmu); 3020 3021 hwc = &event->hw; 3022 3023 if (hwc->interrupts == MAX_INTERRUPTS) { 3024 hwc->interrupts = 0; 3025 perf_log_throttle(event, 1); 3026 event->pmu->start(event, 0); 3027 } 3028 3029 if (!event->attr.freq || !event->attr.sample_freq) 3030 goto next; 3031 3032 /* 3033 * stop the event and update event->count 3034 */ 3035 event->pmu->stop(event, PERF_EF_UPDATE); 3036 3037 now = local64_read(&event->count); 3038 delta = now - hwc->freq_count_stamp; 3039 hwc->freq_count_stamp = now; 3040 3041 /* 3042 * restart the event 3043 * reload only if value has changed 3044 * we have stopped the event so tell that 3045 * to perf_adjust_period() to avoid stopping it 3046 * twice. 3047 */ 3048 if (delta > 0) 3049 perf_adjust_period(event, period, delta, false); 3050 3051 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3052 next: 3053 perf_pmu_enable(event->pmu); 3054 } 3055 3056 perf_pmu_enable(ctx->pmu); 3057 raw_spin_unlock(&ctx->lock); 3058 } 3059 3060 /* 3061 * Round-robin a context's events: 3062 */ 3063 static void rotate_ctx(struct perf_event_context *ctx) 3064 { 3065 /* 3066 * Rotate the first entry last of non-pinned groups. Rotation might be 3067 * disabled by the inheritance code. 3068 */ 3069 if (!ctx->rotate_disable) 3070 list_rotate_left(&ctx->flexible_groups); 3071 } 3072 3073 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3074 { 3075 struct perf_event_context *ctx = NULL; 3076 int rotate = 0; 3077 3078 if (cpuctx->ctx.nr_events) { 3079 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3080 rotate = 1; 3081 } 3082 3083 ctx = cpuctx->task_ctx; 3084 if (ctx && ctx->nr_events) { 3085 if (ctx->nr_events != ctx->nr_active) 3086 rotate = 1; 3087 } 3088 3089 if (!rotate) 3090 goto done; 3091 3092 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3093 perf_pmu_disable(cpuctx->ctx.pmu); 3094 3095 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3096 if (ctx) 3097 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3098 3099 rotate_ctx(&cpuctx->ctx); 3100 if (ctx) 3101 rotate_ctx(ctx); 3102 3103 perf_event_sched_in(cpuctx, ctx, current); 3104 3105 perf_pmu_enable(cpuctx->ctx.pmu); 3106 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3107 done: 3108 3109 return rotate; 3110 } 3111 3112 #ifdef CONFIG_NO_HZ_FULL 3113 bool perf_event_can_stop_tick(void) 3114 { 3115 if (atomic_read(&nr_freq_events) || 3116 __this_cpu_read(perf_throttled_count)) 3117 return false; 3118 else 3119 return true; 3120 } 3121 #endif 3122 3123 void perf_event_task_tick(void) 3124 { 3125 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3126 struct perf_event_context *ctx, *tmp; 3127 int throttled; 3128 3129 WARN_ON(!irqs_disabled()); 3130 3131 __this_cpu_inc(perf_throttled_seq); 3132 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3133 3134 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3135 perf_adjust_freq_unthr_context(ctx, throttled); 3136 } 3137 3138 static int event_enable_on_exec(struct perf_event *event, 3139 struct perf_event_context *ctx) 3140 { 3141 if (!event->attr.enable_on_exec) 3142 return 0; 3143 3144 event->attr.enable_on_exec = 0; 3145 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3146 return 0; 3147 3148 __perf_event_mark_enabled(event); 3149 3150 return 1; 3151 } 3152 3153 /* 3154 * Enable all of a task's events that have been marked enable-on-exec. 3155 * This expects task == current. 3156 */ 3157 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3158 { 3159 struct perf_event_context *clone_ctx = NULL; 3160 struct perf_event *event; 3161 unsigned long flags; 3162 int enabled = 0; 3163 int ret; 3164 3165 local_irq_save(flags); 3166 if (!ctx || !ctx->nr_events) 3167 goto out; 3168 3169 /* 3170 * We must ctxsw out cgroup events to avoid conflict 3171 * when invoking perf_task_event_sched_in() later on 3172 * in this function. Otherwise we end up trying to 3173 * ctxswin cgroup events which are already scheduled 3174 * in. 3175 */ 3176 perf_cgroup_sched_out(current, NULL); 3177 3178 raw_spin_lock(&ctx->lock); 3179 task_ctx_sched_out(ctx); 3180 3181 list_for_each_entry(event, &ctx->event_list, event_entry) { 3182 ret = event_enable_on_exec(event, ctx); 3183 if (ret) 3184 enabled = 1; 3185 } 3186 3187 /* 3188 * Unclone this context if we enabled any event. 3189 */ 3190 if (enabled) 3191 clone_ctx = unclone_ctx(ctx); 3192 3193 raw_spin_unlock(&ctx->lock); 3194 3195 /* 3196 * Also calls ctxswin for cgroup events, if any: 3197 */ 3198 perf_event_context_sched_in(ctx, ctx->task); 3199 out: 3200 local_irq_restore(flags); 3201 3202 if (clone_ctx) 3203 put_ctx(clone_ctx); 3204 } 3205 3206 void perf_event_exec(void) 3207 { 3208 struct perf_event_context *ctx; 3209 int ctxn; 3210 3211 rcu_read_lock(); 3212 for_each_task_context_nr(ctxn) { 3213 ctx = current->perf_event_ctxp[ctxn]; 3214 if (!ctx) 3215 continue; 3216 3217 perf_event_enable_on_exec(ctx); 3218 } 3219 rcu_read_unlock(); 3220 } 3221 3222 struct perf_read_data { 3223 struct perf_event *event; 3224 bool group; 3225 int ret; 3226 }; 3227 3228 /* 3229 * Cross CPU call to read the hardware event 3230 */ 3231 static void __perf_event_read(void *info) 3232 { 3233 struct perf_read_data *data = info; 3234 struct perf_event *sub, *event = data->event; 3235 struct perf_event_context *ctx = event->ctx; 3236 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3237 struct pmu *pmu = event->pmu; 3238 3239 /* 3240 * If this is a task context, we need to check whether it is 3241 * the current task context of this cpu. If not it has been 3242 * scheduled out before the smp call arrived. In that case 3243 * event->count would have been updated to a recent sample 3244 * when the event was scheduled out. 3245 */ 3246 if (ctx->task && cpuctx->task_ctx != ctx) 3247 return; 3248 3249 raw_spin_lock(&ctx->lock); 3250 if (ctx->is_active) { 3251 update_context_time(ctx); 3252 update_cgrp_time_from_event(event); 3253 } 3254 3255 update_event_times(event); 3256 if (event->state != PERF_EVENT_STATE_ACTIVE) 3257 goto unlock; 3258 3259 if (!data->group) { 3260 pmu->read(event); 3261 data->ret = 0; 3262 goto unlock; 3263 } 3264 3265 pmu->start_txn(pmu, PERF_PMU_TXN_READ); 3266 3267 pmu->read(event); 3268 3269 list_for_each_entry(sub, &event->sibling_list, group_entry) { 3270 update_event_times(sub); 3271 if (sub->state == PERF_EVENT_STATE_ACTIVE) { 3272 /* 3273 * Use sibling's PMU rather than @event's since 3274 * sibling could be on different (eg: software) PMU. 3275 */ 3276 sub->pmu->read(sub); 3277 } 3278 } 3279 3280 data->ret = pmu->commit_txn(pmu); 3281 3282 unlock: 3283 raw_spin_unlock(&ctx->lock); 3284 } 3285 3286 static inline u64 perf_event_count(struct perf_event *event) 3287 { 3288 if (event->pmu->count) 3289 return event->pmu->count(event); 3290 3291 return __perf_event_count(event); 3292 } 3293 3294 /* 3295 * NMI-safe method to read a local event, that is an event that 3296 * is: 3297 * - either for the current task, or for this CPU 3298 * - does not have inherit set, for inherited task events 3299 * will not be local and we cannot read them atomically 3300 * - must not have a pmu::count method 3301 */ 3302 u64 perf_event_read_local(struct perf_event *event) 3303 { 3304 unsigned long flags; 3305 u64 val; 3306 3307 /* 3308 * Disabling interrupts avoids all counter scheduling (context 3309 * switches, timer based rotation and IPIs). 3310 */ 3311 local_irq_save(flags); 3312 3313 /* If this is a per-task event, it must be for current */ 3314 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && 3315 event->hw.target != current); 3316 3317 /* If this is a per-CPU event, it must be for this CPU */ 3318 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && 3319 event->cpu != smp_processor_id()); 3320 3321 /* 3322 * It must not be an event with inherit set, we cannot read 3323 * all child counters from atomic context. 3324 */ 3325 WARN_ON_ONCE(event->attr.inherit); 3326 3327 /* 3328 * It must not have a pmu::count method, those are not 3329 * NMI safe. 3330 */ 3331 WARN_ON_ONCE(event->pmu->count); 3332 3333 /* 3334 * If the event is currently on this CPU, its either a per-task event, 3335 * or local to this CPU. Furthermore it means its ACTIVE (otherwise 3336 * oncpu == -1). 3337 */ 3338 if (event->oncpu == smp_processor_id()) 3339 event->pmu->read(event); 3340 3341 val = local64_read(&event->count); 3342 local_irq_restore(flags); 3343 3344 return val; 3345 } 3346 3347 static int perf_event_read(struct perf_event *event, bool group) 3348 { 3349 int ret = 0; 3350 3351 /* 3352 * If event is enabled and currently active on a CPU, update the 3353 * value in the event structure: 3354 */ 3355 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3356 struct perf_read_data data = { 3357 .event = event, 3358 .group = group, 3359 .ret = 0, 3360 }; 3361 smp_call_function_single(event->oncpu, 3362 __perf_event_read, &data, 1); 3363 ret = data.ret; 3364 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3365 struct perf_event_context *ctx = event->ctx; 3366 unsigned long flags; 3367 3368 raw_spin_lock_irqsave(&ctx->lock, flags); 3369 /* 3370 * may read while context is not active 3371 * (e.g., thread is blocked), in that case 3372 * we cannot update context time 3373 */ 3374 if (ctx->is_active) { 3375 update_context_time(ctx); 3376 update_cgrp_time_from_event(event); 3377 } 3378 if (group) 3379 update_group_times(event); 3380 else 3381 update_event_times(event); 3382 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3383 } 3384 3385 return ret; 3386 } 3387 3388 /* 3389 * Initialize the perf_event context in a task_struct: 3390 */ 3391 static void __perf_event_init_context(struct perf_event_context *ctx) 3392 { 3393 raw_spin_lock_init(&ctx->lock); 3394 mutex_init(&ctx->mutex); 3395 INIT_LIST_HEAD(&ctx->active_ctx_list); 3396 INIT_LIST_HEAD(&ctx->pinned_groups); 3397 INIT_LIST_HEAD(&ctx->flexible_groups); 3398 INIT_LIST_HEAD(&ctx->event_list); 3399 atomic_set(&ctx->refcount, 1); 3400 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3401 } 3402 3403 static struct perf_event_context * 3404 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3405 { 3406 struct perf_event_context *ctx; 3407 3408 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3409 if (!ctx) 3410 return NULL; 3411 3412 __perf_event_init_context(ctx); 3413 if (task) { 3414 ctx->task = task; 3415 get_task_struct(task); 3416 } 3417 ctx->pmu = pmu; 3418 3419 return ctx; 3420 } 3421 3422 static struct task_struct * 3423 find_lively_task_by_vpid(pid_t vpid) 3424 { 3425 struct task_struct *task; 3426 int err; 3427 3428 rcu_read_lock(); 3429 if (!vpid) 3430 task = current; 3431 else 3432 task = find_task_by_vpid(vpid); 3433 if (task) 3434 get_task_struct(task); 3435 rcu_read_unlock(); 3436 3437 if (!task) 3438 return ERR_PTR(-ESRCH); 3439 3440 /* Reuse ptrace permission checks for now. */ 3441 err = -EACCES; 3442 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3443 goto errout; 3444 3445 return task; 3446 errout: 3447 put_task_struct(task); 3448 return ERR_PTR(err); 3449 3450 } 3451 3452 /* 3453 * Returns a matching context with refcount and pincount. 3454 */ 3455 static struct perf_event_context * 3456 find_get_context(struct pmu *pmu, struct task_struct *task, 3457 struct perf_event *event) 3458 { 3459 struct perf_event_context *ctx, *clone_ctx = NULL; 3460 struct perf_cpu_context *cpuctx; 3461 void *task_ctx_data = NULL; 3462 unsigned long flags; 3463 int ctxn, err; 3464 int cpu = event->cpu; 3465 3466 if (!task) { 3467 /* Must be root to operate on a CPU event: */ 3468 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3469 return ERR_PTR(-EACCES); 3470 3471 /* 3472 * We could be clever and allow to attach a event to an 3473 * offline CPU and activate it when the CPU comes up, but 3474 * that's for later. 3475 */ 3476 if (!cpu_online(cpu)) 3477 return ERR_PTR(-ENODEV); 3478 3479 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3480 ctx = &cpuctx->ctx; 3481 get_ctx(ctx); 3482 ++ctx->pin_count; 3483 3484 return ctx; 3485 } 3486 3487 err = -EINVAL; 3488 ctxn = pmu->task_ctx_nr; 3489 if (ctxn < 0) 3490 goto errout; 3491 3492 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3493 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3494 if (!task_ctx_data) { 3495 err = -ENOMEM; 3496 goto errout; 3497 } 3498 } 3499 3500 retry: 3501 ctx = perf_lock_task_context(task, ctxn, &flags); 3502 if (ctx) { 3503 clone_ctx = unclone_ctx(ctx); 3504 ++ctx->pin_count; 3505 3506 if (task_ctx_data && !ctx->task_ctx_data) { 3507 ctx->task_ctx_data = task_ctx_data; 3508 task_ctx_data = NULL; 3509 } 3510 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3511 3512 if (clone_ctx) 3513 put_ctx(clone_ctx); 3514 } else { 3515 ctx = alloc_perf_context(pmu, task); 3516 err = -ENOMEM; 3517 if (!ctx) 3518 goto errout; 3519 3520 if (task_ctx_data) { 3521 ctx->task_ctx_data = task_ctx_data; 3522 task_ctx_data = NULL; 3523 } 3524 3525 err = 0; 3526 mutex_lock(&task->perf_event_mutex); 3527 /* 3528 * If it has already passed perf_event_exit_task(). 3529 * we must see PF_EXITING, it takes this mutex too. 3530 */ 3531 if (task->flags & PF_EXITING) 3532 err = -ESRCH; 3533 else if (task->perf_event_ctxp[ctxn]) 3534 err = -EAGAIN; 3535 else { 3536 get_ctx(ctx); 3537 ++ctx->pin_count; 3538 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3539 } 3540 mutex_unlock(&task->perf_event_mutex); 3541 3542 if (unlikely(err)) { 3543 put_ctx(ctx); 3544 3545 if (err == -EAGAIN) 3546 goto retry; 3547 goto errout; 3548 } 3549 } 3550 3551 kfree(task_ctx_data); 3552 return ctx; 3553 3554 errout: 3555 kfree(task_ctx_data); 3556 return ERR_PTR(err); 3557 } 3558 3559 static void perf_event_free_filter(struct perf_event *event); 3560 static void perf_event_free_bpf_prog(struct perf_event *event); 3561 3562 static void free_event_rcu(struct rcu_head *head) 3563 { 3564 struct perf_event *event; 3565 3566 event = container_of(head, struct perf_event, rcu_head); 3567 if (event->ns) 3568 put_pid_ns(event->ns); 3569 perf_event_free_filter(event); 3570 kfree(event); 3571 } 3572 3573 static void ring_buffer_attach(struct perf_event *event, 3574 struct ring_buffer *rb); 3575 3576 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3577 { 3578 if (event->parent) 3579 return; 3580 3581 if (is_cgroup_event(event)) 3582 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3583 } 3584 3585 static void unaccount_event(struct perf_event *event) 3586 { 3587 if (event->parent) 3588 return; 3589 3590 if (event->attach_state & PERF_ATTACH_TASK) 3591 static_key_slow_dec_deferred(&perf_sched_events); 3592 if (event->attr.mmap || event->attr.mmap_data) 3593 atomic_dec(&nr_mmap_events); 3594 if (event->attr.comm) 3595 atomic_dec(&nr_comm_events); 3596 if (event->attr.task) 3597 atomic_dec(&nr_task_events); 3598 if (event->attr.freq) 3599 atomic_dec(&nr_freq_events); 3600 if (event->attr.context_switch) { 3601 static_key_slow_dec_deferred(&perf_sched_events); 3602 atomic_dec(&nr_switch_events); 3603 } 3604 if (is_cgroup_event(event)) 3605 static_key_slow_dec_deferred(&perf_sched_events); 3606 if (has_branch_stack(event)) 3607 static_key_slow_dec_deferred(&perf_sched_events); 3608 3609 unaccount_event_cpu(event, event->cpu); 3610 } 3611 3612 /* 3613 * The following implement mutual exclusion of events on "exclusive" pmus 3614 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3615 * at a time, so we disallow creating events that might conflict, namely: 3616 * 3617 * 1) cpu-wide events in the presence of per-task events, 3618 * 2) per-task events in the presence of cpu-wide events, 3619 * 3) two matching events on the same context. 3620 * 3621 * The former two cases are handled in the allocation path (perf_event_alloc(), 3622 * __free_event()), the latter -- before the first perf_install_in_context(). 3623 */ 3624 static int exclusive_event_init(struct perf_event *event) 3625 { 3626 struct pmu *pmu = event->pmu; 3627 3628 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3629 return 0; 3630 3631 /* 3632 * Prevent co-existence of per-task and cpu-wide events on the 3633 * same exclusive pmu. 3634 * 3635 * Negative pmu::exclusive_cnt means there are cpu-wide 3636 * events on this "exclusive" pmu, positive means there are 3637 * per-task events. 3638 * 3639 * Since this is called in perf_event_alloc() path, event::ctx 3640 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3641 * to mean "per-task event", because unlike other attach states it 3642 * never gets cleared. 3643 */ 3644 if (event->attach_state & PERF_ATTACH_TASK) { 3645 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3646 return -EBUSY; 3647 } else { 3648 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3649 return -EBUSY; 3650 } 3651 3652 return 0; 3653 } 3654 3655 static void exclusive_event_destroy(struct perf_event *event) 3656 { 3657 struct pmu *pmu = event->pmu; 3658 3659 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3660 return; 3661 3662 /* see comment in exclusive_event_init() */ 3663 if (event->attach_state & PERF_ATTACH_TASK) 3664 atomic_dec(&pmu->exclusive_cnt); 3665 else 3666 atomic_inc(&pmu->exclusive_cnt); 3667 } 3668 3669 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3670 { 3671 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3672 (e1->cpu == e2->cpu || 3673 e1->cpu == -1 || 3674 e2->cpu == -1)) 3675 return true; 3676 return false; 3677 } 3678 3679 /* Called under the same ctx::mutex as perf_install_in_context() */ 3680 static bool exclusive_event_installable(struct perf_event *event, 3681 struct perf_event_context *ctx) 3682 { 3683 struct perf_event *iter_event; 3684 struct pmu *pmu = event->pmu; 3685 3686 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3687 return true; 3688 3689 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3690 if (exclusive_event_match(iter_event, event)) 3691 return false; 3692 } 3693 3694 return true; 3695 } 3696 3697 static void __free_event(struct perf_event *event) 3698 { 3699 if (!event->parent) { 3700 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3701 put_callchain_buffers(); 3702 } 3703 3704 perf_event_free_bpf_prog(event); 3705 3706 if (event->destroy) 3707 event->destroy(event); 3708 3709 if (event->ctx) 3710 put_ctx(event->ctx); 3711 3712 if (event->pmu) { 3713 exclusive_event_destroy(event); 3714 module_put(event->pmu->module); 3715 } 3716 3717 call_rcu(&event->rcu_head, free_event_rcu); 3718 } 3719 3720 static void _free_event(struct perf_event *event) 3721 { 3722 irq_work_sync(&event->pending); 3723 3724 unaccount_event(event); 3725 3726 if (event->rb) { 3727 /* 3728 * Can happen when we close an event with re-directed output. 3729 * 3730 * Since we have a 0 refcount, perf_mmap_close() will skip 3731 * over us; possibly making our ring_buffer_put() the last. 3732 */ 3733 mutex_lock(&event->mmap_mutex); 3734 ring_buffer_attach(event, NULL); 3735 mutex_unlock(&event->mmap_mutex); 3736 } 3737 3738 if (is_cgroup_event(event)) 3739 perf_detach_cgroup(event); 3740 3741 __free_event(event); 3742 } 3743 3744 /* 3745 * Used to free events which have a known refcount of 1, such as in error paths 3746 * where the event isn't exposed yet and inherited events. 3747 */ 3748 static void free_event(struct perf_event *event) 3749 { 3750 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3751 "unexpected event refcount: %ld; ptr=%p\n", 3752 atomic_long_read(&event->refcount), event)) { 3753 /* leak to avoid use-after-free */ 3754 return; 3755 } 3756 3757 _free_event(event); 3758 } 3759 3760 /* 3761 * Remove user event from the owner task. 3762 */ 3763 static void perf_remove_from_owner(struct perf_event *event) 3764 { 3765 struct task_struct *owner; 3766 3767 rcu_read_lock(); 3768 owner = ACCESS_ONCE(event->owner); 3769 /* 3770 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3771 * !owner it means the list deletion is complete and we can indeed 3772 * free this event, otherwise we need to serialize on 3773 * owner->perf_event_mutex. 3774 */ 3775 smp_read_barrier_depends(); 3776 if (owner) { 3777 /* 3778 * Since delayed_put_task_struct() also drops the last 3779 * task reference we can safely take a new reference 3780 * while holding the rcu_read_lock(). 3781 */ 3782 get_task_struct(owner); 3783 } 3784 rcu_read_unlock(); 3785 3786 if (owner) { 3787 /* 3788 * If we're here through perf_event_exit_task() we're already 3789 * holding ctx->mutex which would be an inversion wrt. the 3790 * normal lock order. 3791 * 3792 * However we can safely take this lock because its the child 3793 * ctx->mutex. 3794 */ 3795 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 3796 3797 /* 3798 * We have to re-check the event->owner field, if it is cleared 3799 * we raced with perf_event_exit_task(), acquiring the mutex 3800 * ensured they're done, and we can proceed with freeing the 3801 * event. 3802 */ 3803 if (event->owner) 3804 list_del_init(&event->owner_entry); 3805 mutex_unlock(&owner->perf_event_mutex); 3806 put_task_struct(owner); 3807 } 3808 } 3809 3810 static void put_event(struct perf_event *event) 3811 { 3812 struct perf_event_context *ctx; 3813 3814 if (!atomic_long_dec_and_test(&event->refcount)) 3815 return; 3816 3817 if (!is_kernel_event(event)) 3818 perf_remove_from_owner(event); 3819 3820 /* 3821 * There are two ways this annotation is useful: 3822 * 3823 * 1) there is a lock recursion from perf_event_exit_task 3824 * see the comment there. 3825 * 3826 * 2) there is a lock-inversion with mmap_sem through 3827 * perf_read_group(), which takes faults while 3828 * holding ctx->mutex, however this is called after 3829 * the last filedesc died, so there is no possibility 3830 * to trigger the AB-BA case. 3831 */ 3832 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3833 WARN_ON_ONCE(ctx->parent_ctx); 3834 perf_remove_from_context(event, true); 3835 perf_event_ctx_unlock(event, ctx); 3836 3837 _free_event(event); 3838 } 3839 3840 int perf_event_release_kernel(struct perf_event *event) 3841 { 3842 put_event(event); 3843 return 0; 3844 } 3845 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3846 3847 /* 3848 * Called when the last reference to the file is gone. 3849 */ 3850 static int perf_release(struct inode *inode, struct file *file) 3851 { 3852 put_event(file->private_data); 3853 return 0; 3854 } 3855 3856 /* 3857 * Remove all orphanes events from the context. 3858 */ 3859 static void orphans_remove_work(struct work_struct *work) 3860 { 3861 struct perf_event_context *ctx; 3862 struct perf_event *event, *tmp; 3863 3864 ctx = container_of(work, struct perf_event_context, 3865 orphans_remove.work); 3866 3867 mutex_lock(&ctx->mutex); 3868 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3869 struct perf_event *parent_event = event->parent; 3870 3871 if (!is_orphaned_child(event)) 3872 continue; 3873 3874 perf_remove_from_context(event, true); 3875 3876 mutex_lock(&parent_event->child_mutex); 3877 list_del_init(&event->child_list); 3878 mutex_unlock(&parent_event->child_mutex); 3879 3880 free_event(event); 3881 put_event(parent_event); 3882 } 3883 3884 raw_spin_lock_irq(&ctx->lock); 3885 ctx->orphans_remove_sched = false; 3886 raw_spin_unlock_irq(&ctx->lock); 3887 mutex_unlock(&ctx->mutex); 3888 3889 put_ctx(ctx); 3890 } 3891 3892 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3893 { 3894 struct perf_event *child; 3895 u64 total = 0; 3896 3897 *enabled = 0; 3898 *running = 0; 3899 3900 mutex_lock(&event->child_mutex); 3901 3902 (void)perf_event_read(event, false); 3903 total += perf_event_count(event); 3904 3905 *enabled += event->total_time_enabled + 3906 atomic64_read(&event->child_total_time_enabled); 3907 *running += event->total_time_running + 3908 atomic64_read(&event->child_total_time_running); 3909 3910 list_for_each_entry(child, &event->child_list, child_list) { 3911 (void)perf_event_read(child, false); 3912 total += perf_event_count(child); 3913 *enabled += child->total_time_enabled; 3914 *running += child->total_time_running; 3915 } 3916 mutex_unlock(&event->child_mutex); 3917 3918 return total; 3919 } 3920 EXPORT_SYMBOL_GPL(perf_event_read_value); 3921 3922 static int __perf_read_group_add(struct perf_event *leader, 3923 u64 read_format, u64 *values) 3924 { 3925 struct perf_event *sub; 3926 int n = 1; /* skip @nr */ 3927 int ret; 3928 3929 ret = perf_event_read(leader, true); 3930 if (ret) 3931 return ret; 3932 3933 /* 3934 * Since we co-schedule groups, {enabled,running} times of siblings 3935 * will be identical to those of the leader, so we only publish one 3936 * set. 3937 */ 3938 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3939 values[n++] += leader->total_time_enabled + 3940 atomic64_read(&leader->child_total_time_enabled); 3941 } 3942 3943 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3944 values[n++] += leader->total_time_running + 3945 atomic64_read(&leader->child_total_time_running); 3946 } 3947 3948 /* 3949 * Write {count,id} tuples for every sibling. 3950 */ 3951 values[n++] += perf_event_count(leader); 3952 if (read_format & PERF_FORMAT_ID) 3953 values[n++] = primary_event_id(leader); 3954 3955 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3956 values[n++] += perf_event_count(sub); 3957 if (read_format & PERF_FORMAT_ID) 3958 values[n++] = primary_event_id(sub); 3959 } 3960 3961 return 0; 3962 } 3963 3964 static int perf_read_group(struct perf_event *event, 3965 u64 read_format, char __user *buf) 3966 { 3967 struct perf_event *leader = event->group_leader, *child; 3968 struct perf_event_context *ctx = leader->ctx; 3969 int ret; 3970 u64 *values; 3971 3972 lockdep_assert_held(&ctx->mutex); 3973 3974 values = kzalloc(event->read_size, GFP_KERNEL); 3975 if (!values) 3976 return -ENOMEM; 3977 3978 values[0] = 1 + leader->nr_siblings; 3979 3980 /* 3981 * By locking the child_mutex of the leader we effectively 3982 * lock the child list of all siblings.. XXX explain how. 3983 */ 3984 mutex_lock(&leader->child_mutex); 3985 3986 ret = __perf_read_group_add(leader, read_format, values); 3987 if (ret) 3988 goto unlock; 3989 3990 list_for_each_entry(child, &leader->child_list, child_list) { 3991 ret = __perf_read_group_add(child, read_format, values); 3992 if (ret) 3993 goto unlock; 3994 } 3995 3996 mutex_unlock(&leader->child_mutex); 3997 3998 ret = event->read_size; 3999 if (copy_to_user(buf, values, event->read_size)) 4000 ret = -EFAULT; 4001 goto out; 4002 4003 unlock: 4004 mutex_unlock(&leader->child_mutex); 4005 out: 4006 kfree(values); 4007 return ret; 4008 } 4009 4010 static int perf_read_one(struct perf_event *event, 4011 u64 read_format, char __user *buf) 4012 { 4013 u64 enabled, running; 4014 u64 values[4]; 4015 int n = 0; 4016 4017 values[n++] = perf_event_read_value(event, &enabled, &running); 4018 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 4019 values[n++] = enabled; 4020 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 4021 values[n++] = running; 4022 if (read_format & PERF_FORMAT_ID) 4023 values[n++] = primary_event_id(event); 4024 4025 if (copy_to_user(buf, values, n * sizeof(u64))) 4026 return -EFAULT; 4027 4028 return n * sizeof(u64); 4029 } 4030 4031 static bool is_event_hup(struct perf_event *event) 4032 { 4033 bool no_children; 4034 4035 if (event->state != PERF_EVENT_STATE_EXIT) 4036 return false; 4037 4038 mutex_lock(&event->child_mutex); 4039 no_children = list_empty(&event->child_list); 4040 mutex_unlock(&event->child_mutex); 4041 return no_children; 4042 } 4043 4044 /* 4045 * Read the performance event - simple non blocking version for now 4046 */ 4047 static ssize_t 4048 __perf_read(struct perf_event *event, char __user *buf, size_t count) 4049 { 4050 u64 read_format = event->attr.read_format; 4051 int ret; 4052 4053 /* 4054 * Return end-of-file for a read on a event that is in 4055 * error state (i.e. because it was pinned but it couldn't be 4056 * scheduled on to the CPU at some point). 4057 */ 4058 if (event->state == PERF_EVENT_STATE_ERROR) 4059 return 0; 4060 4061 if (count < event->read_size) 4062 return -ENOSPC; 4063 4064 WARN_ON_ONCE(event->ctx->parent_ctx); 4065 if (read_format & PERF_FORMAT_GROUP) 4066 ret = perf_read_group(event, read_format, buf); 4067 else 4068 ret = perf_read_one(event, read_format, buf); 4069 4070 return ret; 4071 } 4072 4073 static ssize_t 4074 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 4075 { 4076 struct perf_event *event = file->private_data; 4077 struct perf_event_context *ctx; 4078 int ret; 4079 4080 ctx = perf_event_ctx_lock(event); 4081 ret = __perf_read(event, buf, count); 4082 perf_event_ctx_unlock(event, ctx); 4083 4084 return ret; 4085 } 4086 4087 static unsigned int perf_poll(struct file *file, poll_table *wait) 4088 { 4089 struct perf_event *event = file->private_data; 4090 struct ring_buffer *rb; 4091 unsigned int events = POLLHUP; 4092 4093 poll_wait(file, &event->waitq, wait); 4094 4095 if (is_event_hup(event)) 4096 return events; 4097 4098 /* 4099 * Pin the event->rb by taking event->mmap_mutex; otherwise 4100 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 4101 */ 4102 mutex_lock(&event->mmap_mutex); 4103 rb = event->rb; 4104 if (rb) 4105 events = atomic_xchg(&rb->poll, 0); 4106 mutex_unlock(&event->mmap_mutex); 4107 return events; 4108 } 4109 4110 static void _perf_event_reset(struct perf_event *event) 4111 { 4112 (void)perf_event_read(event, false); 4113 local64_set(&event->count, 0); 4114 perf_event_update_userpage(event); 4115 } 4116 4117 /* 4118 * Holding the top-level event's child_mutex means that any 4119 * descendant process that has inherited this event will block 4120 * in sync_child_event if it goes to exit, thus satisfying the 4121 * task existence requirements of perf_event_enable/disable. 4122 */ 4123 static void perf_event_for_each_child(struct perf_event *event, 4124 void (*func)(struct perf_event *)) 4125 { 4126 struct perf_event *child; 4127 4128 WARN_ON_ONCE(event->ctx->parent_ctx); 4129 4130 mutex_lock(&event->child_mutex); 4131 func(event); 4132 list_for_each_entry(child, &event->child_list, child_list) 4133 func(child); 4134 mutex_unlock(&event->child_mutex); 4135 } 4136 4137 static void perf_event_for_each(struct perf_event *event, 4138 void (*func)(struct perf_event *)) 4139 { 4140 struct perf_event_context *ctx = event->ctx; 4141 struct perf_event *sibling; 4142 4143 lockdep_assert_held(&ctx->mutex); 4144 4145 event = event->group_leader; 4146 4147 perf_event_for_each_child(event, func); 4148 list_for_each_entry(sibling, &event->sibling_list, group_entry) 4149 perf_event_for_each_child(sibling, func); 4150 } 4151 4152 struct period_event { 4153 struct perf_event *event; 4154 u64 value; 4155 }; 4156 4157 static int __perf_event_period(void *info) 4158 { 4159 struct period_event *pe = info; 4160 struct perf_event *event = pe->event; 4161 struct perf_event_context *ctx = event->ctx; 4162 u64 value = pe->value; 4163 bool active; 4164 4165 raw_spin_lock(&ctx->lock); 4166 if (event->attr.freq) { 4167 event->attr.sample_freq = value; 4168 } else { 4169 event->attr.sample_period = value; 4170 event->hw.sample_period = value; 4171 } 4172 4173 active = (event->state == PERF_EVENT_STATE_ACTIVE); 4174 if (active) { 4175 perf_pmu_disable(ctx->pmu); 4176 event->pmu->stop(event, PERF_EF_UPDATE); 4177 } 4178 4179 local64_set(&event->hw.period_left, 0); 4180 4181 if (active) { 4182 event->pmu->start(event, PERF_EF_RELOAD); 4183 perf_pmu_enable(ctx->pmu); 4184 } 4185 raw_spin_unlock(&ctx->lock); 4186 4187 return 0; 4188 } 4189 4190 static int perf_event_period(struct perf_event *event, u64 __user *arg) 4191 { 4192 struct period_event pe = { .event = event, }; 4193 struct perf_event_context *ctx = event->ctx; 4194 struct task_struct *task; 4195 u64 value; 4196 4197 if (!is_sampling_event(event)) 4198 return -EINVAL; 4199 4200 if (copy_from_user(&value, arg, sizeof(value))) 4201 return -EFAULT; 4202 4203 if (!value) 4204 return -EINVAL; 4205 4206 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4207 return -EINVAL; 4208 4209 task = ctx->task; 4210 pe.value = value; 4211 4212 if (!task) { 4213 cpu_function_call(event->cpu, __perf_event_period, &pe); 4214 return 0; 4215 } 4216 4217 retry: 4218 if (!task_function_call(task, __perf_event_period, &pe)) 4219 return 0; 4220 4221 raw_spin_lock_irq(&ctx->lock); 4222 if (ctx->is_active) { 4223 raw_spin_unlock_irq(&ctx->lock); 4224 task = ctx->task; 4225 goto retry; 4226 } 4227 4228 if (event->attr.freq) { 4229 event->attr.sample_freq = value; 4230 } else { 4231 event->attr.sample_period = value; 4232 event->hw.sample_period = value; 4233 } 4234 4235 local64_set(&event->hw.period_left, 0); 4236 raw_spin_unlock_irq(&ctx->lock); 4237 4238 return 0; 4239 } 4240 4241 static const struct file_operations perf_fops; 4242 4243 static inline int perf_fget_light(int fd, struct fd *p) 4244 { 4245 struct fd f = fdget(fd); 4246 if (!f.file) 4247 return -EBADF; 4248 4249 if (f.file->f_op != &perf_fops) { 4250 fdput(f); 4251 return -EBADF; 4252 } 4253 *p = f; 4254 return 0; 4255 } 4256 4257 static int perf_event_set_output(struct perf_event *event, 4258 struct perf_event *output_event); 4259 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4260 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4261 4262 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4263 { 4264 void (*func)(struct perf_event *); 4265 u32 flags = arg; 4266 4267 switch (cmd) { 4268 case PERF_EVENT_IOC_ENABLE: 4269 func = _perf_event_enable; 4270 break; 4271 case PERF_EVENT_IOC_DISABLE: 4272 func = _perf_event_disable; 4273 break; 4274 case PERF_EVENT_IOC_RESET: 4275 func = _perf_event_reset; 4276 break; 4277 4278 case PERF_EVENT_IOC_REFRESH: 4279 return _perf_event_refresh(event, arg); 4280 4281 case PERF_EVENT_IOC_PERIOD: 4282 return perf_event_period(event, (u64 __user *)arg); 4283 4284 case PERF_EVENT_IOC_ID: 4285 { 4286 u64 id = primary_event_id(event); 4287 4288 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 4289 return -EFAULT; 4290 return 0; 4291 } 4292 4293 case PERF_EVENT_IOC_SET_OUTPUT: 4294 { 4295 int ret; 4296 if (arg != -1) { 4297 struct perf_event *output_event; 4298 struct fd output; 4299 ret = perf_fget_light(arg, &output); 4300 if (ret) 4301 return ret; 4302 output_event = output.file->private_data; 4303 ret = perf_event_set_output(event, output_event); 4304 fdput(output); 4305 } else { 4306 ret = perf_event_set_output(event, NULL); 4307 } 4308 return ret; 4309 } 4310 4311 case PERF_EVENT_IOC_SET_FILTER: 4312 return perf_event_set_filter(event, (void __user *)arg); 4313 4314 case PERF_EVENT_IOC_SET_BPF: 4315 return perf_event_set_bpf_prog(event, arg); 4316 4317 default: 4318 return -ENOTTY; 4319 } 4320 4321 if (flags & PERF_IOC_FLAG_GROUP) 4322 perf_event_for_each(event, func); 4323 else 4324 perf_event_for_each_child(event, func); 4325 4326 return 0; 4327 } 4328 4329 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4330 { 4331 struct perf_event *event = file->private_data; 4332 struct perf_event_context *ctx; 4333 long ret; 4334 4335 ctx = perf_event_ctx_lock(event); 4336 ret = _perf_ioctl(event, cmd, arg); 4337 perf_event_ctx_unlock(event, ctx); 4338 4339 return ret; 4340 } 4341 4342 #ifdef CONFIG_COMPAT 4343 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4344 unsigned long arg) 4345 { 4346 switch (_IOC_NR(cmd)) { 4347 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 4348 case _IOC_NR(PERF_EVENT_IOC_ID): 4349 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */ 4350 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 4351 cmd &= ~IOCSIZE_MASK; 4352 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 4353 } 4354 break; 4355 } 4356 return perf_ioctl(file, cmd, arg); 4357 } 4358 #else 4359 # define perf_compat_ioctl NULL 4360 #endif 4361 4362 int perf_event_task_enable(void) 4363 { 4364 struct perf_event_context *ctx; 4365 struct perf_event *event; 4366 4367 mutex_lock(¤t->perf_event_mutex); 4368 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { 4369 ctx = perf_event_ctx_lock(event); 4370 perf_event_for_each_child(event, _perf_event_enable); 4371 perf_event_ctx_unlock(event, ctx); 4372 } 4373 mutex_unlock(¤t->perf_event_mutex); 4374 4375 return 0; 4376 } 4377 4378 int perf_event_task_disable(void) 4379 { 4380 struct perf_event_context *ctx; 4381 struct perf_event *event; 4382 4383 mutex_lock(¤t->perf_event_mutex); 4384 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { 4385 ctx = perf_event_ctx_lock(event); 4386 perf_event_for_each_child(event, _perf_event_disable); 4387 perf_event_ctx_unlock(event, ctx); 4388 } 4389 mutex_unlock(¤t->perf_event_mutex); 4390 4391 return 0; 4392 } 4393 4394 static int perf_event_index(struct perf_event *event) 4395 { 4396 if (event->hw.state & PERF_HES_STOPPED) 4397 return 0; 4398 4399 if (event->state != PERF_EVENT_STATE_ACTIVE) 4400 return 0; 4401 4402 return event->pmu->event_idx(event); 4403 } 4404 4405 static void calc_timer_values(struct perf_event *event, 4406 u64 *now, 4407 u64 *enabled, 4408 u64 *running) 4409 { 4410 u64 ctx_time; 4411 4412 *now = perf_clock(); 4413 ctx_time = event->shadow_ctx_time + *now; 4414 *enabled = ctx_time - event->tstamp_enabled; 4415 *running = ctx_time - event->tstamp_running; 4416 } 4417 4418 static void perf_event_init_userpage(struct perf_event *event) 4419 { 4420 struct perf_event_mmap_page *userpg; 4421 struct ring_buffer *rb; 4422 4423 rcu_read_lock(); 4424 rb = rcu_dereference(event->rb); 4425 if (!rb) 4426 goto unlock; 4427 4428 userpg = rb->user_page; 4429 4430 /* Allow new userspace to detect that bit 0 is deprecated */ 4431 userpg->cap_bit0_is_deprecated = 1; 4432 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 4433 userpg->data_offset = PAGE_SIZE; 4434 userpg->data_size = perf_data_size(rb); 4435 4436 unlock: 4437 rcu_read_unlock(); 4438 } 4439 4440 void __weak arch_perf_update_userpage( 4441 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) 4442 { 4443 } 4444 4445 /* 4446 * Callers need to ensure there can be no nesting of this function, otherwise 4447 * the seqlock logic goes bad. We can not serialize this because the arch 4448 * code calls this from NMI context. 4449 */ 4450 void perf_event_update_userpage(struct perf_event *event) 4451 { 4452 struct perf_event_mmap_page *userpg; 4453 struct ring_buffer *rb; 4454 u64 enabled, running, now; 4455 4456 rcu_read_lock(); 4457 rb = rcu_dereference(event->rb); 4458 if (!rb) 4459 goto unlock; 4460 4461 /* 4462 * compute total_time_enabled, total_time_running 4463 * based on snapshot values taken when the event 4464 * was last scheduled in. 4465 * 4466 * we cannot simply called update_context_time() 4467 * because of locking issue as we can be called in 4468 * NMI context 4469 */ 4470 calc_timer_values(event, &now, &enabled, &running); 4471 4472 userpg = rb->user_page; 4473 /* 4474 * Disable preemption so as to not let the corresponding user-space 4475 * spin too long if we get preempted. 4476 */ 4477 preempt_disable(); 4478 ++userpg->lock; 4479 barrier(); 4480 userpg->index = perf_event_index(event); 4481 userpg->offset = perf_event_count(event); 4482 if (userpg->index) 4483 userpg->offset -= local64_read(&event->hw.prev_count); 4484 4485 userpg->time_enabled = enabled + 4486 atomic64_read(&event->child_total_time_enabled); 4487 4488 userpg->time_running = running + 4489 atomic64_read(&event->child_total_time_running); 4490 4491 arch_perf_update_userpage(event, userpg, now); 4492 4493 barrier(); 4494 ++userpg->lock; 4495 preempt_enable(); 4496 unlock: 4497 rcu_read_unlock(); 4498 } 4499 4500 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4501 { 4502 struct perf_event *event = vma->vm_file->private_data; 4503 struct ring_buffer *rb; 4504 int ret = VM_FAULT_SIGBUS; 4505 4506 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4507 if (vmf->pgoff == 0) 4508 ret = 0; 4509 return ret; 4510 } 4511 4512 rcu_read_lock(); 4513 rb = rcu_dereference(event->rb); 4514 if (!rb) 4515 goto unlock; 4516 4517 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4518 goto unlock; 4519 4520 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4521 if (!vmf->page) 4522 goto unlock; 4523 4524 get_page(vmf->page); 4525 vmf->page->mapping = vma->vm_file->f_mapping; 4526 vmf->page->index = vmf->pgoff; 4527 4528 ret = 0; 4529 unlock: 4530 rcu_read_unlock(); 4531 4532 return ret; 4533 } 4534 4535 static void ring_buffer_attach(struct perf_event *event, 4536 struct ring_buffer *rb) 4537 { 4538 struct ring_buffer *old_rb = NULL; 4539 unsigned long flags; 4540 4541 if (event->rb) { 4542 /* 4543 * Should be impossible, we set this when removing 4544 * event->rb_entry and wait/clear when adding event->rb_entry. 4545 */ 4546 WARN_ON_ONCE(event->rcu_pending); 4547 4548 old_rb = event->rb; 4549 spin_lock_irqsave(&old_rb->event_lock, flags); 4550 list_del_rcu(&event->rb_entry); 4551 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4552 4553 event->rcu_batches = get_state_synchronize_rcu(); 4554 event->rcu_pending = 1; 4555 } 4556 4557 if (rb) { 4558 if (event->rcu_pending) { 4559 cond_synchronize_rcu(event->rcu_batches); 4560 event->rcu_pending = 0; 4561 } 4562 4563 spin_lock_irqsave(&rb->event_lock, flags); 4564 list_add_rcu(&event->rb_entry, &rb->event_list); 4565 spin_unlock_irqrestore(&rb->event_lock, flags); 4566 } 4567 4568 rcu_assign_pointer(event->rb, rb); 4569 4570 if (old_rb) { 4571 ring_buffer_put(old_rb); 4572 /* 4573 * Since we detached before setting the new rb, so that we 4574 * could attach the new rb, we could have missed a wakeup. 4575 * Provide it now. 4576 */ 4577 wake_up_all(&event->waitq); 4578 } 4579 } 4580 4581 static void ring_buffer_wakeup(struct perf_event *event) 4582 { 4583 struct ring_buffer *rb; 4584 4585 rcu_read_lock(); 4586 rb = rcu_dereference(event->rb); 4587 if (rb) { 4588 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4589 wake_up_all(&event->waitq); 4590 } 4591 rcu_read_unlock(); 4592 } 4593 4594 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4595 { 4596 struct ring_buffer *rb; 4597 4598 rcu_read_lock(); 4599 rb = rcu_dereference(event->rb); 4600 if (rb) { 4601 if (!atomic_inc_not_zero(&rb->refcount)) 4602 rb = NULL; 4603 } 4604 rcu_read_unlock(); 4605 4606 return rb; 4607 } 4608 4609 void ring_buffer_put(struct ring_buffer *rb) 4610 { 4611 if (!atomic_dec_and_test(&rb->refcount)) 4612 return; 4613 4614 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4615 4616 call_rcu(&rb->rcu_head, rb_free_rcu); 4617 } 4618 4619 static void perf_mmap_open(struct vm_area_struct *vma) 4620 { 4621 struct perf_event *event = vma->vm_file->private_data; 4622 4623 atomic_inc(&event->mmap_count); 4624 atomic_inc(&event->rb->mmap_count); 4625 4626 if (vma->vm_pgoff) 4627 atomic_inc(&event->rb->aux_mmap_count); 4628 4629 if (event->pmu->event_mapped) 4630 event->pmu->event_mapped(event); 4631 } 4632 4633 /* 4634 * A buffer can be mmap()ed multiple times; either directly through the same 4635 * event, or through other events by use of perf_event_set_output(). 4636 * 4637 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4638 * the buffer here, where we still have a VM context. This means we need 4639 * to detach all events redirecting to us. 4640 */ 4641 static void perf_mmap_close(struct vm_area_struct *vma) 4642 { 4643 struct perf_event *event = vma->vm_file->private_data; 4644 4645 struct ring_buffer *rb = ring_buffer_get(event); 4646 struct user_struct *mmap_user = rb->mmap_user; 4647 int mmap_locked = rb->mmap_locked; 4648 unsigned long size = perf_data_size(rb); 4649 4650 if (event->pmu->event_unmapped) 4651 event->pmu->event_unmapped(event); 4652 4653 /* 4654 * rb->aux_mmap_count will always drop before rb->mmap_count and 4655 * event->mmap_count, so it is ok to use event->mmap_mutex to 4656 * serialize with perf_mmap here. 4657 */ 4658 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4659 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4660 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4661 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4662 4663 rb_free_aux(rb); 4664 mutex_unlock(&event->mmap_mutex); 4665 } 4666 4667 atomic_dec(&rb->mmap_count); 4668 4669 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4670 goto out_put; 4671 4672 ring_buffer_attach(event, NULL); 4673 mutex_unlock(&event->mmap_mutex); 4674 4675 /* If there's still other mmap()s of this buffer, we're done. */ 4676 if (atomic_read(&rb->mmap_count)) 4677 goto out_put; 4678 4679 /* 4680 * No other mmap()s, detach from all other events that might redirect 4681 * into the now unreachable buffer. Somewhat complicated by the 4682 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4683 */ 4684 again: 4685 rcu_read_lock(); 4686 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4687 if (!atomic_long_inc_not_zero(&event->refcount)) { 4688 /* 4689 * This event is en-route to free_event() which will 4690 * detach it and remove it from the list. 4691 */ 4692 continue; 4693 } 4694 rcu_read_unlock(); 4695 4696 mutex_lock(&event->mmap_mutex); 4697 /* 4698 * Check we didn't race with perf_event_set_output() which can 4699 * swizzle the rb from under us while we were waiting to 4700 * acquire mmap_mutex. 4701 * 4702 * If we find a different rb; ignore this event, a next 4703 * iteration will no longer find it on the list. We have to 4704 * still restart the iteration to make sure we're not now 4705 * iterating the wrong list. 4706 */ 4707 if (event->rb == rb) 4708 ring_buffer_attach(event, NULL); 4709 4710 mutex_unlock(&event->mmap_mutex); 4711 put_event(event); 4712 4713 /* 4714 * Restart the iteration; either we're on the wrong list or 4715 * destroyed its integrity by doing a deletion. 4716 */ 4717 goto again; 4718 } 4719 rcu_read_unlock(); 4720 4721 /* 4722 * It could be there's still a few 0-ref events on the list; they'll 4723 * get cleaned up by free_event() -- they'll also still have their 4724 * ref on the rb and will free it whenever they are done with it. 4725 * 4726 * Aside from that, this buffer is 'fully' detached and unmapped, 4727 * undo the VM accounting. 4728 */ 4729 4730 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4731 vma->vm_mm->pinned_vm -= mmap_locked; 4732 free_uid(mmap_user); 4733 4734 out_put: 4735 ring_buffer_put(rb); /* could be last */ 4736 } 4737 4738 static const struct vm_operations_struct perf_mmap_vmops = { 4739 .open = perf_mmap_open, 4740 .close = perf_mmap_close, /* non mergable */ 4741 .fault = perf_mmap_fault, 4742 .page_mkwrite = perf_mmap_fault, 4743 }; 4744 4745 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4746 { 4747 struct perf_event *event = file->private_data; 4748 unsigned long user_locked, user_lock_limit; 4749 struct user_struct *user = current_user(); 4750 unsigned long locked, lock_limit; 4751 struct ring_buffer *rb = NULL; 4752 unsigned long vma_size; 4753 unsigned long nr_pages; 4754 long user_extra = 0, extra = 0; 4755 int ret = 0, flags = 0; 4756 4757 /* 4758 * Don't allow mmap() of inherited per-task counters. This would 4759 * create a performance issue due to all children writing to the 4760 * same rb. 4761 */ 4762 if (event->cpu == -1 && event->attr.inherit) 4763 return -EINVAL; 4764 4765 if (!(vma->vm_flags & VM_SHARED)) 4766 return -EINVAL; 4767 4768 vma_size = vma->vm_end - vma->vm_start; 4769 4770 if (vma->vm_pgoff == 0) { 4771 nr_pages = (vma_size / PAGE_SIZE) - 1; 4772 } else { 4773 /* 4774 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4775 * mapped, all subsequent mappings should have the same size 4776 * and offset. Must be above the normal perf buffer. 4777 */ 4778 u64 aux_offset, aux_size; 4779 4780 if (!event->rb) 4781 return -EINVAL; 4782 4783 nr_pages = vma_size / PAGE_SIZE; 4784 4785 mutex_lock(&event->mmap_mutex); 4786 ret = -EINVAL; 4787 4788 rb = event->rb; 4789 if (!rb) 4790 goto aux_unlock; 4791 4792 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4793 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4794 4795 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4796 goto aux_unlock; 4797 4798 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4799 goto aux_unlock; 4800 4801 /* already mapped with a different offset */ 4802 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4803 goto aux_unlock; 4804 4805 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4806 goto aux_unlock; 4807 4808 /* already mapped with a different size */ 4809 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4810 goto aux_unlock; 4811 4812 if (!is_power_of_2(nr_pages)) 4813 goto aux_unlock; 4814 4815 if (!atomic_inc_not_zero(&rb->mmap_count)) 4816 goto aux_unlock; 4817 4818 if (rb_has_aux(rb)) { 4819 atomic_inc(&rb->aux_mmap_count); 4820 ret = 0; 4821 goto unlock; 4822 } 4823 4824 atomic_set(&rb->aux_mmap_count, 1); 4825 user_extra = nr_pages; 4826 4827 goto accounting; 4828 } 4829 4830 /* 4831 * If we have rb pages ensure they're a power-of-two number, so we 4832 * can do bitmasks instead of modulo. 4833 */ 4834 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4835 return -EINVAL; 4836 4837 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4838 return -EINVAL; 4839 4840 WARN_ON_ONCE(event->ctx->parent_ctx); 4841 again: 4842 mutex_lock(&event->mmap_mutex); 4843 if (event->rb) { 4844 if (event->rb->nr_pages != nr_pages) { 4845 ret = -EINVAL; 4846 goto unlock; 4847 } 4848 4849 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4850 /* 4851 * Raced against perf_mmap_close() through 4852 * perf_event_set_output(). Try again, hope for better 4853 * luck. 4854 */ 4855 mutex_unlock(&event->mmap_mutex); 4856 goto again; 4857 } 4858 4859 goto unlock; 4860 } 4861 4862 user_extra = nr_pages + 1; 4863 4864 accounting: 4865 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4866 4867 /* 4868 * Increase the limit linearly with more CPUs: 4869 */ 4870 user_lock_limit *= num_online_cpus(); 4871 4872 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4873 4874 if (user_locked > user_lock_limit) 4875 extra = user_locked - user_lock_limit; 4876 4877 lock_limit = rlimit(RLIMIT_MEMLOCK); 4878 lock_limit >>= PAGE_SHIFT; 4879 locked = vma->vm_mm->pinned_vm + extra; 4880 4881 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4882 !capable(CAP_IPC_LOCK)) { 4883 ret = -EPERM; 4884 goto unlock; 4885 } 4886 4887 WARN_ON(!rb && event->rb); 4888 4889 if (vma->vm_flags & VM_WRITE) 4890 flags |= RING_BUFFER_WRITABLE; 4891 4892 if (!rb) { 4893 rb = rb_alloc(nr_pages, 4894 event->attr.watermark ? event->attr.wakeup_watermark : 0, 4895 event->cpu, flags); 4896 4897 if (!rb) { 4898 ret = -ENOMEM; 4899 goto unlock; 4900 } 4901 4902 atomic_set(&rb->mmap_count, 1); 4903 rb->mmap_user = get_current_user(); 4904 rb->mmap_locked = extra; 4905 4906 ring_buffer_attach(event, rb); 4907 4908 perf_event_init_userpage(event); 4909 perf_event_update_userpage(event); 4910 } else { 4911 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4912 event->attr.aux_watermark, flags); 4913 if (!ret) 4914 rb->aux_mmap_locked = extra; 4915 } 4916 4917 unlock: 4918 if (!ret) { 4919 atomic_long_add(user_extra, &user->locked_vm); 4920 vma->vm_mm->pinned_vm += extra; 4921 4922 atomic_inc(&event->mmap_count); 4923 } else if (rb) { 4924 atomic_dec(&rb->mmap_count); 4925 } 4926 aux_unlock: 4927 mutex_unlock(&event->mmap_mutex); 4928 4929 /* 4930 * Since pinned accounting is per vm we cannot allow fork() to copy our 4931 * vma. 4932 */ 4933 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4934 vma->vm_ops = &perf_mmap_vmops; 4935 4936 if (event->pmu->event_mapped) 4937 event->pmu->event_mapped(event); 4938 4939 return ret; 4940 } 4941 4942 static int perf_fasync(int fd, struct file *filp, int on) 4943 { 4944 struct inode *inode = file_inode(filp); 4945 struct perf_event *event = filp->private_data; 4946 int retval; 4947 4948 mutex_lock(&inode->i_mutex); 4949 retval = fasync_helper(fd, filp, on, &event->fasync); 4950 mutex_unlock(&inode->i_mutex); 4951 4952 if (retval < 0) 4953 return retval; 4954 4955 return 0; 4956 } 4957 4958 static const struct file_operations perf_fops = { 4959 .llseek = no_llseek, 4960 .release = perf_release, 4961 .read = perf_read, 4962 .poll = perf_poll, 4963 .unlocked_ioctl = perf_ioctl, 4964 .compat_ioctl = perf_compat_ioctl, 4965 .mmap = perf_mmap, 4966 .fasync = perf_fasync, 4967 }; 4968 4969 /* 4970 * Perf event wakeup 4971 * 4972 * If there's data, ensure we set the poll() state and publish everything 4973 * to user-space before waking everybody up. 4974 */ 4975 4976 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) 4977 { 4978 /* only the parent has fasync state */ 4979 if (event->parent) 4980 event = event->parent; 4981 return &event->fasync; 4982 } 4983 4984 void perf_event_wakeup(struct perf_event *event) 4985 { 4986 ring_buffer_wakeup(event); 4987 4988 if (event->pending_kill) { 4989 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); 4990 event->pending_kill = 0; 4991 } 4992 } 4993 4994 static void perf_pending_event(struct irq_work *entry) 4995 { 4996 struct perf_event *event = container_of(entry, 4997 struct perf_event, pending); 4998 int rctx; 4999 5000 rctx = perf_swevent_get_recursion_context(); 5001 /* 5002 * If we 'fail' here, that's OK, it means recursion is already disabled 5003 * and we won't recurse 'further'. 5004 */ 5005 5006 if (event->pending_disable) { 5007 event->pending_disable = 0; 5008 __perf_event_disable(event); 5009 } 5010 5011 if (event->pending_wakeup) { 5012 event->pending_wakeup = 0; 5013 perf_event_wakeup(event); 5014 } 5015 5016 if (rctx >= 0) 5017 perf_swevent_put_recursion_context(rctx); 5018 } 5019 5020 /* 5021 * We assume there is only KVM supporting the callbacks. 5022 * Later on, we might change it to a list if there is 5023 * another virtualization implementation supporting the callbacks. 5024 */ 5025 struct perf_guest_info_callbacks *perf_guest_cbs; 5026 5027 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 5028 { 5029 perf_guest_cbs = cbs; 5030 return 0; 5031 } 5032 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 5033 5034 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 5035 { 5036 perf_guest_cbs = NULL; 5037 return 0; 5038 } 5039 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 5040 5041 static void 5042 perf_output_sample_regs(struct perf_output_handle *handle, 5043 struct pt_regs *regs, u64 mask) 5044 { 5045 int bit; 5046 5047 for_each_set_bit(bit, (const unsigned long *) &mask, 5048 sizeof(mask) * BITS_PER_BYTE) { 5049 u64 val; 5050 5051 val = perf_reg_value(regs, bit); 5052 perf_output_put(handle, val); 5053 } 5054 } 5055 5056 static void perf_sample_regs_user(struct perf_regs *regs_user, 5057 struct pt_regs *regs, 5058 struct pt_regs *regs_user_copy) 5059 { 5060 if (user_mode(regs)) { 5061 regs_user->abi = perf_reg_abi(current); 5062 regs_user->regs = regs; 5063 } else if (current->mm) { 5064 perf_get_regs_user(regs_user, regs, regs_user_copy); 5065 } else { 5066 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 5067 regs_user->regs = NULL; 5068 } 5069 } 5070 5071 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 5072 struct pt_regs *regs) 5073 { 5074 regs_intr->regs = regs; 5075 regs_intr->abi = perf_reg_abi(current); 5076 } 5077 5078 5079 /* 5080 * Get remaining task size from user stack pointer. 5081 * 5082 * It'd be better to take stack vma map and limit this more 5083 * precisly, but there's no way to get it safely under interrupt, 5084 * so using TASK_SIZE as limit. 5085 */ 5086 static u64 perf_ustack_task_size(struct pt_regs *regs) 5087 { 5088 unsigned long addr = perf_user_stack_pointer(regs); 5089 5090 if (!addr || addr >= TASK_SIZE) 5091 return 0; 5092 5093 return TASK_SIZE - addr; 5094 } 5095 5096 static u16 5097 perf_sample_ustack_size(u16 stack_size, u16 header_size, 5098 struct pt_regs *regs) 5099 { 5100 u64 task_size; 5101 5102 /* No regs, no stack pointer, no dump. */ 5103 if (!regs) 5104 return 0; 5105 5106 /* 5107 * Check if we fit in with the requested stack size into the: 5108 * - TASK_SIZE 5109 * If we don't, we limit the size to the TASK_SIZE. 5110 * 5111 * - remaining sample size 5112 * If we don't, we customize the stack size to 5113 * fit in to the remaining sample size. 5114 */ 5115 5116 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 5117 stack_size = min(stack_size, (u16) task_size); 5118 5119 /* Current header size plus static size and dynamic size. */ 5120 header_size += 2 * sizeof(u64); 5121 5122 /* Do we fit in with the current stack dump size? */ 5123 if ((u16) (header_size + stack_size) < header_size) { 5124 /* 5125 * If we overflow the maximum size for the sample, 5126 * we customize the stack dump size to fit in. 5127 */ 5128 stack_size = USHRT_MAX - header_size - sizeof(u64); 5129 stack_size = round_up(stack_size, sizeof(u64)); 5130 } 5131 5132 return stack_size; 5133 } 5134 5135 static void 5136 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 5137 struct pt_regs *regs) 5138 { 5139 /* Case of a kernel thread, nothing to dump */ 5140 if (!regs) { 5141 u64 size = 0; 5142 perf_output_put(handle, size); 5143 } else { 5144 unsigned long sp; 5145 unsigned int rem; 5146 u64 dyn_size; 5147 5148 /* 5149 * We dump: 5150 * static size 5151 * - the size requested by user or the best one we can fit 5152 * in to the sample max size 5153 * data 5154 * - user stack dump data 5155 * dynamic size 5156 * - the actual dumped size 5157 */ 5158 5159 /* Static size. */ 5160 perf_output_put(handle, dump_size); 5161 5162 /* Data. */ 5163 sp = perf_user_stack_pointer(regs); 5164 rem = __output_copy_user(handle, (void *) sp, dump_size); 5165 dyn_size = dump_size - rem; 5166 5167 perf_output_skip(handle, rem); 5168 5169 /* Dynamic size. */ 5170 perf_output_put(handle, dyn_size); 5171 } 5172 } 5173 5174 static void __perf_event_header__init_id(struct perf_event_header *header, 5175 struct perf_sample_data *data, 5176 struct perf_event *event) 5177 { 5178 u64 sample_type = event->attr.sample_type; 5179 5180 data->type = sample_type; 5181 header->size += event->id_header_size; 5182 5183 if (sample_type & PERF_SAMPLE_TID) { 5184 /* namespace issues */ 5185 data->tid_entry.pid = perf_event_pid(event, current); 5186 data->tid_entry.tid = perf_event_tid(event, current); 5187 } 5188 5189 if (sample_type & PERF_SAMPLE_TIME) 5190 data->time = perf_event_clock(event); 5191 5192 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 5193 data->id = primary_event_id(event); 5194 5195 if (sample_type & PERF_SAMPLE_STREAM_ID) 5196 data->stream_id = event->id; 5197 5198 if (sample_type & PERF_SAMPLE_CPU) { 5199 data->cpu_entry.cpu = raw_smp_processor_id(); 5200 data->cpu_entry.reserved = 0; 5201 } 5202 } 5203 5204 void perf_event_header__init_id(struct perf_event_header *header, 5205 struct perf_sample_data *data, 5206 struct perf_event *event) 5207 { 5208 if (event->attr.sample_id_all) 5209 __perf_event_header__init_id(header, data, event); 5210 } 5211 5212 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 5213 struct perf_sample_data *data) 5214 { 5215 u64 sample_type = data->type; 5216 5217 if (sample_type & PERF_SAMPLE_TID) 5218 perf_output_put(handle, data->tid_entry); 5219 5220 if (sample_type & PERF_SAMPLE_TIME) 5221 perf_output_put(handle, data->time); 5222 5223 if (sample_type & PERF_SAMPLE_ID) 5224 perf_output_put(handle, data->id); 5225 5226 if (sample_type & PERF_SAMPLE_STREAM_ID) 5227 perf_output_put(handle, data->stream_id); 5228 5229 if (sample_type & PERF_SAMPLE_CPU) 5230 perf_output_put(handle, data->cpu_entry); 5231 5232 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5233 perf_output_put(handle, data->id); 5234 } 5235 5236 void perf_event__output_id_sample(struct perf_event *event, 5237 struct perf_output_handle *handle, 5238 struct perf_sample_data *sample) 5239 { 5240 if (event->attr.sample_id_all) 5241 __perf_event__output_id_sample(handle, sample); 5242 } 5243 5244 static void perf_output_read_one(struct perf_output_handle *handle, 5245 struct perf_event *event, 5246 u64 enabled, u64 running) 5247 { 5248 u64 read_format = event->attr.read_format; 5249 u64 values[4]; 5250 int n = 0; 5251 5252 values[n++] = perf_event_count(event); 5253 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 5254 values[n++] = enabled + 5255 atomic64_read(&event->child_total_time_enabled); 5256 } 5257 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5258 values[n++] = running + 5259 atomic64_read(&event->child_total_time_running); 5260 } 5261 if (read_format & PERF_FORMAT_ID) 5262 values[n++] = primary_event_id(event); 5263 5264 __output_copy(handle, values, n * sizeof(u64)); 5265 } 5266 5267 /* 5268 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5269 */ 5270 static void perf_output_read_group(struct perf_output_handle *handle, 5271 struct perf_event *event, 5272 u64 enabled, u64 running) 5273 { 5274 struct perf_event *leader = event->group_leader, *sub; 5275 u64 read_format = event->attr.read_format; 5276 u64 values[5]; 5277 int n = 0; 5278 5279 values[n++] = 1 + leader->nr_siblings; 5280 5281 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5282 values[n++] = enabled; 5283 5284 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5285 values[n++] = running; 5286 5287 if (leader != event) 5288 leader->pmu->read(leader); 5289 5290 values[n++] = perf_event_count(leader); 5291 if (read_format & PERF_FORMAT_ID) 5292 values[n++] = primary_event_id(leader); 5293 5294 __output_copy(handle, values, n * sizeof(u64)); 5295 5296 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5297 n = 0; 5298 5299 if ((sub != event) && 5300 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5301 sub->pmu->read(sub); 5302 5303 values[n++] = perf_event_count(sub); 5304 if (read_format & PERF_FORMAT_ID) 5305 values[n++] = primary_event_id(sub); 5306 5307 __output_copy(handle, values, n * sizeof(u64)); 5308 } 5309 } 5310 5311 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5312 PERF_FORMAT_TOTAL_TIME_RUNNING) 5313 5314 static void perf_output_read(struct perf_output_handle *handle, 5315 struct perf_event *event) 5316 { 5317 u64 enabled = 0, running = 0, now; 5318 u64 read_format = event->attr.read_format; 5319 5320 /* 5321 * compute total_time_enabled, total_time_running 5322 * based on snapshot values taken when the event 5323 * was last scheduled in. 5324 * 5325 * we cannot simply called update_context_time() 5326 * because of locking issue as we are called in 5327 * NMI context 5328 */ 5329 if (read_format & PERF_FORMAT_TOTAL_TIMES) 5330 calc_timer_values(event, &now, &enabled, &running); 5331 5332 if (event->attr.read_format & PERF_FORMAT_GROUP) 5333 perf_output_read_group(handle, event, enabled, running); 5334 else 5335 perf_output_read_one(handle, event, enabled, running); 5336 } 5337 5338 void perf_output_sample(struct perf_output_handle *handle, 5339 struct perf_event_header *header, 5340 struct perf_sample_data *data, 5341 struct perf_event *event) 5342 { 5343 u64 sample_type = data->type; 5344 5345 perf_output_put(handle, *header); 5346 5347 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5348 perf_output_put(handle, data->id); 5349 5350 if (sample_type & PERF_SAMPLE_IP) 5351 perf_output_put(handle, data->ip); 5352 5353 if (sample_type & PERF_SAMPLE_TID) 5354 perf_output_put(handle, data->tid_entry); 5355 5356 if (sample_type & PERF_SAMPLE_TIME) 5357 perf_output_put(handle, data->time); 5358 5359 if (sample_type & PERF_SAMPLE_ADDR) 5360 perf_output_put(handle, data->addr); 5361 5362 if (sample_type & PERF_SAMPLE_ID) 5363 perf_output_put(handle, data->id); 5364 5365 if (sample_type & PERF_SAMPLE_STREAM_ID) 5366 perf_output_put(handle, data->stream_id); 5367 5368 if (sample_type & PERF_SAMPLE_CPU) 5369 perf_output_put(handle, data->cpu_entry); 5370 5371 if (sample_type & PERF_SAMPLE_PERIOD) 5372 perf_output_put(handle, data->period); 5373 5374 if (sample_type & PERF_SAMPLE_READ) 5375 perf_output_read(handle, event); 5376 5377 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5378 if (data->callchain) { 5379 int size = 1; 5380 5381 if (data->callchain) 5382 size += data->callchain->nr; 5383 5384 size *= sizeof(u64); 5385 5386 __output_copy(handle, data->callchain, size); 5387 } else { 5388 u64 nr = 0; 5389 perf_output_put(handle, nr); 5390 } 5391 } 5392 5393 if (sample_type & PERF_SAMPLE_RAW) { 5394 if (data->raw) { 5395 u32 raw_size = data->raw->size; 5396 u32 real_size = round_up(raw_size + sizeof(u32), 5397 sizeof(u64)) - sizeof(u32); 5398 u64 zero = 0; 5399 5400 perf_output_put(handle, real_size); 5401 __output_copy(handle, data->raw->data, raw_size); 5402 if (real_size - raw_size) 5403 __output_copy(handle, &zero, real_size - raw_size); 5404 } else { 5405 struct { 5406 u32 size; 5407 u32 data; 5408 } raw = { 5409 .size = sizeof(u32), 5410 .data = 0, 5411 }; 5412 perf_output_put(handle, raw); 5413 } 5414 } 5415 5416 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5417 if (data->br_stack) { 5418 size_t size; 5419 5420 size = data->br_stack->nr 5421 * sizeof(struct perf_branch_entry); 5422 5423 perf_output_put(handle, data->br_stack->nr); 5424 perf_output_copy(handle, data->br_stack->entries, size); 5425 } else { 5426 /* 5427 * we always store at least the value of nr 5428 */ 5429 u64 nr = 0; 5430 perf_output_put(handle, nr); 5431 } 5432 } 5433 5434 if (sample_type & PERF_SAMPLE_REGS_USER) { 5435 u64 abi = data->regs_user.abi; 5436 5437 /* 5438 * If there are no regs to dump, notice it through 5439 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5440 */ 5441 perf_output_put(handle, abi); 5442 5443 if (abi) { 5444 u64 mask = event->attr.sample_regs_user; 5445 perf_output_sample_regs(handle, 5446 data->regs_user.regs, 5447 mask); 5448 } 5449 } 5450 5451 if (sample_type & PERF_SAMPLE_STACK_USER) { 5452 perf_output_sample_ustack(handle, 5453 data->stack_user_size, 5454 data->regs_user.regs); 5455 } 5456 5457 if (sample_type & PERF_SAMPLE_WEIGHT) 5458 perf_output_put(handle, data->weight); 5459 5460 if (sample_type & PERF_SAMPLE_DATA_SRC) 5461 perf_output_put(handle, data->data_src.val); 5462 5463 if (sample_type & PERF_SAMPLE_TRANSACTION) 5464 perf_output_put(handle, data->txn); 5465 5466 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5467 u64 abi = data->regs_intr.abi; 5468 /* 5469 * If there are no regs to dump, notice it through 5470 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5471 */ 5472 perf_output_put(handle, abi); 5473 5474 if (abi) { 5475 u64 mask = event->attr.sample_regs_intr; 5476 5477 perf_output_sample_regs(handle, 5478 data->regs_intr.regs, 5479 mask); 5480 } 5481 } 5482 5483 if (!event->attr.watermark) { 5484 int wakeup_events = event->attr.wakeup_events; 5485 5486 if (wakeup_events) { 5487 struct ring_buffer *rb = handle->rb; 5488 int events = local_inc_return(&rb->events); 5489 5490 if (events >= wakeup_events) { 5491 local_sub(wakeup_events, &rb->events); 5492 local_inc(&rb->wakeup); 5493 } 5494 } 5495 } 5496 } 5497 5498 void perf_prepare_sample(struct perf_event_header *header, 5499 struct perf_sample_data *data, 5500 struct perf_event *event, 5501 struct pt_regs *regs) 5502 { 5503 u64 sample_type = event->attr.sample_type; 5504 5505 header->type = PERF_RECORD_SAMPLE; 5506 header->size = sizeof(*header) + event->header_size; 5507 5508 header->misc = 0; 5509 header->misc |= perf_misc_flags(regs); 5510 5511 __perf_event_header__init_id(header, data, event); 5512 5513 if (sample_type & PERF_SAMPLE_IP) 5514 data->ip = perf_instruction_pointer(regs); 5515 5516 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5517 int size = 1; 5518 5519 data->callchain = perf_callchain(event, regs); 5520 5521 if (data->callchain) 5522 size += data->callchain->nr; 5523 5524 header->size += size * sizeof(u64); 5525 } 5526 5527 if (sample_type & PERF_SAMPLE_RAW) { 5528 int size = sizeof(u32); 5529 5530 if (data->raw) 5531 size += data->raw->size; 5532 else 5533 size += sizeof(u32); 5534 5535 header->size += round_up(size, sizeof(u64)); 5536 } 5537 5538 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5539 int size = sizeof(u64); /* nr */ 5540 if (data->br_stack) { 5541 size += data->br_stack->nr 5542 * sizeof(struct perf_branch_entry); 5543 } 5544 header->size += size; 5545 } 5546 5547 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 5548 perf_sample_regs_user(&data->regs_user, regs, 5549 &data->regs_user_copy); 5550 5551 if (sample_type & PERF_SAMPLE_REGS_USER) { 5552 /* regs dump ABI info */ 5553 int size = sizeof(u64); 5554 5555 if (data->regs_user.regs) { 5556 u64 mask = event->attr.sample_regs_user; 5557 size += hweight64(mask) * sizeof(u64); 5558 } 5559 5560 header->size += size; 5561 } 5562 5563 if (sample_type & PERF_SAMPLE_STACK_USER) { 5564 /* 5565 * Either we need PERF_SAMPLE_STACK_USER bit to be allways 5566 * processed as the last one or have additional check added 5567 * in case new sample type is added, because we could eat 5568 * up the rest of the sample size. 5569 */ 5570 u16 stack_size = event->attr.sample_stack_user; 5571 u16 size = sizeof(u64); 5572 5573 stack_size = perf_sample_ustack_size(stack_size, header->size, 5574 data->regs_user.regs); 5575 5576 /* 5577 * If there is something to dump, add space for the dump 5578 * itself and for the field that tells the dynamic size, 5579 * which is how many have been actually dumped. 5580 */ 5581 if (stack_size) 5582 size += sizeof(u64) + stack_size; 5583 5584 data->stack_user_size = stack_size; 5585 header->size += size; 5586 } 5587 5588 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5589 /* regs dump ABI info */ 5590 int size = sizeof(u64); 5591 5592 perf_sample_regs_intr(&data->regs_intr, regs); 5593 5594 if (data->regs_intr.regs) { 5595 u64 mask = event->attr.sample_regs_intr; 5596 5597 size += hweight64(mask) * sizeof(u64); 5598 } 5599 5600 header->size += size; 5601 } 5602 } 5603 5604 void perf_event_output(struct perf_event *event, 5605 struct perf_sample_data *data, 5606 struct pt_regs *regs) 5607 { 5608 struct perf_output_handle handle; 5609 struct perf_event_header header; 5610 5611 /* protect the callchain buffers */ 5612 rcu_read_lock(); 5613 5614 perf_prepare_sample(&header, data, event, regs); 5615 5616 if (perf_output_begin(&handle, event, header.size)) 5617 goto exit; 5618 5619 perf_output_sample(&handle, &header, data, event); 5620 5621 perf_output_end(&handle); 5622 5623 exit: 5624 rcu_read_unlock(); 5625 } 5626 5627 /* 5628 * read event_id 5629 */ 5630 5631 struct perf_read_event { 5632 struct perf_event_header header; 5633 5634 u32 pid; 5635 u32 tid; 5636 }; 5637 5638 static void 5639 perf_event_read_event(struct perf_event *event, 5640 struct task_struct *task) 5641 { 5642 struct perf_output_handle handle; 5643 struct perf_sample_data sample; 5644 struct perf_read_event read_event = { 5645 .header = { 5646 .type = PERF_RECORD_READ, 5647 .misc = 0, 5648 .size = sizeof(read_event) + event->read_size, 5649 }, 5650 .pid = perf_event_pid(event, task), 5651 .tid = perf_event_tid(event, task), 5652 }; 5653 int ret; 5654 5655 perf_event_header__init_id(&read_event.header, &sample, event); 5656 ret = perf_output_begin(&handle, event, read_event.header.size); 5657 if (ret) 5658 return; 5659 5660 perf_output_put(&handle, read_event); 5661 perf_output_read(&handle, event); 5662 perf_event__output_id_sample(event, &handle, &sample); 5663 5664 perf_output_end(&handle); 5665 } 5666 5667 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5668 5669 static void 5670 perf_event_aux_ctx(struct perf_event_context *ctx, 5671 perf_event_aux_output_cb output, 5672 void *data) 5673 { 5674 struct perf_event *event; 5675 5676 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5677 if (event->state < PERF_EVENT_STATE_INACTIVE) 5678 continue; 5679 if (!event_filter_match(event)) 5680 continue; 5681 output(event, data); 5682 } 5683 } 5684 5685 static void 5686 perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data, 5687 struct perf_event_context *task_ctx) 5688 { 5689 rcu_read_lock(); 5690 preempt_disable(); 5691 perf_event_aux_ctx(task_ctx, output, data); 5692 preempt_enable(); 5693 rcu_read_unlock(); 5694 } 5695 5696 static void 5697 perf_event_aux(perf_event_aux_output_cb output, void *data, 5698 struct perf_event_context *task_ctx) 5699 { 5700 struct perf_cpu_context *cpuctx; 5701 struct perf_event_context *ctx; 5702 struct pmu *pmu; 5703 int ctxn; 5704 5705 /* 5706 * If we have task_ctx != NULL we only notify 5707 * the task context itself. The task_ctx is set 5708 * only for EXIT events before releasing task 5709 * context. 5710 */ 5711 if (task_ctx) { 5712 perf_event_aux_task_ctx(output, data, task_ctx); 5713 return; 5714 } 5715 5716 rcu_read_lock(); 5717 list_for_each_entry_rcu(pmu, &pmus, entry) { 5718 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5719 if (cpuctx->unique_pmu != pmu) 5720 goto next; 5721 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5722 ctxn = pmu->task_ctx_nr; 5723 if (ctxn < 0) 5724 goto next; 5725 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5726 if (ctx) 5727 perf_event_aux_ctx(ctx, output, data); 5728 next: 5729 put_cpu_ptr(pmu->pmu_cpu_context); 5730 } 5731 rcu_read_unlock(); 5732 } 5733 5734 /* 5735 * task tracking -- fork/exit 5736 * 5737 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5738 */ 5739 5740 struct perf_task_event { 5741 struct task_struct *task; 5742 struct perf_event_context *task_ctx; 5743 5744 struct { 5745 struct perf_event_header header; 5746 5747 u32 pid; 5748 u32 ppid; 5749 u32 tid; 5750 u32 ptid; 5751 u64 time; 5752 } event_id; 5753 }; 5754 5755 static int perf_event_task_match(struct perf_event *event) 5756 { 5757 return event->attr.comm || event->attr.mmap || 5758 event->attr.mmap2 || event->attr.mmap_data || 5759 event->attr.task; 5760 } 5761 5762 static void perf_event_task_output(struct perf_event *event, 5763 void *data) 5764 { 5765 struct perf_task_event *task_event = data; 5766 struct perf_output_handle handle; 5767 struct perf_sample_data sample; 5768 struct task_struct *task = task_event->task; 5769 int ret, size = task_event->event_id.header.size; 5770 5771 if (!perf_event_task_match(event)) 5772 return; 5773 5774 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5775 5776 ret = perf_output_begin(&handle, event, 5777 task_event->event_id.header.size); 5778 if (ret) 5779 goto out; 5780 5781 task_event->event_id.pid = perf_event_pid(event, task); 5782 task_event->event_id.ppid = perf_event_pid(event, current); 5783 5784 task_event->event_id.tid = perf_event_tid(event, task); 5785 task_event->event_id.ptid = perf_event_tid(event, current); 5786 5787 task_event->event_id.time = perf_event_clock(event); 5788 5789 perf_output_put(&handle, task_event->event_id); 5790 5791 perf_event__output_id_sample(event, &handle, &sample); 5792 5793 perf_output_end(&handle); 5794 out: 5795 task_event->event_id.header.size = size; 5796 } 5797 5798 static void perf_event_task(struct task_struct *task, 5799 struct perf_event_context *task_ctx, 5800 int new) 5801 { 5802 struct perf_task_event task_event; 5803 5804 if (!atomic_read(&nr_comm_events) && 5805 !atomic_read(&nr_mmap_events) && 5806 !atomic_read(&nr_task_events)) 5807 return; 5808 5809 task_event = (struct perf_task_event){ 5810 .task = task, 5811 .task_ctx = task_ctx, 5812 .event_id = { 5813 .header = { 5814 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, 5815 .misc = 0, 5816 .size = sizeof(task_event.event_id), 5817 }, 5818 /* .pid */ 5819 /* .ppid */ 5820 /* .tid */ 5821 /* .ptid */ 5822 /* .time */ 5823 }, 5824 }; 5825 5826 perf_event_aux(perf_event_task_output, 5827 &task_event, 5828 task_ctx); 5829 } 5830 5831 void perf_event_fork(struct task_struct *task) 5832 { 5833 perf_event_task(task, NULL, 1); 5834 } 5835 5836 /* 5837 * comm tracking 5838 */ 5839 5840 struct perf_comm_event { 5841 struct task_struct *task; 5842 char *comm; 5843 int comm_size; 5844 5845 struct { 5846 struct perf_event_header header; 5847 5848 u32 pid; 5849 u32 tid; 5850 } event_id; 5851 }; 5852 5853 static int perf_event_comm_match(struct perf_event *event) 5854 { 5855 return event->attr.comm; 5856 } 5857 5858 static void perf_event_comm_output(struct perf_event *event, 5859 void *data) 5860 { 5861 struct perf_comm_event *comm_event = data; 5862 struct perf_output_handle handle; 5863 struct perf_sample_data sample; 5864 int size = comm_event->event_id.header.size; 5865 int ret; 5866 5867 if (!perf_event_comm_match(event)) 5868 return; 5869 5870 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5871 ret = perf_output_begin(&handle, event, 5872 comm_event->event_id.header.size); 5873 5874 if (ret) 5875 goto out; 5876 5877 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5878 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5879 5880 perf_output_put(&handle, comm_event->event_id); 5881 __output_copy(&handle, comm_event->comm, 5882 comm_event->comm_size); 5883 5884 perf_event__output_id_sample(event, &handle, &sample); 5885 5886 perf_output_end(&handle); 5887 out: 5888 comm_event->event_id.header.size = size; 5889 } 5890 5891 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5892 { 5893 char comm[TASK_COMM_LEN]; 5894 unsigned int size; 5895 5896 memset(comm, 0, sizeof(comm)); 5897 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5898 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5899 5900 comm_event->comm = comm; 5901 comm_event->comm_size = size; 5902 5903 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5904 5905 perf_event_aux(perf_event_comm_output, 5906 comm_event, 5907 NULL); 5908 } 5909 5910 void perf_event_comm(struct task_struct *task, bool exec) 5911 { 5912 struct perf_comm_event comm_event; 5913 5914 if (!atomic_read(&nr_comm_events)) 5915 return; 5916 5917 comm_event = (struct perf_comm_event){ 5918 .task = task, 5919 /* .comm */ 5920 /* .comm_size */ 5921 .event_id = { 5922 .header = { 5923 .type = PERF_RECORD_COMM, 5924 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0, 5925 /* .size */ 5926 }, 5927 /* .pid */ 5928 /* .tid */ 5929 }, 5930 }; 5931 5932 perf_event_comm_event(&comm_event); 5933 } 5934 5935 /* 5936 * mmap tracking 5937 */ 5938 5939 struct perf_mmap_event { 5940 struct vm_area_struct *vma; 5941 5942 const char *file_name; 5943 int file_size; 5944 int maj, min; 5945 u64 ino; 5946 u64 ino_generation; 5947 u32 prot, flags; 5948 5949 struct { 5950 struct perf_event_header header; 5951 5952 u32 pid; 5953 u32 tid; 5954 u64 start; 5955 u64 len; 5956 u64 pgoff; 5957 } event_id; 5958 }; 5959 5960 static int perf_event_mmap_match(struct perf_event *event, 5961 void *data) 5962 { 5963 struct perf_mmap_event *mmap_event = data; 5964 struct vm_area_struct *vma = mmap_event->vma; 5965 int executable = vma->vm_flags & VM_EXEC; 5966 5967 return (!executable && event->attr.mmap_data) || 5968 (executable && (event->attr.mmap || event->attr.mmap2)); 5969 } 5970 5971 static void perf_event_mmap_output(struct perf_event *event, 5972 void *data) 5973 { 5974 struct perf_mmap_event *mmap_event = data; 5975 struct perf_output_handle handle; 5976 struct perf_sample_data sample; 5977 int size = mmap_event->event_id.header.size; 5978 int ret; 5979 5980 if (!perf_event_mmap_match(event, data)) 5981 return; 5982 5983 if (event->attr.mmap2) { 5984 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5985 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5986 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5987 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5988 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5989 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5990 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5991 } 5992 5993 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5994 ret = perf_output_begin(&handle, event, 5995 mmap_event->event_id.header.size); 5996 if (ret) 5997 goto out; 5998 5999 mmap_event->event_id.pid = perf_event_pid(event, current); 6000 mmap_event->event_id.tid = perf_event_tid(event, current); 6001 6002 perf_output_put(&handle, mmap_event->event_id); 6003 6004 if (event->attr.mmap2) { 6005 perf_output_put(&handle, mmap_event->maj); 6006 perf_output_put(&handle, mmap_event->min); 6007 perf_output_put(&handle, mmap_event->ino); 6008 perf_output_put(&handle, mmap_event->ino_generation); 6009 perf_output_put(&handle, mmap_event->prot); 6010 perf_output_put(&handle, mmap_event->flags); 6011 } 6012 6013 __output_copy(&handle, mmap_event->file_name, 6014 mmap_event->file_size); 6015 6016 perf_event__output_id_sample(event, &handle, &sample); 6017 6018 perf_output_end(&handle); 6019 out: 6020 mmap_event->event_id.header.size = size; 6021 } 6022 6023 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 6024 { 6025 struct vm_area_struct *vma = mmap_event->vma; 6026 struct file *file = vma->vm_file; 6027 int maj = 0, min = 0; 6028 u64 ino = 0, gen = 0; 6029 u32 prot = 0, flags = 0; 6030 unsigned int size; 6031 char tmp[16]; 6032 char *buf = NULL; 6033 char *name; 6034 6035 if (file) { 6036 struct inode *inode; 6037 dev_t dev; 6038 6039 buf = kmalloc(PATH_MAX, GFP_KERNEL); 6040 if (!buf) { 6041 name = "//enomem"; 6042 goto cpy_name; 6043 } 6044 /* 6045 * d_path() works from the end of the rb backwards, so we 6046 * need to add enough zero bytes after the string to handle 6047 * the 64bit alignment we do later. 6048 */ 6049 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 6050 if (IS_ERR(name)) { 6051 name = "//toolong"; 6052 goto cpy_name; 6053 } 6054 inode = file_inode(vma->vm_file); 6055 dev = inode->i_sb->s_dev; 6056 ino = inode->i_ino; 6057 gen = inode->i_generation; 6058 maj = MAJOR(dev); 6059 min = MINOR(dev); 6060 6061 if (vma->vm_flags & VM_READ) 6062 prot |= PROT_READ; 6063 if (vma->vm_flags & VM_WRITE) 6064 prot |= PROT_WRITE; 6065 if (vma->vm_flags & VM_EXEC) 6066 prot |= PROT_EXEC; 6067 6068 if (vma->vm_flags & VM_MAYSHARE) 6069 flags = MAP_SHARED; 6070 else 6071 flags = MAP_PRIVATE; 6072 6073 if (vma->vm_flags & VM_DENYWRITE) 6074 flags |= MAP_DENYWRITE; 6075 if (vma->vm_flags & VM_MAYEXEC) 6076 flags |= MAP_EXECUTABLE; 6077 if (vma->vm_flags & VM_LOCKED) 6078 flags |= MAP_LOCKED; 6079 if (vma->vm_flags & VM_HUGETLB) 6080 flags |= MAP_HUGETLB; 6081 6082 goto got_name; 6083 } else { 6084 if (vma->vm_ops && vma->vm_ops->name) { 6085 name = (char *) vma->vm_ops->name(vma); 6086 if (name) 6087 goto cpy_name; 6088 } 6089 6090 name = (char *)arch_vma_name(vma); 6091 if (name) 6092 goto cpy_name; 6093 6094 if (vma->vm_start <= vma->vm_mm->start_brk && 6095 vma->vm_end >= vma->vm_mm->brk) { 6096 name = "[heap]"; 6097 goto cpy_name; 6098 } 6099 if (vma->vm_start <= vma->vm_mm->start_stack && 6100 vma->vm_end >= vma->vm_mm->start_stack) { 6101 name = "[stack]"; 6102 goto cpy_name; 6103 } 6104 6105 name = "//anon"; 6106 goto cpy_name; 6107 } 6108 6109 cpy_name: 6110 strlcpy(tmp, name, sizeof(tmp)); 6111 name = tmp; 6112 got_name: 6113 /* 6114 * Since our buffer works in 8 byte units we need to align our string 6115 * size to a multiple of 8. However, we must guarantee the tail end is 6116 * zero'd out to avoid leaking random bits to userspace. 6117 */ 6118 size = strlen(name)+1; 6119 while (!IS_ALIGNED(size, sizeof(u64))) 6120 name[size++] = '\0'; 6121 6122 mmap_event->file_name = name; 6123 mmap_event->file_size = size; 6124 mmap_event->maj = maj; 6125 mmap_event->min = min; 6126 mmap_event->ino = ino; 6127 mmap_event->ino_generation = gen; 6128 mmap_event->prot = prot; 6129 mmap_event->flags = flags; 6130 6131 if (!(vma->vm_flags & VM_EXEC)) 6132 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 6133 6134 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 6135 6136 perf_event_aux(perf_event_mmap_output, 6137 mmap_event, 6138 NULL); 6139 6140 kfree(buf); 6141 } 6142 6143 void perf_event_mmap(struct vm_area_struct *vma) 6144 { 6145 struct perf_mmap_event mmap_event; 6146 6147 if (!atomic_read(&nr_mmap_events)) 6148 return; 6149 6150 mmap_event = (struct perf_mmap_event){ 6151 .vma = vma, 6152 /* .file_name */ 6153 /* .file_size */ 6154 .event_id = { 6155 .header = { 6156 .type = PERF_RECORD_MMAP, 6157 .misc = PERF_RECORD_MISC_USER, 6158 /* .size */ 6159 }, 6160 /* .pid */ 6161 /* .tid */ 6162 .start = vma->vm_start, 6163 .len = vma->vm_end - vma->vm_start, 6164 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 6165 }, 6166 /* .maj (attr_mmap2 only) */ 6167 /* .min (attr_mmap2 only) */ 6168 /* .ino (attr_mmap2 only) */ 6169 /* .ino_generation (attr_mmap2 only) */ 6170 /* .prot (attr_mmap2 only) */ 6171 /* .flags (attr_mmap2 only) */ 6172 }; 6173 6174 perf_event_mmap_event(&mmap_event); 6175 } 6176 6177 void perf_event_aux_event(struct perf_event *event, unsigned long head, 6178 unsigned long size, u64 flags) 6179 { 6180 struct perf_output_handle handle; 6181 struct perf_sample_data sample; 6182 struct perf_aux_event { 6183 struct perf_event_header header; 6184 u64 offset; 6185 u64 size; 6186 u64 flags; 6187 } rec = { 6188 .header = { 6189 .type = PERF_RECORD_AUX, 6190 .misc = 0, 6191 .size = sizeof(rec), 6192 }, 6193 .offset = head, 6194 .size = size, 6195 .flags = flags, 6196 }; 6197 int ret; 6198 6199 perf_event_header__init_id(&rec.header, &sample, event); 6200 ret = perf_output_begin(&handle, event, rec.header.size); 6201 6202 if (ret) 6203 return; 6204 6205 perf_output_put(&handle, rec); 6206 perf_event__output_id_sample(event, &handle, &sample); 6207 6208 perf_output_end(&handle); 6209 } 6210 6211 /* 6212 * Lost/dropped samples logging 6213 */ 6214 void perf_log_lost_samples(struct perf_event *event, u64 lost) 6215 { 6216 struct perf_output_handle handle; 6217 struct perf_sample_data sample; 6218 int ret; 6219 6220 struct { 6221 struct perf_event_header header; 6222 u64 lost; 6223 } lost_samples_event = { 6224 .header = { 6225 .type = PERF_RECORD_LOST_SAMPLES, 6226 .misc = 0, 6227 .size = sizeof(lost_samples_event), 6228 }, 6229 .lost = lost, 6230 }; 6231 6232 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 6233 6234 ret = perf_output_begin(&handle, event, 6235 lost_samples_event.header.size); 6236 if (ret) 6237 return; 6238 6239 perf_output_put(&handle, lost_samples_event); 6240 perf_event__output_id_sample(event, &handle, &sample); 6241 perf_output_end(&handle); 6242 } 6243 6244 /* 6245 * context_switch tracking 6246 */ 6247 6248 struct perf_switch_event { 6249 struct task_struct *task; 6250 struct task_struct *next_prev; 6251 6252 struct { 6253 struct perf_event_header header; 6254 u32 next_prev_pid; 6255 u32 next_prev_tid; 6256 } event_id; 6257 }; 6258 6259 static int perf_event_switch_match(struct perf_event *event) 6260 { 6261 return event->attr.context_switch; 6262 } 6263 6264 static void perf_event_switch_output(struct perf_event *event, void *data) 6265 { 6266 struct perf_switch_event *se = data; 6267 struct perf_output_handle handle; 6268 struct perf_sample_data sample; 6269 int ret; 6270 6271 if (!perf_event_switch_match(event)) 6272 return; 6273 6274 /* Only CPU-wide events are allowed to see next/prev pid/tid */ 6275 if (event->ctx->task) { 6276 se->event_id.header.type = PERF_RECORD_SWITCH; 6277 se->event_id.header.size = sizeof(se->event_id.header); 6278 } else { 6279 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; 6280 se->event_id.header.size = sizeof(se->event_id); 6281 se->event_id.next_prev_pid = 6282 perf_event_pid(event, se->next_prev); 6283 se->event_id.next_prev_tid = 6284 perf_event_tid(event, se->next_prev); 6285 } 6286 6287 perf_event_header__init_id(&se->event_id.header, &sample, event); 6288 6289 ret = perf_output_begin(&handle, event, se->event_id.header.size); 6290 if (ret) 6291 return; 6292 6293 if (event->ctx->task) 6294 perf_output_put(&handle, se->event_id.header); 6295 else 6296 perf_output_put(&handle, se->event_id); 6297 6298 perf_event__output_id_sample(event, &handle, &sample); 6299 6300 perf_output_end(&handle); 6301 } 6302 6303 static void perf_event_switch(struct task_struct *task, 6304 struct task_struct *next_prev, bool sched_in) 6305 { 6306 struct perf_switch_event switch_event; 6307 6308 /* N.B. caller checks nr_switch_events != 0 */ 6309 6310 switch_event = (struct perf_switch_event){ 6311 .task = task, 6312 .next_prev = next_prev, 6313 .event_id = { 6314 .header = { 6315 /* .type */ 6316 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT, 6317 /* .size */ 6318 }, 6319 /* .next_prev_pid */ 6320 /* .next_prev_tid */ 6321 }, 6322 }; 6323 6324 perf_event_aux(perf_event_switch_output, 6325 &switch_event, 6326 NULL); 6327 } 6328 6329 /* 6330 * IRQ throttle logging 6331 */ 6332 6333 static void perf_log_throttle(struct perf_event *event, int enable) 6334 { 6335 struct perf_output_handle handle; 6336 struct perf_sample_data sample; 6337 int ret; 6338 6339 struct { 6340 struct perf_event_header header; 6341 u64 time; 6342 u64 id; 6343 u64 stream_id; 6344 } throttle_event = { 6345 .header = { 6346 .type = PERF_RECORD_THROTTLE, 6347 .misc = 0, 6348 .size = sizeof(throttle_event), 6349 }, 6350 .time = perf_event_clock(event), 6351 .id = primary_event_id(event), 6352 .stream_id = event->id, 6353 }; 6354 6355 if (enable) 6356 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6357 6358 perf_event_header__init_id(&throttle_event.header, &sample, event); 6359 6360 ret = perf_output_begin(&handle, event, 6361 throttle_event.header.size); 6362 if (ret) 6363 return; 6364 6365 perf_output_put(&handle, throttle_event); 6366 perf_event__output_id_sample(event, &handle, &sample); 6367 perf_output_end(&handle); 6368 } 6369 6370 static void perf_log_itrace_start(struct perf_event *event) 6371 { 6372 struct perf_output_handle handle; 6373 struct perf_sample_data sample; 6374 struct perf_aux_event { 6375 struct perf_event_header header; 6376 u32 pid; 6377 u32 tid; 6378 } rec; 6379 int ret; 6380 6381 if (event->parent) 6382 event = event->parent; 6383 6384 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6385 event->hw.itrace_started) 6386 return; 6387 6388 rec.header.type = PERF_RECORD_ITRACE_START; 6389 rec.header.misc = 0; 6390 rec.header.size = sizeof(rec); 6391 rec.pid = perf_event_pid(event, current); 6392 rec.tid = perf_event_tid(event, current); 6393 6394 perf_event_header__init_id(&rec.header, &sample, event); 6395 ret = perf_output_begin(&handle, event, rec.header.size); 6396 6397 if (ret) 6398 return; 6399 6400 perf_output_put(&handle, rec); 6401 perf_event__output_id_sample(event, &handle, &sample); 6402 6403 perf_output_end(&handle); 6404 } 6405 6406 /* 6407 * Generic event overflow handling, sampling. 6408 */ 6409 6410 static int __perf_event_overflow(struct perf_event *event, 6411 int throttle, struct perf_sample_data *data, 6412 struct pt_regs *regs) 6413 { 6414 int events = atomic_read(&event->event_limit); 6415 struct hw_perf_event *hwc = &event->hw; 6416 u64 seq; 6417 int ret = 0; 6418 6419 /* 6420 * Non-sampling counters might still use the PMI to fold short 6421 * hardware counters, ignore those. 6422 */ 6423 if (unlikely(!is_sampling_event(event))) 6424 return 0; 6425 6426 seq = __this_cpu_read(perf_throttled_seq); 6427 if (seq != hwc->interrupts_seq) { 6428 hwc->interrupts_seq = seq; 6429 hwc->interrupts = 1; 6430 } else { 6431 hwc->interrupts++; 6432 if (unlikely(throttle 6433 && hwc->interrupts >= max_samples_per_tick)) { 6434 __this_cpu_inc(perf_throttled_count); 6435 hwc->interrupts = MAX_INTERRUPTS; 6436 perf_log_throttle(event, 0); 6437 tick_nohz_full_kick(); 6438 ret = 1; 6439 } 6440 } 6441 6442 if (event->attr.freq) { 6443 u64 now = perf_clock(); 6444 s64 delta = now - hwc->freq_time_stamp; 6445 6446 hwc->freq_time_stamp = now; 6447 6448 if (delta > 0 && delta < 2*TICK_NSEC) 6449 perf_adjust_period(event, delta, hwc->last_period, true); 6450 } 6451 6452 /* 6453 * XXX event_limit might not quite work as expected on inherited 6454 * events 6455 */ 6456 6457 event->pending_kill = POLL_IN; 6458 if (events && atomic_dec_and_test(&event->event_limit)) { 6459 ret = 1; 6460 event->pending_kill = POLL_HUP; 6461 event->pending_disable = 1; 6462 irq_work_queue(&event->pending); 6463 } 6464 6465 if (event->overflow_handler) 6466 event->overflow_handler(event, data, regs); 6467 else 6468 perf_event_output(event, data, regs); 6469 6470 if (*perf_event_fasync(event) && event->pending_kill) { 6471 event->pending_wakeup = 1; 6472 irq_work_queue(&event->pending); 6473 } 6474 6475 return ret; 6476 } 6477 6478 int perf_event_overflow(struct perf_event *event, 6479 struct perf_sample_data *data, 6480 struct pt_regs *regs) 6481 { 6482 return __perf_event_overflow(event, 1, data, regs); 6483 } 6484 6485 /* 6486 * Generic software event infrastructure 6487 */ 6488 6489 struct swevent_htable { 6490 struct swevent_hlist *swevent_hlist; 6491 struct mutex hlist_mutex; 6492 int hlist_refcount; 6493 6494 /* Recursion avoidance in each contexts */ 6495 int recursion[PERF_NR_CONTEXTS]; 6496 6497 /* Keeps track of cpu being initialized/exited */ 6498 bool online; 6499 }; 6500 6501 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6502 6503 /* 6504 * We directly increment event->count and keep a second value in 6505 * event->hw.period_left to count intervals. This period event 6506 * is kept in the range [-sample_period, 0] so that we can use the 6507 * sign as trigger. 6508 */ 6509 6510 u64 perf_swevent_set_period(struct perf_event *event) 6511 { 6512 struct hw_perf_event *hwc = &event->hw; 6513 u64 period = hwc->last_period; 6514 u64 nr, offset; 6515 s64 old, val; 6516 6517 hwc->last_period = hwc->sample_period; 6518 6519 again: 6520 old = val = local64_read(&hwc->period_left); 6521 if (val < 0) 6522 return 0; 6523 6524 nr = div64_u64(period + val, period); 6525 offset = nr * period; 6526 val -= offset; 6527 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6528 goto again; 6529 6530 return nr; 6531 } 6532 6533 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6534 struct perf_sample_data *data, 6535 struct pt_regs *regs) 6536 { 6537 struct hw_perf_event *hwc = &event->hw; 6538 int throttle = 0; 6539 6540 if (!overflow) 6541 overflow = perf_swevent_set_period(event); 6542 6543 if (hwc->interrupts == MAX_INTERRUPTS) 6544 return; 6545 6546 for (; overflow; overflow--) { 6547 if (__perf_event_overflow(event, throttle, 6548 data, regs)) { 6549 /* 6550 * We inhibit the overflow from happening when 6551 * hwc->interrupts == MAX_INTERRUPTS. 6552 */ 6553 break; 6554 } 6555 throttle = 1; 6556 } 6557 } 6558 6559 static void perf_swevent_event(struct perf_event *event, u64 nr, 6560 struct perf_sample_data *data, 6561 struct pt_regs *regs) 6562 { 6563 struct hw_perf_event *hwc = &event->hw; 6564 6565 local64_add(nr, &event->count); 6566 6567 if (!regs) 6568 return; 6569 6570 if (!is_sampling_event(event)) 6571 return; 6572 6573 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6574 data->period = nr; 6575 return perf_swevent_overflow(event, 1, data, regs); 6576 } else 6577 data->period = event->hw.last_period; 6578 6579 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6580 return perf_swevent_overflow(event, 1, data, regs); 6581 6582 if (local64_add_negative(nr, &hwc->period_left)) 6583 return; 6584 6585 perf_swevent_overflow(event, 0, data, regs); 6586 } 6587 6588 static int perf_exclude_event(struct perf_event *event, 6589 struct pt_regs *regs) 6590 { 6591 if (event->hw.state & PERF_HES_STOPPED) 6592 return 1; 6593 6594 if (regs) { 6595 if (event->attr.exclude_user && user_mode(regs)) 6596 return 1; 6597 6598 if (event->attr.exclude_kernel && !user_mode(regs)) 6599 return 1; 6600 } 6601 6602 return 0; 6603 } 6604 6605 static int perf_swevent_match(struct perf_event *event, 6606 enum perf_type_id type, 6607 u32 event_id, 6608 struct perf_sample_data *data, 6609 struct pt_regs *regs) 6610 { 6611 if (event->attr.type != type) 6612 return 0; 6613 6614 if (event->attr.config != event_id) 6615 return 0; 6616 6617 if (perf_exclude_event(event, regs)) 6618 return 0; 6619 6620 return 1; 6621 } 6622 6623 static inline u64 swevent_hash(u64 type, u32 event_id) 6624 { 6625 u64 val = event_id | (type << 32); 6626 6627 return hash_64(val, SWEVENT_HLIST_BITS); 6628 } 6629 6630 static inline struct hlist_head * 6631 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6632 { 6633 u64 hash = swevent_hash(type, event_id); 6634 6635 return &hlist->heads[hash]; 6636 } 6637 6638 /* For the read side: events when they trigger */ 6639 static inline struct hlist_head * 6640 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6641 { 6642 struct swevent_hlist *hlist; 6643 6644 hlist = rcu_dereference(swhash->swevent_hlist); 6645 if (!hlist) 6646 return NULL; 6647 6648 return __find_swevent_head(hlist, type, event_id); 6649 } 6650 6651 /* For the event head insertion and removal in the hlist */ 6652 static inline struct hlist_head * 6653 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6654 { 6655 struct swevent_hlist *hlist; 6656 u32 event_id = event->attr.config; 6657 u64 type = event->attr.type; 6658 6659 /* 6660 * Event scheduling is always serialized against hlist allocation 6661 * and release. Which makes the protected version suitable here. 6662 * The context lock guarantees that. 6663 */ 6664 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6665 lockdep_is_held(&event->ctx->lock)); 6666 if (!hlist) 6667 return NULL; 6668 6669 return __find_swevent_head(hlist, type, event_id); 6670 } 6671 6672 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6673 u64 nr, 6674 struct perf_sample_data *data, 6675 struct pt_regs *regs) 6676 { 6677 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6678 struct perf_event *event; 6679 struct hlist_head *head; 6680 6681 rcu_read_lock(); 6682 head = find_swevent_head_rcu(swhash, type, event_id); 6683 if (!head) 6684 goto end; 6685 6686 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6687 if (perf_swevent_match(event, type, event_id, data, regs)) 6688 perf_swevent_event(event, nr, data, regs); 6689 } 6690 end: 6691 rcu_read_unlock(); 6692 } 6693 6694 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6695 6696 int perf_swevent_get_recursion_context(void) 6697 { 6698 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6699 6700 return get_recursion_context(swhash->recursion); 6701 } 6702 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6703 6704 inline void perf_swevent_put_recursion_context(int rctx) 6705 { 6706 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6707 6708 put_recursion_context(swhash->recursion, rctx); 6709 } 6710 6711 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6712 { 6713 struct perf_sample_data data; 6714 6715 if (WARN_ON_ONCE(!regs)) 6716 return; 6717 6718 perf_sample_data_init(&data, addr, 0); 6719 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6720 } 6721 6722 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6723 { 6724 int rctx; 6725 6726 preempt_disable_notrace(); 6727 rctx = perf_swevent_get_recursion_context(); 6728 if (unlikely(rctx < 0)) 6729 goto fail; 6730 6731 ___perf_sw_event(event_id, nr, regs, addr); 6732 6733 perf_swevent_put_recursion_context(rctx); 6734 fail: 6735 preempt_enable_notrace(); 6736 } 6737 6738 static void perf_swevent_read(struct perf_event *event) 6739 { 6740 } 6741 6742 static int perf_swevent_add(struct perf_event *event, int flags) 6743 { 6744 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6745 struct hw_perf_event *hwc = &event->hw; 6746 struct hlist_head *head; 6747 6748 if (is_sampling_event(event)) { 6749 hwc->last_period = hwc->sample_period; 6750 perf_swevent_set_period(event); 6751 } 6752 6753 hwc->state = !(flags & PERF_EF_START); 6754 6755 head = find_swevent_head(swhash, event); 6756 if (!head) { 6757 /* 6758 * We can race with cpu hotplug code. Do not 6759 * WARN if the cpu just got unplugged. 6760 */ 6761 WARN_ON_ONCE(swhash->online); 6762 return -EINVAL; 6763 } 6764 6765 hlist_add_head_rcu(&event->hlist_entry, head); 6766 perf_event_update_userpage(event); 6767 6768 return 0; 6769 } 6770 6771 static void perf_swevent_del(struct perf_event *event, int flags) 6772 { 6773 hlist_del_rcu(&event->hlist_entry); 6774 } 6775 6776 static void perf_swevent_start(struct perf_event *event, int flags) 6777 { 6778 event->hw.state = 0; 6779 } 6780 6781 static void perf_swevent_stop(struct perf_event *event, int flags) 6782 { 6783 event->hw.state = PERF_HES_STOPPED; 6784 } 6785 6786 /* Deref the hlist from the update side */ 6787 static inline struct swevent_hlist * 6788 swevent_hlist_deref(struct swevent_htable *swhash) 6789 { 6790 return rcu_dereference_protected(swhash->swevent_hlist, 6791 lockdep_is_held(&swhash->hlist_mutex)); 6792 } 6793 6794 static void swevent_hlist_release(struct swevent_htable *swhash) 6795 { 6796 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6797 6798 if (!hlist) 6799 return; 6800 6801 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6802 kfree_rcu(hlist, rcu_head); 6803 } 6804 6805 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6806 { 6807 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6808 6809 mutex_lock(&swhash->hlist_mutex); 6810 6811 if (!--swhash->hlist_refcount) 6812 swevent_hlist_release(swhash); 6813 6814 mutex_unlock(&swhash->hlist_mutex); 6815 } 6816 6817 static void swevent_hlist_put(struct perf_event *event) 6818 { 6819 int cpu; 6820 6821 for_each_possible_cpu(cpu) 6822 swevent_hlist_put_cpu(event, cpu); 6823 } 6824 6825 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6826 { 6827 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6828 int err = 0; 6829 6830 mutex_lock(&swhash->hlist_mutex); 6831 6832 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6833 struct swevent_hlist *hlist; 6834 6835 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6836 if (!hlist) { 6837 err = -ENOMEM; 6838 goto exit; 6839 } 6840 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6841 } 6842 swhash->hlist_refcount++; 6843 exit: 6844 mutex_unlock(&swhash->hlist_mutex); 6845 6846 return err; 6847 } 6848 6849 static int swevent_hlist_get(struct perf_event *event) 6850 { 6851 int err; 6852 int cpu, failed_cpu; 6853 6854 get_online_cpus(); 6855 for_each_possible_cpu(cpu) { 6856 err = swevent_hlist_get_cpu(event, cpu); 6857 if (err) { 6858 failed_cpu = cpu; 6859 goto fail; 6860 } 6861 } 6862 put_online_cpus(); 6863 6864 return 0; 6865 fail: 6866 for_each_possible_cpu(cpu) { 6867 if (cpu == failed_cpu) 6868 break; 6869 swevent_hlist_put_cpu(event, cpu); 6870 } 6871 6872 put_online_cpus(); 6873 return err; 6874 } 6875 6876 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6877 6878 static void sw_perf_event_destroy(struct perf_event *event) 6879 { 6880 u64 event_id = event->attr.config; 6881 6882 WARN_ON(event->parent); 6883 6884 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6885 swevent_hlist_put(event); 6886 } 6887 6888 static int perf_swevent_init(struct perf_event *event) 6889 { 6890 u64 event_id = event->attr.config; 6891 6892 if (event->attr.type != PERF_TYPE_SOFTWARE) 6893 return -ENOENT; 6894 6895 /* 6896 * no branch sampling for software events 6897 */ 6898 if (has_branch_stack(event)) 6899 return -EOPNOTSUPP; 6900 6901 switch (event_id) { 6902 case PERF_COUNT_SW_CPU_CLOCK: 6903 case PERF_COUNT_SW_TASK_CLOCK: 6904 return -ENOENT; 6905 6906 default: 6907 break; 6908 } 6909 6910 if (event_id >= PERF_COUNT_SW_MAX) 6911 return -ENOENT; 6912 6913 if (!event->parent) { 6914 int err; 6915 6916 err = swevent_hlist_get(event); 6917 if (err) 6918 return err; 6919 6920 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6921 event->destroy = sw_perf_event_destroy; 6922 } 6923 6924 return 0; 6925 } 6926 6927 static struct pmu perf_swevent = { 6928 .task_ctx_nr = perf_sw_context, 6929 6930 .capabilities = PERF_PMU_CAP_NO_NMI, 6931 6932 .event_init = perf_swevent_init, 6933 .add = perf_swevent_add, 6934 .del = perf_swevent_del, 6935 .start = perf_swevent_start, 6936 .stop = perf_swevent_stop, 6937 .read = perf_swevent_read, 6938 }; 6939 6940 #ifdef CONFIG_EVENT_TRACING 6941 6942 static int perf_tp_filter_match(struct perf_event *event, 6943 struct perf_sample_data *data) 6944 { 6945 void *record = data->raw->data; 6946 6947 /* only top level events have filters set */ 6948 if (event->parent) 6949 event = event->parent; 6950 6951 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6952 return 1; 6953 return 0; 6954 } 6955 6956 static int perf_tp_event_match(struct perf_event *event, 6957 struct perf_sample_data *data, 6958 struct pt_regs *regs) 6959 { 6960 if (event->hw.state & PERF_HES_STOPPED) 6961 return 0; 6962 /* 6963 * All tracepoints are from kernel-space. 6964 */ 6965 if (event->attr.exclude_kernel) 6966 return 0; 6967 6968 if (!perf_tp_filter_match(event, data)) 6969 return 0; 6970 6971 return 1; 6972 } 6973 6974 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6975 struct pt_regs *regs, struct hlist_head *head, int rctx, 6976 struct task_struct *task) 6977 { 6978 struct perf_sample_data data; 6979 struct perf_event *event; 6980 6981 struct perf_raw_record raw = { 6982 .size = entry_size, 6983 .data = record, 6984 }; 6985 6986 perf_sample_data_init(&data, addr, 0); 6987 data.raw = &raw; 6988 6989 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6990 if (perf_tp_event_match(event, &data, regs)) 6991 perf_swevent_event(event, count, &data, regs); 6992 } 6993 6994 /* 6995 * If we got specified a target task, also iterate its context and 6996 * deliver this event there too. 6997 */ 6998 if (task && task != current) { 6999 struct perf_event_context *ctx; 7000 struct trace_entry *entry = record; 7001 7002 rcu_read_lock(); 7003 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 7004 if (!ctx) 7005 goto unlock; 7006 7007 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 7008 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7009 continue; 7010 if (event->attr.config != entry->type) 7011 continue; 7012 if (perf_tp_event_match(event, &data, regs)) 7013 perf_swevent_event(event, count, &data, regs); 7014 } 7015 unlock: 7016 rcu_read_unlock(); 7017 } 7018 7019 perf_swevent_put_recursion_context(rctx); 7020 } 7021 EXPORT_SYMBOL_GPL(perf_tp_event); 7022 7023 static void tp_perf_event_destroy(struct perf_event *event) 7024 { 7025 perf_trace_destroy(event); 7026 } 7027 7028 static int perf_tp_event_init(struct perf_event *event) 7029 { 7030 int err; 7031 7032 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7033 return -ENOENT; 7034 7035 /* 7036 * no branch sampling for tracepoint events 7037 */ 7038 if (has_branch_stack(event)) 7039 return -EOPNOTSUPP; 7040 7041 err = perf_trace_init(event); 7042 if (err) 7043 return err; 7044 7045 event->destroy = tp_perf_event_destroy; 7046 7047 return 0; 7048 } 7049 7050 static struct pmu perf_tracepoint = { 7051 .task_ctx_nr = perf_sw_context, 7052 7053 .event_init = perf_tp_event_init, 7054 .add = perf_trace_add, 7055 .del = perf_trace_del, 7056 .start = perf_swevent_start, 7057 .stop = perf_swevent_stop, 7058 .read = perf_swevent_read, 7059 }; 7060 7061 static inline void perf_tp_register(void) 7062 { 7063 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 7064 } 7065 7066 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 7067 { 7068 char *filter_str; 7069 int ret; 7070 7071 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7072 return -EINVAL; 7073 7074 filter_str = strndup_user(arg, PAGE_SIZE); 7075 if (IS_ERR(filter_str)) 7076 return PTR_ERR(filter_str); 7077 7078 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 7079 7080 kfree(filter_str); 7081 return ret; 7082 } 7083 7084 static void perf_event_free_filter(struct perf_event *event) 7085 { 7086 ftrace_profile_free_filter(event); 7087 } 7088 7089 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 7090 { 7091 struct bpf_prog *prog; 7092 7093 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7094 return -EINVAL; 7095 7096 if (event->tp_event->prog) 7097 return -EEXIST; 7098 7099 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) 7100 /* bpf programs can only be attached to u/kprobes */ 7101 return -EINVAL; 7102 7103 prog = bpf_prog_get(prog_fd); 7104 if (IS_ERR(prog)) 7105 return PTR_ERR(prog); 7106 7107 if (prog->type != BPF_PROG_TYPE_KPROBE) { 7108 /* valid fd, but invalid bpf program type */ 7109 bpf_prog_put(prog); 7110 return -EINVAL; 7111 } 7112 7113 event->tp_event->prog = prog; 7114 7115 return 0; 7116 } 7117 7118 static void perf_event_free_bpf_prog(struct perf_event *event) 7119 { 7120 struct bpf_prog *prog; 7121 7122 if (!event->tp_event) 7123 return; 7124 7125 prog = event->tp_event->prog; 7126 if (prog) { 7127 event->tp_event->prog = NULL; 7128 bpf_prog_put(prog); 7129 } 7130 } 7131 7132 #else 7133 7134 static inline void perf_tp_register(void) 7135 { 7136 } 7137 7138 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 7139 { 7140 return -ENOENT; 7141 } 7142 7143 static void perf_event_free_filter(struct perf_event *event) 7144 { 7145 } 7146 7147 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 7148 { 7149 return -ENOENT; 7150 } 7151 7152 static void perf_event_free_bpf_prog(struct perf_event *event) 7153 { 7154 } 7155 #endif /* CONFIG_EVENT_TRACING */ 7156 7157 #ifdef CONFIG_HAVE_HW_BREAKPOINT 7158 void perf_bp_event(struct perf_event *bp, void *data) 7159 { 7160 struct perf_sample_data sample; 7161 struct pt_regs *regs = data; 7162 7163 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 7164 7165 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 7166 perf_swevent_event(bp, 1, &sample, regs); 7167 } 7168 #endif 7169 7170 /* 7171 * hrtimer based swevent callback 7172 */ 7173 7174 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 7175 { 7176 enum hrtimer_restart ret = HRTIMER_RESTART; 7177 struct perf_sample_data data; 7178 struct pt_regs *regs; 7179 struct perf_event *event; 7180 u64 period; 7181 7182 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 7183 7184 if (event->state != PERF_EVENT_STATE_ACTIVE) 7185 return HRTIMER_NORESTART; 7186 7187 event->pmu->read(event); 7188 7189 perf_sample_data_init(&data, 0, event->hw.last_period); 7190 regs = get_irq_regs(); 7191 7192 if (regs && !perf_exclude_event(event, regs)) { 7193 if (!(event->attr.exclude_idle && is_idle_task(current))) 7194 if (__perf_event_overflow(event, 1, &data, regs)) 7195 ret = HRTIMER_NORESTART; 7196 } 7197 7198 period = max_t(u64, 10000, event->hw.sample_period); 7199 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 7200 7201 return ret; 7202 } 7203 7204 static void perf_swevent_start_hrtimer(struct perf_event *event) 7205 { 7206 struct hw_perf_event *hwc = &event->hw; 7207 s64 period; 7208 7209 if (!is_sampling_event(event)) 7210 return; 7211 7212 period = local64_read(&hwc->period_left); 7213 if (period) { 7214 if (period < 0) 7215 period = 10000; 7216 7217 local64_set(&hwc->period_left, 0); 7218 } else { 7219 period = max_t(u64, 10000, hwc->sample_period); 7220 } 7221 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 7222 HRTIMER_MODE_REL_PINNED); 7223 } 7224 7225 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 7226 { 7227 struct hw_perf_event *hwc = &event->hw; 7228 7229 if (is_sampling_event(event)) { 7230 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 7231 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 7232 7233 hrtimer_cancel(&hwc->hrtimer); 7234 } 7235 } 7236 7237 static void perf_swevent_init_hrtimer(struct perf_event *event) 7238 { 7239 struct hw_perf_event *hwc = &event->hw; 7240 7241 if (!is_sampling_event(event)) 7242 return; 7243 7244 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7245 hwc->hrtimer.function = perf_swevent_hrtimer; 7246 7247 /* 7248 * Since hrtimers have a fixed rate, we can do a static freq->period 7249 * mapping and avoid the whole period adjust feedback stuff. 7250 */ 7251 if (event->attr.freq) { 7252 long freq = event->attr.sample_freq; 7253 7254 event->attr.sample_period = NSEC_PER_SEC / freq; 7255 hwc->sample_period = event->attr.sample_period; 7256 local64_set(&hwc->period_left, hwc->sample_period); 7257 hwc->last_period = hwc->sample_period; 7258 event->attr.freq = 0; 7259 } 7260 } 7261 7262 /* 7263 * Software event: cpu wall time clock 7264 */ 7265 7266 static void cpu_clock_event_update(struct perf_event *event) 7267 { 7268 s64 prev; 7269 u64 now; 7270 7271 now = local_clock(); 7272 prev = local64_xchg(&event->hw.prev_count, now); 7273 local64_add(now - prev, &event->count); 7274 } 7275 7276 static void cpu_clock_event_start(struct perf_event *event, int flags) 7277 { 7278 local64_set(&event->hw.prev_count, local_clock()); 7279 perf_swevent_start_hrtimer(event); 7280 } 7281 7282 static void cpu_clock_event_stop(struct perf_event *event, int flags) 7283 { 7284 perf_swevent_cancel_hrtimer(event); 7285 cpu_clock_event_update(event); 7286 } 7287 7288 static int cpu_clock_event_add(struct perf_event *event, int flags) 7289 { 7290 if (flags & PERF_EF_START) 7291 cpu_clock_event_start(event, flags); 7292 perf_event_update_userpage(event); 7293 7294 return 0; 7295 } 7296 7297 static void cpu_clock_event_del(struct perf_event *event, int flags) 7298 { 7299 cpu_clock_event_stop(event, flags); 7300 } 7301 7302 static void cpu_clock_event_read(struct perf_event *event) 7303 { 7304 cpu_clock_event_update(event); 7305 } 7306 7307 static int cpu_clock_event_init(struct perf_event *event) 7308 { 7309 if (event->attr.type != PERF_TYPE_SOFTWARE) 7310 return -ENOENT; 7311 7312 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 7313 return -ENOENT; 7314 7315 /* 7316 * no branch sampling for software events 7317 */ 7318 if (has_branch_stack(event)) 7319 return -EOPNOTSUPP; 7320 7321 perf_swevent_init_hrtimer(event); 7322 7323 return 0; 7324 } 7325 7326 static struct pmu perf_cpu_clock = { 7327 .task_ctx_nr = perf_sw_context, 7328 7329 .capabilities = PERF_PMU_CAP_NO_NMI, 7330 7331 .event_init = cpu_clock_event_init, 7332 .add = cpu_clock_event_add, 7333 .del = cpu_clock_event_del, 7334 .start = cpu_clock_event_start, 7335 .stop = cpu_clock_event_stop, 7336 .read = cpu_clock_event_read, 7337 }; 7338 7339 /* 7340 * Software event: task time clock 7341 */ 7342 7343 static void task_clock_event_update(struct perf_event *event, u64 now) 7344 { 7345 u64 prev; 7346 s64 delta; 7347 7348 prev = local64_xchg(&event->hw.prev_count, now); 7349 delta = now - prev; 7350 local64_add(delta, &event->count); 7351 } 7352 7353 static void task_clock_event_start(struct perf_event *event, int flags) 7354 { 7355 local64_set(&event->hw.prev_count, event->ctx->time); 7356 perf_swevent_start_hrtimer(event); 7357 } 7358 7359 static void task_clock_event_stop(struct perf_event *event, int flags) 7360 { 7361 perf_swevent_cancel_hrtimer(event); 7362 task_clock_event_update(event, event->ctx->time); 7363 } 7364 7365 static int task_clock_event_add(struct perf_event *event, int flags) 7366 { 7367 if (flags & PERF_EF_START) 7368 task_clock_event_start(event, flags); 7369 perf_event_update_userpage(event); 7370 7371 return 0; 7372 } 7373 7374 static void task_clock_event_del(struct perf_event *event, int flags) 7375 { 7376 task_clock_event_stop(event, PERF_EF_UPDATE); 7377 } 7378 7379 static void task_clock_event_read(struct perf_event *event) 7380 { 7381 u64 now = perf_clock(); 7382 u64 delta = now - event->ctx->timestamp; 7383 u64 time = event->ctx->time + delta; 7384 7385 task_clock_event_update(event, time); 7386 } 7387 7388 static int task_clock_event_init(struct perf_event *event) 7389 { 7390 if (event->attr.type != PERF_TYPE_SOFTWARE) 7391 return -ENOENT; 7392 7393 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7394 return -ENOENT; 7395 7396 /* 7397 * no branch sampling for software events 7398 */ 7399 if (has_branch_stack(event)) 7400 return -EOPNOTSUPP; 7401 7402 perf_swevent_init_hrtimer(event); 7403 7404 return 0; 7405 } 7406 7407 static struct pmu perf_task_clock = { 7408 .task_ctx_nr = perf_sw_context, 7409 7410 .capabilities = PERF_PMU_CAP_NO_NMI, 7411 7412 .event_init = task_clock_event_init, 7413 .add = task_clock_event_add, 7414 .del = task_clock_event_del, 7415 .start = task_clock_event_start, 7416 .stop = task_clock_event_stop, 7417 .read = task_clock_event_read, 7418 }; 7419 7420 static void perf_pmu_nop_void(struct pmu *pmu) 7421 { 7422 } 7423 7424 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) 7425 { 7426 } 7427 7428 static int perf_pmu_nop_int(struct pmu *pmu) 7429 { 7430 return 0; 7431 } 7432 7433 static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 7434 7435 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 7436 { 7437 __this_cpu_write(nop_txn_flags, flags); 7438 7439 if (flags & ~PERF_PMU_TXN_ADD) 7440 return; 7441 7442 perf_pmu_disable(pmu); 7443 } 7444 7445 static int perf_pmu_commit_txn(struct pmu *pmu) 7446 { 7447 unsigned int flags = __this_cpu_read(nop_txn_flags); 7448 7449 __this_cpu_write(nop_txn_flags, 0); 7450 7451 if (flags & ~PERF_PMU_TXN_ADD) 7452 return 0; 7453 7454 perf_pmu_enable(pmu); 7455 return 0; 7456 } 7457 7458 static void perf_pmu_cancel_txn(struct pmu *pmu) 7459 { 7460 unsigned int flags = __this_cpu_read(nop_txn_flags); 7461 7462 __this_cpu_write(nop_txn_flags, 0); 7463 7464 if (flags & ~PERF_PMU_TXN_ADD) 7465 return; 7466 7467 perf_pmu_enable(pmu); 7468 } 7469 7470 static int perf_event_idx_default(struct perf_event *event) 7471 { 7472 return 0; 7473 } 7474 7475 /* 7476 * Ensures all contexts with the same task_ctx_nr have the same 7477 * pmu_cpu_context too. 7478 */ 7479 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7480 { 7481 struct pmu *pmu; 7482 7483 if (ctxn < 0) 7484 return NULL; 7485 7486 list_for_each_entry(pmu, &pmus, entry) { 7487 if (pmu->task_ctx_nr == ctxn) 7488 return pmu->pmu_cpu_context; 7489 } 7490 7491 return NULL; 7492 } 7493 7494 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7495 { 7496 int cpu; 7497 7498 for_each_possible_cpu(cpu) { 7499 struct perf_cpu_context *cpuctx; 7500 7501 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7502 7503 if (cpuctx->unique_pmu == old_pmu) 7504 cpuctx->unique_pmu = pmu; 7505 } 7506 } 7507 7508 static void free_pmu_context(struct pmu *pmu) 7509 { 7510 struct pmu *i; 7511 7512 mutex_lock(&pmus_lock); 7513 /* 7514 * Like a real lame refcount. 7515 */ 7516 list_for_each_entry(i, &pmus, entry) { 7517 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7518 update_pmu_context(i, pmu); 7519 goto out; 7520 } 7521 } 7522 7523 free_percpu(pmu->pmu_cpu_context); 7524 out: 7525 mutex_unlock(&pmus_lock); 7526 } 7527 static struct idr pmu_idr; 7528 7529 static ssize_t 7530 type_show(struct device *dev, struct device_attribute *attr, char *page) 7531 { 7532 struct pmu *pmu = dev_get_drvdata(dev); 7533 7534 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7535 } 7536 static DEVICE_ATTR_RO(type); 7537 7538 static ssize_t 7539 perf_event_mux_interval_ms_show(struct device *dev, 7540 struct device_attribute *attr, 7541 char *page) 7542 { 7543 struct pmu *pmu = dev_get_drvdata(dev); 7544 7545 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7546 } 7547 7548 static DEFINE_MUTEX(mux_interval_mutex); 7549 7550 static ssize_t 7551 perf_event_mux_interval_ms_store(struct device *dev, 7552 struct device_attribute *attr, 7553 const char *buf, size_t count) 7554 { 7555 struct pmu *pmu = dev_get_drvdata(dev); 7556 int timer, cpu, ret; 7557 7558 ret = kstrtoint(buf, 0, &timer); 7559 if (ret) 7560 return ret; 7561 7562 if (timer < 1) 7563 return -EINVAL; 7564 7565 /* same value, noting to do */ 7566 if (timer == pmu->hrtimer_interval_ms) 7567 return count; 7568 7569 mutex_lock(&mux_interval_mutex); 7570 pmu->hrtimer_interval_ms = timer; 7571 7572 /* update all cpuctx for this PMU */ 7573 get_online_cpus(); 7574 for_each_online_cpu(cpu) { 7575 struct perf_cpu_context *cpuctx; 7576 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7577 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7578 7579 cpu_function_call(cpu, 7580 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7581 } 7582 put_online_cpus(); 7583 mutex_unlock(&mux_interval_mutex); 7584 7585 return count; 7586 } 7587 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7588 7589 static struct attribute *pmu_dev_attrs[] = { 7590 &dev_attr_type.attr, 7591 &dev_attr_perf_event_mux_interval_ms.attr, 7592 NULL, 7593 }; 7594 ATTRIBUTE_GROUPS(pmu_dev); 7595 7596 static int pmu_bus_running; 7597 static struct bus_type pmu_bus = { 7598 .name = "event_source", 7599 .dev_groups = pmu_dev_groups, 7600 }; 7601 7602 static void pmu_dev_release(struct device *dev) 7603 { 7604 kfree(dev); 7605 } 7606 7607 static int pmu_dev_alloc(struct pmu *pmu) 7608 { 7609 int ret = -ENOMEM; 7610 7611 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7612 if (!pmu->dev) 7613 goto out; 7614 7615 pmu->dev->groups = pmu->attr_groups; 7616 device_initialize(pmu->dev); 7617 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7618 if (ret) 7619 goto free_dev; 7620 7621 dev_set_drvdata(pmu->dev, pmu); 7622 pmu->dev->bus = &pmu_bus; 7623 pmu->dev->release = pmu_dev_release; 7624 ret = device_add(pmu->dev); 7625 if (ret) 7626 goto free_dev; 7627 7628 out: 7629 return ret; 7630 7631 free_dev: 7632 put_device(pmu->dev); 7633 goto out; 7634 } 7635 7636 static struct lock_class_key cpuctx_mutex; 7637 static struct lock_class_key cpuctx_lock; 7638 7639 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7640 { 7641 int cpu, ret; 7642 7643 mutex_lock(&pmus_lock); 7644 ret = -ENOMEM; 7645 pmu->pmu_disable_count = alloc_percpu(int); 7646 if (!pmu->pmu_disable_count) 7647 goto unlock; 7648 7649 pmu->type = -1; 7650 if (!name) 7651 goto skip_type; 7652 pmu->name = name; 7653 7654 if (type < 0) { 7655 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7656 if (type < 0) { 7657 ret = type; 7658 goto free_pdc; 7659 } 7660 } 7661 pmu->type = type; 7662 7663 if (pmu_bus_running) { 7664 ret = pmu_dev_alloc(pmu); 7665 if (ret) 7666 goto free_idr; 7667 } 7668 7669 skip_type: 7670 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7671 if (pmu->pmu_cpu_context) 7672 goto got_cpu_context; 7673 7674 ret = -ENOMEM; 7675 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7676 if (!pmu->pmu_cpu_context) 7677 goto free_dev; 7678 7679 for_each_possible_cpu(cpu) { 7680 struct perf_cpu_context *cpuctx; 7681 7682 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7683 __perf_event_init_context(&cpuctx->ctx); 7684 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7685 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7686 cpuctx->ctx.pmu = pmu; 7687 7688 __perf_mux_hrtimer_init(cpuctx, cpu); 7689 7690 cpuctx->unique_pmu = pmu; 7691 } 7692 7693 got_cpu_context: 7694 if (!pmu->start_txn) { 7695 if (pmu->pmu_enable) { 7696 /* 7697 * If we have pmu_enable/pmu_disable calls, install 7698 * transaction stubs that use that to try and batch 7699 * hardware accesses. 7700 */ 7701 pmu->start_txn = perf_pmu_start_txn; 7702 pmu->commit_txn = perf_pmu_commit_txn; 7703 pmu->cancel_txn = perf_pmu_cancel_txn; 7704 } else { 7705 pmu->start_txn = perf_pmu_nop_txn; 7706 pmu->commit_txn = perf_pmu_nop_int; 7707 pmu->cancel_txn = perf_pmu_nop_void; 7708 } 7709 } 7710 7711 if (!pmu->pmu_enable) { 7712 pmu->pmu_enable = perf_pmu_nop_void; 7713 pmu->pmu_disable = perf_pmu_nop_void; 7714 } 7715 7716 if (!pmu->event_idx) 7717 pmu->event_idx = perf_event_idx_default; 7718 7719 list_add_rcu(&pmu->entry, &pmus); 7720 atomic_set(&pmu->exclusive_cnt, 0); 7721 ret = 0; 7722 unlock: 7723 mutex_unlock(&pmus_lock); 7724 7725 return ret; 7726 7727 free_dev: 7728 device_del(pmu->dev); 7729 put_device(pmu->dev); 7730 7731 free_idr: 7732 if (pmu->type >= PERF_TYPE_MAX) 7733 idr_remove(&pmu_idr, pmu->type); 7734 7735 free_pdc: 7736 free_percpu(pmu->pmu_disable_count); 7737 goto unlock; 7738 } 7739 EXPORT_SYMBOL_GPL(perf_pmu_register); 7740 7741 void perf_pmu_unregister(struct pmu *pmu) 7742 { 7743 mutex_lock(&pmus_lock); 7744 list_del_rcu(&pmu->entry); 7745 mutex_unlock(&pmus_lock); 7746 7747 /* 7748 * We dereference the pmu list under both SRCU and regular RCU, so 7749 * synchronize against both of those. 7750 */ 7751 synchronize_srcu(&pmus_srcu); 7752 synchronize_rcu(); 7753 7754 free_percpu(pmu->pmu_disable_count); 7755 if (pmu->type >= PERF_TYPE_MAX) 7756 idr_remove(&pmu_idr, pmu->type); 7757 device_del(pmu->dev); 7758 put_device(pmu->dev); 7759 free_pmu_context(pmu); 7760 } 7761 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7762 7763 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7764 { 7765 struct perf_event_context *ctx = NULL; 7766 int ret; 7767 7768 if (!try_module_get(pmu->module)) 7769 return -ENODEV; 7770 7771 if (event->group_leader != event) { 7772 /* 7773 * This ctx->mutex can nest when we're called through 7774 * inheritance. See the perf_event_ctx_lock_nested() comment. 7775 */ 7776 ctx = perf_event_ctx_lock_nested(event->group_leader, 7777 SINGLE_DEPTH_NESTING); 7778 BUG_ON(!ctx); 7779 } 7780 7781 event->pmu = pmu; 7782 ret = pmu->event_init(event); 7783 7784 if (ctx) 7785 perf_event_ctx_unlock(event->group_leader, ctx); 7786 7787 if (ret) 7788 module_put(pmu->module); 7789 7790 return ret; 7791 } 7792 7793 static struct pmu *perf_init_event(struct perf_event *event) 7794 { 7795 struct pmu *pmu = NULL; 7796 int idx; 7797 int ret; 7798 7799 idx = srcu_read_lock(&pmus_srcu); 7800 7801 rcu_read_lock(); 7802 pmu = idr_find(&pmu_idr, event->attr.type); 7803 rcu_read_unlock(); 7804 if (pmu) { 7805 ret = perf_try_init_event(pmu, event); 7806 if (ret) 7807 pmu = ERR_PTR(ret); 7808 goto unlock; 7809 } 7810 7811 list_for_each_entry_rcu(pmu, &pmus, entry) { 7812 ret = perf_try_init_event(pmu, event); 7813 if (!ret) 7814 goto unlock; 7815 7816 if (ret != -ENOENT) { 7817 pmu = ERR_PTR(ret); 7818 goto unlock; 7819 } 7820 } 7821 pmu = ERR_PTR(-ENOENT); 7822 unlock: 7823 srcu_read_unlock(&pmus_srcu, idx); 7824 7825 return pmu; 7826 } 7827 7828 static void account_event_cpu(struct perf_event *event, int cpu) 7829 { 7830 if (event->parent) 7831 return; 7832 7833 if (is_cgroup_event(event)) 7834 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7835 } 7836 7837 static void account_event(struct perf_event *event) 7838 { 7839 if (event->parent) 7840 return; 7841 7842 if (event->attach_state & PERF_ATTACH_TASK) 7843 static_key_slow_inc(&perf_sched_events.key); 7844 if (event->attr.mmap || event->attr.mmap_data) 7845 atomic_inc(&nr_mmap_events); 7846 if (event->attr.comm) 7847 atomic_inc(&nr_comm_events); 7848 if (event->attr.task) 7849 atomic_inc(&nr_task_events); 7850 if (event->attr.freq) { 7851 if (atomic_inc_return(&nr_freq_events) == 1) 7852 tick_nohz_full_kick_all(); 7853 } 7854 if (event->attr.context_switch) { 7855 atomic_inc(&nr_switch_events); 7856 static_key_slow_inc(&perf_sched_events.key); 7857 } 7858 if (has_branch_stack(event)) 7859 static_key_slow_inc(&perf_sched_events.key); 7860 if (is_cgroup_event(event)) 7861 static_key_slow_inc(&perf_sched_events.key); 7862 7863 account_event_cpu(event, event->cpu); 7864 } 7865 7866 /* 7867 * Allocate and initialize a event structure 7868 */ 7869 static struct perf_event * 7870 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7871 struct task_struct *task, 7872 struct perf_event *group_leader, 7873 struct perf_event *parent_event, 7874 perf_overflow_handler_t overflow_handler, 7875 void *context, int cgroup_fd) 7876 { 7877 struct pmu *pmu; 7878 struct perf_event *event; 7879 struct hw_perf_event *hwc; 7880 long err = -EINVAL; 7881 7882 if ((unsigned)cpu >= nr_cpu_ids) { 7883 if (!task || cpu != -1) 7884 return ERR_PTR(-EINVAL); 7885 } 7886 7887 event = kzalloc(sizeof(*event), GFP_KERNEL); 7888 if (!event) 7889 return ERR_PTR(-ENOMEM); 7890 7891 /* 7892 * Single events are their own group leaders, with an 7893 * empty sibling list: 7894 */ 7895 if (!group_leader) 7896 group_leader = event; 7897 7898 mutex_init(&event->child_mutex); 7899 INIT_LIST_HEAD(&event->child_list); 7900 7901 INIT_LIST_HEAD(&event->group_entry); 7902 INIT_LIST_HEAD(&event->event_entry); 7903 INIT_LIST_HEAD(&event->sibling_list); 7904 INIT_LIST_HEAD(&event->rb_entry); 7905 INIT_LIST_HEAD(&event->active_entry); 7906 INIT_HLIST_NODE(&event->hlist_entry); 7907 7908 7909 init_waitqueue_head(&event->waitq); 7910 init_irq_work(&event->pending, perf_pending_event); 7911 7912 mutex_init(&event->mmap_mutex); 7913 7914 atomic_long_set(&event->refcount, 1); 7915 event->cpu = cpu; 7916 event->attr = *attr; 7917 event->group_leader = group_leader; 7918 event->pmu = NULL; 7919 event->oncpu = -1; 7920 7921 event->parent = parent_event; 7922 7923 event->ns = get_pid_ns(task_active_pid_ns(current)); 7924 event->id = atomic64_inc_return(&perf_event_id); 7925 7926 event->state = PERF_EVENT_STATE_INACTIVE; 7927 7928 if (task) { 7929 event->attach_state = PERF_ATTACH_TASK; 7930 /* 7931 * XXX pmu::event_init needs to know what task to account to 7932 * and we cannot use the ctx information because we need the 7933 * pmu before we get a ctx. 7934 */ 7935 event->hw.target = task; 7936 } 7937 7938 event->clock = &local_clock; 7939 if (parent_event) 7940 event->clock = parent_event->clock; 7941 7942 if (!overflow_handler && parent_event) { 7943 overflow_handler = parent_event->overflow_handler; 7944 context = parent_event->overflow_handler_context; 7945 } 7946 7947 event->overflow_handler = overflow_handler; 7948 event->overflow_handler_context = context; 7949 7950 perf_event__state_init(event); 7951 7952 pmu = NULL; 7953 7954 hwc = &event->hw; 7955 hwc->sample_period = attr->sample_period; 7956 if (attr->freq && attr->sample_freq) 7957 hwc->sample_period = 1; 7958 hwc->last_period = hwc->sample_period; 7959 7960 local64_set(&hwc->period_left, hwc->sample_period); 7961 7962 /* 7963 * we currently do not support PERF_FORMAT_GROUP on inherited events 7964 */ 7965 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7966 goto err_ns; 7967 7968 if (!has_branch_stack(event)) 7969 event->attr.branch_sample_type = 0; 7970 7971 if (cgroup_fd != -1) { 7972 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7973 if (err) 7974 goto err_ns; 7975 } 7976 7977 pmu = perf_init_event(event); 7978 if (!pmu) 7979 goto err_ns; 7980 else if (IS_ERR(pmu)) { 7981 err = PTR_ERR(pmu); 7982 goto err_ns; 7983 } 7984 7985 err = exclusive_event_init(event); 7986 if (err) 7987 goto err_pmu; 7988 7989 if (!event->parent) { 7990 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7991 err = get_callchain_buffers(); 7992 if (err) 7993 goto err_per_task; 7994 } 7995 } 7996 7997 return event; 7998 7999 err_per_task: 8000 exclusive_event_destroy(event); 8001 8002 err_pmu: 8003 if (event->destroy) 8004 event->destroy(event); 8005 module_put(pmu->module); 8006 err_ns: 8007 if (is_cgroup_event(event)) 8008 perf_detach_cgroup(event); 8009 if (event->ns) 8010 put_pid_ns(event->ns); 8011 kfree(event); 8012 8013 return ERR_PTR(err); 8014 } 8015 8016 static int perf_copy_attr(struct perf_event_attr __user *uattr, 8017 struct perf_event_attr *attr) 8018 { 8019 u32 size; 8020 int ret; 8021 8022 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 8023 return -EFAULT; 8024 8025 /* 8026 * zero the full structure, so that a short copy will be nice. 8027 */ 8028 memset(attr, 0, sizeof(*attr)); 8029 8030 ret = get_user(size, &uattr->size); 8031 if (ret) 8032 return ret; 8033 8034 if (size > PAGE_SIZE) /* silly large */ 8035 goto err_size; 8036 8037 if (!size) /* abi compat */ 8038 size = PERF_ATTR_SIZE_VER0; 8039 8040 if (size < PERF_ATTR_SIZE_VER0) 8041 goto err_size; 8042 8043 /* 8044 * If we're handed a bigger struct than we know of, 8045 * ensure all the unknown bits are 0 - i.e. new 8046 * user-space does not rely on any kernel feature 8047 * extensions we dont know about yet. 8048 */ 8049 if (size > sizeof(*attr)) { 8050 unsigned char __user *addr; 8051 unsigned char __user *end; 8052 unsigned char val; 8053 8054 addr = (void __user *)uattr + sizeof(*attr); 8055 end = (void __user *)uattr + size; 8056 8057 for (; addr < end; addr++) { 8058 ret = get_user(val, addr); 8059 if (ret) 8060 return ret; 8061 if (val) 8062 goto err_size; 8063 } 8064 size = sizeof(*attr); 8065 } 8066 8067 ret = copy_from_user(attr, uattr, size); 8068 if (ret) 8069 return -EFAULT; 8070 8071 if (attr->__reserved_1) 8072 return -EINVAL; 8073 8074 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 8075 return -EINVAL; 8076 8077 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 8078 return -EINVAL; 8079 8080 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 8081 u64 mask = attr->branch_sample_type; 8082 8083 /* only using defined bits */ 8084 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 8085 return -EINVAL; 8086 8087 /* at least one branch bit must be set */ 8088 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 8089 return -EINVAL; 8090 8091 /* propagate priv level, when not set for branch */ 8092 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 8093 8094 /* exclude_kernel checked on syscall entry */ 8095 if (!attr->exclude_kernel) 8096 mask |= PERF_SAMPLE_BRANCH_KERNEL; 8097 8098 if (!attr->exclude_user) 8099 mask |= PERF_SAMPLE_BRANCH_USER; 8100 8101 if (!attr->exclude_hv) 8102 mask |= PERF_SAMPLE_BRANCH_HV; 8103 /* 8104 * adjust user setting (for HW filter setup) 8105 */ 8106 attr->branch_sample_type = mask; 8107 } 8108 /* privileged levels capture (kernel, hv): check permissions */ 8109 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 8110 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8111 return -EACCES; 8112 } 8113 8114 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 8115 ret = perf_reg_validate(attr->sample_regs_user); 8116 if (ret) 8117 return ret; 8118 } 8119 8120 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 8121 if (!arch_perf_have_user_stack_dump()) 8122 return -ENOSYS; 8123 8124 /* 8125 * We have __u32 type for the size, but so far 8126 * we can only use __u16 as maximum due to the 8127 * __u16 sample size limit. 8128 */ 8129 if (attr->sample_stack_user >= USHRT_MAX) 8130 ret = -EINVAL; 8131 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 8132 ret = -EINVAL; 8133 } 8134 8135 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 8136 ret = perf_reg_validate(attr->sample_regs_intr); 8137 out: 8138 return ret; 8139 8140 err_size: 8141 put_user(sizeof(*attr), &uattr->size); 8142 ret = -E2BIG; 8143 goto out; 8144 } 8145 8146 static int 8147 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 8148 { 8149 struct ring_buffer *rb = NULL; 8150 int ret = -EINVAL; 8151 8152 if (!output_event) 8153 goto set; 8154 8155 /* don't allow circular references */ 8156 if (event == output_event) 8157 goto out; 8158 8159 /* 8160 * Don't allow cross-cpu buffers 8161 */ 8162 if (output_event->cpu != event->cpu) 8163 goto out; 8164 8165 /* 8166 * If its not a per-cpu rb, it must be the same task. 8167 */ 8168 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 8169 goto out; 8170 8171 /* 8172 * Mixing clocks in the same buffer is trouble you don't need. 8173 */ 8174 if (output_event->clock != event->clock) 8175 goto out; 8176 8177 /* 8178 * If both events generate aux data, they must be on the same PMU 8179 */ 8180 if (has_aux(event) && has_aux(output_event) && 8181 event->pmu != output_event->pmu) 8182 goto out; 8183 8184 set: 8185 mutex_lock(&event->mmap_mutex); 8186 /* Can't redirect output if we've got an active mmap() */ 8187 if (atomic_read(&event->mmap_count)) 8188 goto unlock; 8189 8190 if (output_event) { 8191 /* get the rb we want to redirect to */ 8192 rb = ring_buffer_get(output_event); 8193 if (!rb) 8194 goto unlock; 8195 } 8196 8197 ring_buffer_attach(event, rb); 8198 8199 ret = 0; 8200 unlock: 8201 mutex_unlock(&event->mmap_mutex); 8202 8203 out: 8204 return ret; 8205 } 8206 8207 static void mutex_lock_double(struct mutex *a, struct mutex *b) 8208 { 8209 if (b < a) 8210 swap(a, b); 8211 8212 mutex_lock(a); 8213 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 8214 } 8215 8216 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 8217 { 8218 bool nmi_safe = false; 8219 8220 switch (clk_id) { 8221 case CLOCK_MONOTONIC: 8222 event->clock = &ktime_get_mono_fast_ns; 8223 nmi_safe = true; 8224 break; 8225 8226 case CLOCK_MONOTONIC_RAW: 8227 event->clock = &ktime_get_raw_fast_ns; 8228 nmi_safe = true; 8229 break; 8230 8231 case CLOCK_REALTIME: 8232 event->clock = &ktime_get_real_ns; 8233 break; 8234 8235 case CLOCK_BOOTTIME: 8236 event->clock = &ktime_get_boot_ns; 8237 break; 8238 8239 case CLOCK_TAI: 8240 event->clock = &ktime_get_tai_ns; 8241 break; 8242 8243 default: 8244 return -EINVAL; 8245 } 8246 8247 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 8248 return -EINVAL; 8249 8250 return 0; 8251 } 8252 8253 /** 8254 * sys_perf_event_open - open a performance event, associate it to a task/cpu 8255 * 8256 * @attr_uptr: event_id type attributes for monitoring/sampling 8257 * @pid: target pid 8258 * @cpu: target cpu 8259 * @group_fd: group leader event fd 8260 */ 8261 SYSCALL_DEFINE5(perf_event_open, 8262 struct perf_event_attr __user *, attr_uptr, 8263 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 8264 { 8265 struct perf_event *group_leader = NULL, *output_event = NULL; 8266 struct perf_event *event, *sibling; 8267 struct perf_event_attr attr; 8268 struct perf_event_context *ctx, *uninitialized_var(gctx); 8269 struct file *event_file = NULL; 8270 struct fd group = {NULL, 0}; 8271 struct task_struct *task = NULL; 8272 struct pmu *pmu; 8273 int event_fd; 8274 int move_group = 0; 8275 int err; 8276 int f_flags = O_RDWR; 8277 int cgroup_fd = -1; 8278 8279 /* for future expandability... */ 8280 if (flags & ~PERF_FLAG_ALL) 8281 return -EINVAL; 8282 8283 err = perf_copy_attr(attr_uptr, &attr); 8284 if (err) 8285 return err; 8286 8287 if (!attr.exclude_kernel) { 8288 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8289 return -EACCES; 8290 } 8291 8292 if (attr.freq) { 8293 if (attr.sample_freq > sysctl_perf_event_sample_rate) 8294 return -EINVAL; 8295 } else { 8296 if (attr.sample_period & (1ULL << 63)) 8297 return -EINVAL; 8298 } 8299 8300 /* 8301 * In cgroup mode, the pid argument is used to pass the fd 8302 * opened to the cgroup directory in cgroupfs. The cpu argument 8303 * designates the cpu on which to monitor threads from that 8304 * cgroup. 8305 */ 8306 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 8307 return -EINVAL; 8308 8309 if (flags & PERF_FLAG_FD_CLOEXEC) 8310 f_flags |= O_CLOEXEC; 8311 8312 event_fd = get_unused_fd_flags(f_flags); 8313 if (event_fd < 0) 8314 return event_fd; 8315 8316 if (group_fd != -1) { 8317 err = perf_fget_light(group_fd, &group); 8318 if (err) 8319 goto err_fd; 8320 group_leader = group.file->private_data; 8321 if (flags & PERF_FLAG_FD_OUTPUT) 8322 output_event = group_leader; 8323 if (flags & PERF_FLAG_FD_NO_GROUP) 8324 group_leader = NULL; 8325 } 8326 8327 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 8328 task = find_lively_task_by_vpid(pid); 8329 if (IS_ERR(task)) { 8330 err = PTR_ERR(task); 8331 goto err_group_fd; 8332 } 8333 } 8334 8335 if (task && group_leader && 8336 group_leader->attr.inherit != attr.inherit) { 8337 err = -EINVAL; 8338 goto err_task; 8339 } 8340 8341 get_online_cpus(); 8342 8343 if (flags & PERF_FLAG_PID_CGROUP) 8344 cgroup_fd = pid; 8345 8346 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 8347 NULL, NULL, cgroup_fd); 8348 if (IS_ERR(event)) { 8349 err = PTR_ERR(event); 8350 goto err_cpus; 8351 } 8352 8353 if (is_sampling_event(event)) { 8354 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 8355 err = -ENOTSUPP; 8356 goto err_alloc; 8357 } 8358 } 8359 8360 account_event(event); 8361 8362 /* 8363 * Special case software events and allow them to be part of 8364 * any hardware group. 8365 */ 8366 pmu = event->pmu; 8367 8368 if (attr.use_clockid) { 8369 err = perf_event_set_clock(event, attr.clockid); 8370 if (err) 8371 goto err_alloc; 8372 } 8373 8374 if (group_leader && 8375 (is_software_event(event) != is_software_event(group_leader))) { 8376 if (is_software_event(event)) { 8377 /* 8378 * If event and group_leader are not both a software 8379 * event, and event is, then group leader is not. 8380 * 8381 * Allow the addition of software events to !software 8382 * groups, this is safe because software events never 8383 * fail to schedule. 8384 */ 8385 pmu = group_leader->pmu; 8386 } else if (is_software_event(group_leader) && 8387 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8388 /* 8389 * In case the group is a pure software group, and we 8390 * try to add a hardware event, move the whole group to 8391 * the hardware context. 8392 */ 8393 move_group = 1; 8394 } 8395 } 8396 8397 /* 8398 * Get the target context (task or percpu): 8399 */ 8400 ctx = find_get_context(pmu, task, event); 8401 if (IS_ERR(ctx)) { 8402 err = PTR_ERR(ctx); 8403 goto err_alloc; 8404 } 8405 8406 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8407 err = -EBUSY; 8408 goto err_context; 8409 } 8410 8411 if (task) { 8412 put_task_struct(task); 8413 task = NULL; 8414 } 8415 8416 /* 8417 * Look up the group leader (we will attach this event to it): 8418 */ 8419 if (group_leader) { 8420 err = -EINVAL; 8421 8422 /* 8423 * Do not allow a recursive hierarchy (this new sibling 8424 * becoming part of another group-sibling): 8425 */ 8426 if (group_leader->group_leader != group_leader) 8427 goto err_context; 8428 8429 /* All events in a group should have the same clock */ 8430 if (group_leader->clock != event->clock) 8431 goto err_context; 8432 8433 /* 8434 * Do not allow to attach to a group in a different 8435 * task or CPU context: 8436 */ 8437 if (move_group) { 8438 /* 8439 * Make sure we're both on the same task, or both 8440 * per-cpu events. 8441 */ 8442 if (group_leader->ctx->task != ctx->task) 8443 goto err_context; 8444 8445 /* 8446 * Make sure we're both events for the same CPU; 8447 * grouping events for different CPUs is broken; since 8448 * you can never concurrently schedule them anyhow. 8449 */ 8450 if (group_leader->cpu != event->cpu) 8451 goto err_context; 8452 } else { 8453 if (group_leader->ctx != ctx) 8454 goto err_context; 8455 } 8456 8457 /* 8458 * Only a group leader can be exclusive or pinned 8459 */ 8460 if (attr.exclusive || attr.pinned) 8461 goto err_context; 8462 } 8463 8464 if (output_event) { 8465 err = perf_event_set_output(event, output_event); 8466 if (err) 8467 goto err_context; 8468 } 8469 8470 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 8471 f_flags); 8472 if (IS_ERR(event_file)) { 8473 err = PTR_ERR(event_file); 8474 goto err_context; 8475 } 8476 8477 if (move_group) { 8478 gctx = group_leader->ctx; 8479 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8480 } else { 8481 mutex_lock(&ctx->mutex); 8482 } 8483 8484 if (!perf_event_validate_size(event)) { 8485 err = -E2BIG; 8486 goto err_locked; 8487 } 8488 8489 /* 8490 * Must be under the same ctx::mutex as perf_install_in_context(), 8491 * because we need to serialize with concurrent event creation. 8492 */ 8493 if (!exclusive_event_installable(event, ctx)) { 8494 /* exclusive and group stuff are assumed mutually exclusive */ 8495 WARN_ON_ONCE(move_group); 8496 8497 err = -EBUSY; 8498 goto err_locked; 8499 } 8500 8501 WARN_ON_ONCE(ctx->parent_ctx); 8502 8503 if (move_group) { 8504 /* 8505 * See perf_event_ctx_lock() for comments on the details 8506 * of swizzling perf_event::ctx. 8507 */ 8508 perf_remove_from_context(group_leader, false); 8509 8510 list_for_each_entry(sibling, &group_leader->sibling_list, 8511 group_entry) { 8512 perf_remove_from_context(sibling, false); 8513 put_ctx(gctx); 8514 } 8515 8516 /* 8517 * Wait for everybody to stop referencing the events through 8518 * the old lists, before installing it on new lists. 8519 */ 8520 synchronize_rcu(); 8521 8522 /* 8523 * Install the group siblings before the group leader. 8524 * 8525 * Because a group leader will try and install the entire group 8526 * (through the sibling list, which is still in-tact), we can 8527 * end up with siblings installed in the wrong context. 8528 * 8529 * By installing siblings first we NO-OP because they're not 8530 * reachable through the group lists. 8531 */ 8532 list_for_each_entry(sibling, &group_leader->sibling_list, 8533 group_entry) { 8534 perf_event__state_init(sibling); 8535 perf_install_in_context(ctx, sibling, sibling->cpu); 8536 get_ctx(ctx); 8537 } 8538 8539 /* 8540 * Removing from the context ends up with disabled 8541 * event. What we want here is event in the initial 8542 * startup state, ready to be add into new context. 8543 */ 8544 perf_event__state_init(group_leader); 8545 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8546 get_ctx(ctx); 8547 8548 /* 8549 * Now that all events are installed in @ctx, nothing 8550 * references @gctx anymore, so drop the last reference we have 8551 * on it. 8552 */ 8553 put_ctx(gctx); 8554 } 8555 8556 /* 8557 * Precalculate sample_data sizes; do while holding ctx::mutex such 8558 * that we're serialized against further additions and before 8559 * perf_install_in_context() which is the point the event is active and 8560 * can use these values. 8561 */ 8562 perf_event__header_size(event); 8563 perf_event__id_header_size(event); 8564 8565 perf_install_in_context(ctx, event, event->cpu); 8566 perf_unpin_context(ctx); 8567 8568 if (move_group) 8569 mutex_unlock(&gctx->mutex); 8570 mutex_unlock(&ctx->mutex); 8571 8572 put_online_cpus(); 8573 8574 event->owner = current; 8575 8576 mutex_lock(¤t->perf_event_mutex); 8577 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 8578 mutex_unlock(¤t->perf_event_mutex); 8579 8580 /* 8581 * Drop the reference on the group_event after placing the 8582 * new event on the sibling_list. This ensures destruction 8583 * of the group leader will find the pointer to itself in 8584 * perf_group_detach(). 8585 */ 8586 fdput(group); 8587 fd_install(event_fd, event_file); 8588 return event_fd; 8589 8590 err_locked: 8591 if (move_group) 8592 mutex_unlock(&gctx->mutex); 8593 mutex_unlock(&ctx->mutex); 8594 /* err_file: */ 8595 fput(event_file); 8596 err_context: 8597 perf_unpin_context(ctx); 8598 put_ctx(ctx); 8599 err_alloc: 8600 free_event(event); 8601 err_cpus: 8602 put_online_cpus(); 8603 err_task: 8604 if (task) 8605 put_task_struct(task); 8606 err_group_fd: 8607 fdput(group); 8608 err_fd: 8609 put_unused_fd(event_fd); 8610 return err; 8611 } 8612 8613 /** 8614 * perf_event_create_kernel_counter 8615 * 8616 * @attr: attributes of the counter to create 8617 * @cpu: cpu in which the counter is bound 8618 * @task: task to profile (NULL for percpu) 8619 */ 8620 struct perf_event * 8621 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8622 struct task_struct *task, 8623 perf_overflow_handler_t overflow_handler, 8624 void *context) 8625 { 8626 struct perf_event_context *ctx; 8627 struct perf_event *event; 8628 int err; 8629 8630 /* 8631 * Get the target context (task or percpu): 8632 */ 8633 8634 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8635 overflow_handler, context, -1); 8636 if (IS_ERR(event)) { 8637 err = PTR_ERR(event); 8638 goto err; 8639 } 8640 8641 /* Mark owner so we could distinguish it from user events. */ 8642 event->owner = EVENT_OWNER_KERNEL; 8643 8644 account_event(event); 8645 8646 ctx = find_get_context(event->pmu, task, event); 8647 if (IS_ERR(ctx)) { 8648 err = PTR_ERR(ctx); 8649 goto err_free; 8650 } 8651 8652 WARN_ON_ONCE(ctx->parent_ctx); 8653 mutex_lock(&ctx->mutex); 8654 if (!exclusive_event_installable(event, ctx)) { 8655 mutex_unlock(&ctx->mutex); 8656 perf_unpin_context(ctx); 8657 put_ctx(ctx); 8658 err = -EBUSY; 8659 goto err_free; 8660 } 8661 8662 perf_install_in_context(ctx, event, cpu); 8663 perf_unpin_context(ctx); 8664 mutex_unlock(&ctx->mutex); 8665 8666 return event; 8667 8668 err_free: 8669 free_event(event); 8670 err: 8671 return ERR_PTR(err); 8672 } 8673 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8674 8675 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8676 { 8677 struct perf_event_context *src_ctx; 8678 struct perf_event_context *dst_ctx; 8679 struct perf_event *event, *tmp; 8680 LIST_HEAD(events); 8681 8682 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8683 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8684 8685 /* 8686 * See perf_event_ctx_lock() for comments on the details 8687 * of swizzling perf_event::ctx. 8688 */ 8689 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); 8690 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 8691 event_entry) { 8692 perf_remove_from_context(event, false); 8693 unaccount_event_cpu(event, src_cpu); 8694 put_ctx(src_ctx); 8695 list_add(&event->migrate_entry, &events); 8696 } 8697 8698 /* 8699 * Wait for the events to quiesce before re-instating them. 8700 */ 8701 synchronize_rcu(); 8702 8703 /* 8704 * Re-instate events in 2 passes. 8705 * 8706 * Skip over group leaders and only install siblings on this first 8707 * pass, siblings will not get enabled without a leader, however a 8708 * leader will enable its siblings, even if those are still on the old 8709 * context. 8710 */ 8711 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8712 if (event->group_leader == event) 8713 continue; 8714 8715 list_del(&event->migrate_entry); 8716 if (event->state >= PERF_EVENT_STATE_OFF) 8717 event->state = PERF_EVENT_STATE_INACTIVE; 8718 account_event_cpu(event, dst_cpu); 8719 perf_install_in_context(dst_ctx, event, dst_cpu); 8720 get_ctx(dst_ctx); 8721 } 8722 8723 /* 8724 * Once all the siblings are setup properly, install the group leaders 8725 * to make it go. 8726 */ 8727 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8728 list_del(&event->migrate_entry); 8729 if (event->state >= PERF_EVENT_STATE_OFF) 8730 event->state = PERF_EVENT_STATE_INACTIVE; 8731 account_event_cpu(event, dst_cpu); 8732 perf_install_in_context(dst_ctx, event, dst_cpu); 8733 get_ctx(dst_ctx); 8734 } 8735 mutex_unlock(&dst_ctx->mutex); 8736 mutex_unlock(&src_ctx->mutex); 8737 } 8738 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 8739 8740 static void sync_child_event(struct perf_event *child_event, 8741 struct task_struct *child) 8742 { 8743 struct perf_event *parent_event = child_event->parent; 8744 u64 child_val; 8745 8746 if (child_event->attr.inherit_stat) 8747 perf_event_read_event(child_event, child); 8748 8749 child_val = perf_event_count(child_event); 8750 8751 /* 8752 * Add back the child's count to the parent's count: 8753 */ 8754 atomic64_add(child_val, &parent_event->child_count); 8755 atomic64_add(child_event->total_time_enabled, 8756 &parent_event->child_total_time_enabled); 8757 atomic64_add(child_event->total_time_running, 8758 &parent_event->child_total_time_running); 8759 8760 /* 8761 * Remove this event from the parent's list 8762 */ 8763 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 8764 mutex_lock(&parent_event->child_mutex); 8765 list_del_init(&child_event->child_list); 8766 mutex_unlock(&parent_event->child_mutex); 8767 8768 /* 8769 * Make sure user/parent get notified, that we just 8770 * lost one event. 8771 */ 8772 perf_event_wakeup(parent_event); 8773 8774 /* 8775 * Release the parent event, if this was the last 8776 * reference to it. 8777 */ 8778 put_event(parent_event); 8779 } 8780 8781 static void 8782 __perf_event_exit_task(struct perf_event *child_event, 8783 struct perf_event_context *child_ctx, 8784 struct task_struct *child) 8785 { 8786 /* 8787 * Do not destroy the 'original' grouping; because of the context 8788 * switch optimization the original events could've ended up in a 8789 * random child task. 8790 * 8791 * If we were to destroy the original group, all group related 8792 * operations would cease to function properly after this random 8793 * child dies. 8794 * 8795 * Do destroy all inherited groups, we don't care about those 8796 * and being thorough is better. 8797 */ 8798 perf_remove_from_context(child_event, !!child_event->parent); 8799 8800 /* 8801 * It can happen that the parent exits first, and has events 8802 * that are still around due to the child reference. These 8803 * events need to be zapped. 8804 */ 8805 if (child_event->parent) { 8806 sync_child_event(child_event, child); 8807 free_event(child_event); 8808 } else { 8809 child_event->state = PERF_EVENT_STATE_EXIT; 8810 perf_event_wakeup(child_event); 8811 } 8812 } 8813 8814 static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 8815 { 8816 struct perf_event *child_event, *next; 8817 struct perf_event_context *child_ctx, *clone_ctx = NULL; 8818 unsigned long flags; 8819 8820 if (likely(!child->perf_event_ctxp[ctxn])) 8821 return; 8822 8823 local_irq_save(flags); 8824 /* 8825 * We can't reschedule here because interrupts are disabled, 8826 * and either child is current or it is a task that can't be 8827 * scheduled, so we are now safe from rescheduling changing 8828 * our context. 8829 */ 8830 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); 8831 8832 /* 8833 * Take the context lock here so that if find_get_context is 8834 * reading child->perf_event_ctxp, we wait until it has 8835 * incremented the context's refcount before we do put_ctx below. 8836 */ 8837 raw_spin_lock(&child_ctx->lock); 8838 task_ctx_sched_out(child_ctx); 8839 child->perf_event_ctxp[ctxn] = NULL; 8840 8841 /* 8842 * If this context is a clone; unclone it so it can't get 8843 * swapped to another process while we're removing all 8844 * the events from it. 8845 */ 8846 clone_ctx = unclone_ctx(child_ctx); 8847 update_context_time(child_ctx); 8848 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 8849 8850 if (clone_ctx) 8851 put_ctx(clone_ctx); 8852 8853 /* 8854 * Report the task dead after unscheduling the events so that we 8855 * won't get any samples after PERF_RECORD_EXIT. We can however still 8856 * get a few PERF_RECORD_READ events. 8857 */ 8858 perf_event_task(child, child_ctx, 0); 8859 8860 /* 8861 * We can recurse on the same lock type through: 8862 * 8863 * __perf_event_exit_task() 8864 * sync_child_event() 8865 * put_event() 8866 * mutex_lock(&ctx->mutex) 8867 * 8868 * But since its the parent context it won't be the same instance. 8869 */ 8870 mutex_lock(&child_ctx->mutex); 8871 8872 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) 8873 __perf_event_exit_task(child_event, child_ctx, child); 8874 8875 mutex_unlock(&child_ctx->mutex); 8876 8877 put_ctx(child_ctx); 8878 } 8879 8880 /* 8881 * When a child task exits, feed back event values to parent events. 8882 */ 8883 void perf_event_exit_task(struct task_struct *child) 8884 { 8885 struct perf_event *event, *tmp; 8886 int ctxn; 8887 8888 mutex_lock(&child->perf_event_mutex); 8889 list_for_each_entry_safe(event, tmp, &child->perf_event_list, 8890 owner_entry) { 8891 list_del_init(&event->owner_entry); 8892 8893 /* 8894 * Ensure the list deletion is visible before we clear 8895 * the owner, closes a race against perf_release() where 8896 * we need to serialize on the owner->perf_event_mutex. 8897 */ 8898 smp_wmb(); 8899 event->owner = NULL; 8900 } 8901 mutex_unlock(&child->perf_event_mutex); 8902 8903 for_each_task_context_nr(ctxn) 8904 perf_event_exit_task_context(child, ctxn); 8905 8906 /* 8907 * The perf_event_exit_task_context calls perf_event_task 8908 * with child's task_ctx, which generates EXIT events for 8909 * child contexts and sets child->perf_event_ctxp[] to NULL. 8910 * At this point we need to send EXIT events to cpu contexts. 8911 */ 8912 perf_event_task(child, NULL, 0); 8913 } 8914 8915 static void perf_free_event(struct perf_event *event, 8916 struct perf_event_context *ctx) 8917 { 8918 struct perf_event *parent = event->parent; 8919 8920 if (WARN_ON_ONCE(!parent)) 8921 return; 8922 8923 mutex_lock(&parent->child_mutex); 8924 list_del_init(&event->child_list); 8925 mutex_unlock(&parent->child_mutex); 8926 8927 put_event(parent); 8928 8929 raw_spin_lock_irq(&ctx->lock); 8930 perf_group_detach(event); 8931 list_del_event(event, ctx); 8932 raw_spin_unlock_irq(&ctx->lock); 8933 free_event(event); 8934 } 8935 8936 /* 8937 * Free an unexposed, unused context as created by inheritance by 8938 * perf_event_init_task below, used by fork() in case of fail. 8939 * 8940 * Not all locks are strictly required, but take them anyway to be nice and 8941 * help out with the lockdep assertions. 8942 */ 8943 void perf_event_free_task(struct task_struct *task) 8944 { 8945 struct perf_event_context *ctx; 8946 struct perf_event *event, *tmp; 8947 int ctxn; 8948 8949 for_each_task_context_nr(ctxn) { 8950 ctx = task->perf_event_ctxp[ctxn]; 8951 if (!ctx) 8952 continue; 8953 8954 mutex_lock(&ctx->mutex); 8955 again: 8956 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, 8957 group_entry) 8958 perf_free_event(event, ctx); 8959 8960 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, 8961 group_entry) 8962 perf_free_event(event, ctx); 8963 8964 if (!list_empty(&ctx->pinned_groups) || 8965 !list_empty(&ctx->flexible_groups)) 8966 goto again; 8967 8968 mutex_unlock(&ctx->mutex); 8969 8970 put_ctx(ctx); 8971 } 8972 } 8973 8974 void perf_event_delayed_put(struct task_struct *task) 8975 { 8976 int ctxn; 8977 8978 for_each_task_context_nr(ctxn) 8979 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); 8980 } 8981 8982 struct perf_event *perf_event_get(unsigned int fd) 8983 { 8984 int err; 8985 struct fd f; 8986 struct perf_event *event; 8987 8988 err = perf_fget_light(fd, &f); 8989 if (err) 8990 return ERR_PTR(err); 8991 8992 event = f.file->private_data; 8993 atomic_long_inc(&event->refcount); 8994 fdput(f); 8995 8996 return event; 8997 } 8998 8999 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) 9000 { 9001 if (!event) 9002 return ERR_PTR(-EINVAL); 9003 9004 return &event->attr; 9005 } 9006 9007 /* 9008 * inherit a event from parent task to child task: 9009 */ 9010 static struct perf_event * 9011 inherit_event(struct perf_event *parent_event, 9012 struct task_struct *parent, 9013 struct perf_event_context *parent_ctx, 9014 struct task_struct *child, 9015 struct perf_event *group_leader, 9016 struct perf_event_context *child_ctx) 9017 { 9018 enum perf_event_active_state parent_state = parent_event->state; 9019 struct perf_event *child_event; 9020 unsigned long flags; 9021 9022 /* 9023 * Instead of creating recursive hierarchies of events, 9024 * we link inherited events back to the original parent, 9025 * which has a filp for sure, which we use as the reference 9026 * count: 9027 */ 9028 if (parent_event->parent) 9029 parent_event = parent_event->parent; 9030 9031 child_event = perf_event_alloc(&parent_event->attr, 9032 parent_event->cpu, 9033 child, 9034 group_leader, parent_event, 9035 NULL, NULL, -1); 9036 if (IS_ERR(child_event)) 9037 return child_event; 9038 9039 if (is_orphaned_event(parent_event) || 9040 !atomic_long_inc_not_zero(&parent_event->refcount)) { 9041 free_event(child_event); 9042 return NULL; 9043 } 9044 9045 get_ctx(child_ctx); 9046 9047 /* 9048 * Make the child state follow the state of the parent event, 9049 * not its attr.disabled bit. We hold the parent's mutex, 9050 * so we won't race with perf_event_{en, dis}able_family. 9051 */ 9052 if (parent_state >= PERF_EVENT_STATE_INACTIVE) 9053 child_event->state = PERF_EVENT_STATE_INACTIVE; 9054 else 9055 child_event->state = PERF_EVENT_STATE_OFF; 9056 9057 if (parent_event->attr.freq) { 9058 u64 sample_period = parent_event->hw.sample_period; 9059 struct hw_perf_event *hwc = &child_event->hw; 9060 9061 hwc->sample_period = sample_period; 9062 hwc->last_period = sample_period; 9063 9064 local64_set(&hwc->period_left, sample_period); 9065 } 9066 9067 child_event->ctx = child_ctx; 9068 child_event->overflow_handler = parent_event->overflow_handler; 9069 child_event->overflow_handler_context 9070 = parent_event->overflow_handler_context; 9071 9072 /* 9073 * Precalculate sample_data sizes 9074 */ 9075 perf_event__header_size(child_event); 9076 perf_event__id_header_size(child_event); 9077 9078 /* 9079 * Link it up in the child's context: 9080 */ 9081 raw_spin_lock_irqsave(&child_ctx->lock, flags); 9082 add_event_to_ctx(child_event, child_ctx); 9083 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 9084 9085 /* 9086 * Link this into the parent event's child list 9087 */ 9088 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 9089 mutex_lock(&parent_event->child_mutex); 9090 list_add_tail(&child_event->child_list, &parent_event->child_list); 9091 mutex_unlock(&parent_event->child_mutex); 9092 9093 return child_event; 9094 } 9095 9096 static int inherit_group(struct perf_event *parent_event, 9097 struct task_struct *parent, 9098 struct perf_event_context *parent_ctx, 9099 struct task_struct *child, 9100 struct perf_event_context *child_ctx) 9101 { 9102 struct perf_event *leader; 9103 struct perf_event *sub; 9104 struct perf_event *child_ctr; 9105 9106 leader = inherit_event(parent_event, parent, parent_ctx, 9107 child, NULL, child_ctx); 9108 if (IS_ERR(leader)) 9109 return PTR_ERR(leader); 9110 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { 9111 child_ctr = inherit_event(sub, parent, parent_ctx, 9112 child, leader, child_ctx); 9113 if (IS_ERR(child_ctr)) 9114 return PTR_ERR(child_ctr); 9115 } 9116 return 0; 9117 } 9118 9119 static int 9120 inherit_task_group(struct perf_event *event, struct task_struct *parent, 9121 struct perf_event_context *parent_ctx, 9122 struct task_struct *child, int ctxn, 9123 int *inherited_all) 9124 { 9125 int ret; 9126 struct perf_event_context *child_ctx; 9127 9128 if (!event->attr.inherit) { 9129 *inherited_all = 0; 9130 return 0; 9131 } 9132 9133 child_ctx = child->perf_event_ctxp[ctxn]; 9134 if (!child_ctx) { 9135 /* 9136 * This is executed from the parent task context, so 9137 * inherit events that have been marked for cloning. 9138 * First allocate and initialize a context for the 9139 * child. 9140 */ 9141 9142 child_ctx = alloc_perf_context(parent_ctx->pmu, child); 9143 if (!child_ctx) 9144 return -ENOMEM; 9145 9146 child->perf_event_ctxp[ctxn] = child_ctx; 9147 } 9148 9149 ret = inherit_group(event, parent, parent_ctx, 9150 child, child_ctx); 9151 9152 if (ret) 9153 *inherited_all = 0; 9154 9155 return ret; 9156 } 9157 9158 /* 9159 * Initialize the perf_event context in task_struct 9160 */ 9161 static int perf_event_init_context(struct task_struct *child, int ctxn) 9162 { 9163 struct perf_event_context *child_ctx, *parent_ctx; 9164 struct perf_event_context *cloned_ctx; 9165 struct perf_event *event; 9166 struct task_struct *parent = current; 9167 int inherited_all = 1; 9168 unsigned long flags; 9169 int ret = 0; 9170 9171 if (likely(!parent->perf_event_ctxp[ctxn])) 9172 return 0; 9173 9174 /* 9175 * If the parent's context is a clone, pin it so it won't get 9176 * swapped under us. 9177 */ 9178 parent_ctx = perf_pin_task_context(parent, ctxn); 9179 if (!parent_ctx) 9180 return 0; 9181 9182 /* 9183 * No need to check if parent_ctx != NULL here; since we saw 9184 * it non-NULL earlier, the only reason for it to become NULL 9185 * is if we exit, and since we're currently in the middle of 9186 * a fork we can't be exiting at the same time. 9187 */ 9188 9189 /* 9190 * Lock the parent list. No need to lock the child - not PID 9191 * hashed yet and not running, so nobody can access it. 9192 */ 9193 mutex_lock(&parent_ctx->mutex); 9194 9195 /* 9196 * We dont have to disable NMIs - we are only looking at 9197 * the list, not manipulating it: 9198 */ 9199 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { 9200 ret = inherit_task_group(event, parent, parent_ctx, 9201 child, ctxn, &inherited_all); 9202 if (ret) 9203 break; 9204 } 9205 9206 /* 9207 * We can't hold ctx->lock when iterating the ->flexible_group list due 9208 * to allocations, but we need to prevent rotation because 9209 * rotate_ctx() will change the list from interrupt context. 9210 */ 9211 raw_spin_lock_irqsave(&parent_ctx->lock, flags); 9212 parent_ctx->rotate_disable = 1; 9213 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); 9214 9215 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { 9216 ret = inherit_task_group(event, parent, parent_ctx, 9217 child, ctxn, &inherited_all); 9218 if (ret) 9219 break; 9220 } 9221 9222 raw_spin_lock_irqsave(&parent_ctx->lock, flags); 9223 parent_ctx->rotate_disable = 0; 9224 9225 child_ctx = child->perf_event_ctxp[ctxn]; 9226 9227 if (child_ctx && inherited_all) { 9228 /* 9229 * Mark the child context as a clone of the parent 9230 * context, or of whatever the parent is a clone of. 9231 * 9232 * Note that if the parent is a clone, the holding of 9233 * parent_ctx->lock avoids it from being uncloned. 9234 */ 9235 cloned_ctx = parent_ctx->parent_ctx; 9236 if (cloned_ctx) { 9237 child_ctx->parent_ctx = cloned_ctx; 9238 child_ctx->parent_gen = parent_ctx->parent_gen; 9239 } else { 9240 child_ctx->parent_ctx = parent_ctx; 9241 child_ctx->parent_gen = parent_ctx->generation; 9242 } 9243 get_ctx(child_ctx->parent_ctx); 9244 } 9245 9246 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); 9247 mutex_unlock(&parent_ctx->mutex); 9248 9249 perf_unpin_context(parent_ctx); 9250 put_ctx(parent_ctx); 9251 9252 return ret; 9253 } 9254 9255 /* 9256 * Initialize the perf_event context in task_struct 9257 */ 9258 int perf_event_init_task(struct task_struct *child) 9259 { 9260 int ctxn, ret; 9261 9262 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); 9263 mutex_init(&child->perf_event_mutex); 9264 INIT_LIST_HEAD(&child->perf_event_list); 9265 9266 for_each_task_context_nr(ctxn) { 9267 ret = perf_event_init_context(child, ctxn); 9268 if (ret) { 9269 perf_event_free_task(child); 9270 return ret; 9271 } 9272 } 9273 9274 return 0; 9275 } 9276 9277 static void __init perf_event_init_all_cpus(void) 9278 { 9279 struct swevent_htable *swhash; 9280 int cpu; 9281 9282 for_each_possible_cpu(cpu) { 9283 swhash = &per_cpu(swevent_htable, cpu); 9284 mutex_init(&swhash->hlist_mutex); 9285 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu)); 9286 } 9287 } 9288 9289 static void perf_event_init_cpu(int cpu) 9290 { 9291 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9292 9293 mutex_lock(&swhash->hlist_mutex); 9294 swhash->online = true; 9295 if (swhash->hlist_refcount > 0) { 9296 struct swevent_hlist *hlist; 9297 9298 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); 9299 WARN_ON(!hlist); 9300 rcu_assign_pointer(swhash->swevent_hlist, hlist); 9301 } 9302 mutex_unlock(&swhash->hlist_mutex); 9303 } 9304 9305 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE 9306 static void __perf_event_exit_context(void *__info) 9307 { 9308 struct remove_event re = { .detach_group = true }; 9309 struct perf_event_context *ctx = __info; 9310 9311 rcu_read_lock(); 9312 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) 9313 __perf_remove_from_context(&re); 9314 rcu_read_unlock(); 9315 } 9316 9317 static void perf_event_exit_cpu_context(int cpu) 9318 { 9319 struct perf_event_context *ctx; 9320 struct pmu *pmu; 9321 int idx; 9322 9323 idx = srcu_read_lock(&pmus_srcu); 9324 list_for_each_entry_rcu(pmu, &pmus, entry) { 9325 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; 9326 9327 mutex_lock(&ctx->mutex); 9328 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); 9329 mutex_unlock(&ctx->mutex); 9330 } 9331 srcu_read_unlock(&pmus_srcu, idx); 9332 } 9333 9334 static void perf_event_exit_cpu(int cpu) 9335 { 9336 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9337 9338 perf_event_exit_cpu_context(cpu); 9339 9340 mutex_lock(&swhash->hlist_mutex); 9341 swhash->online = false; 9342 swevent_hlist_release(swhash); 9343 mutex_unlock(&swhash->hlist_mutex); 9344 } 9345 #else 9346 static inline void perf_event_exit_cpu(int cpu) { } 9347 #endif 9348 9349 static int 9350 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) 9351 { 9352 int cpu; 9353 9354 for_each_online_cpu(cpu) 9355 perf_event_exit_cpu(cpu); 9356 9357 return NOTIFY_OK; 9358 } 9359 9360 /* 9361 * Run the perf reboot notifier at the very last possible moment so that 9362 * the generic watchdog code runs as long as possible. 9363 */ 9364 static struct notifier_block perf_reboot_notifier = { 9365 .notifier_call = perf_reboot, 9366 .priority = INT_MIN, 9367 }; 9368 9369 static int 9370 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) 9371 { 9372 unsigned int cpu = (long)hcpu; 9373 9374 switch (action & ~CPU_TASKS_FROZEN) { 9375 9376 case CPU_UP_PREPARE: 9377 case CPU_DOWN_FAILED: 9378 perf_event_init_cpu(cpu); 9379 break; 9380 9381 case CPU_UP_CANCELED: 9382 case CPU_DOWN_PREPARE: 9383 perf_event_exit_cpu(cpu); 9384 break; 9385 default: 9386 break; 9387 } 9388 9389 return NOTIFY_OK; 9390 } 9391 9392 void __init perf_event_init(void) 9393 { 9394 int ret; 9395 9396 idr_init(&pmu_idr); 9397 9398 perf_event_init_all_cpus(); 9399 init_srcu_struct(&pmus_srcu); 9400 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); 9401 perf_pmu_register(&perf_cpu_clock, NULL, -1); 9402 perf_pmu_register(&perf_task_clock, NULL, -1); 9403 perf_tp_register(); 9404 perf_cpu_notifier(perf_cpu_notify); 9405 register_reboot_notifier(&perf_reboot_notifier); 9406 9407 ret = init_hw_breakpoint(); 9408 WARN(ret, "hw_breakpoint initialization failed with: %d", ret); 9409 9410 /* do not patch jump label more than once per second */ 9411 jump_label_rate_limit(&perf_sched_events, HZ); 9412 9413 /* 9414 * Build time assertion that we keep the data_head at the intended 9415 * location. IOW, validation we got the __reserved[] size right. 9416 */ 9417 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head)) 9418 != 1024); 9419 } 9420 9421 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, 9422 char *page) 9423 { 9424 struct perf_pmu_events_attr *pmu_attr = 9425 container_of(attr, struct perf_pmu_events_attr, attr); 9426 9427 if (pmu_attr->event_str) 9428 return sprintf(page, "%s\n", pmu_attr->event_str); 9429 9430 return 0; 9431 } 9432 9433 static int __init perf_event_sysfs_init(void) 9434 { 9435 struct pmu *pmu; 9436 int ret; 9437 9438 mutex_lock(&pmus_lock); 9439 9440 ret = bus_register(&pmu_bus); 9441 if (ret) 9442 goto unlock; 9443 9444 list_for_each_entry(pmu, &pmus, entry) { 9445 if (!pmu->name || pmu->type < 0) 9446 continue; 9447 9448 ret = pmu_dev_alloc(pmu); 9449 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); 9450 } 9451 pmu_bus_running = 1; 9452 ret = 0; 9453 9454 unlock: 9455 mutex_unlock(&pmus_lock); 9456 9457 return ret; 9458 } 9459 device_initcall(perf_event_sysfs_init); 9460 9461 #ifdef CONFIG_CGROUP_PERF 9462 static struct cgroup_subsys_state * 9463 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 9464 { 9465 struct perf_cgroup *jc; 9466 9467 jc = kzalloc(sizeof(*jc), GFP_KERNEL); 9468 if (!jc) 9469 return ERR_PTR(-ENOMEM); 9470 9471 jc->info = alloc_percpu(struct perf_cgroup_info); 9472 if (!jc->info) { 9473 kfree(jc); 9474 return ERR_PTR(-ENOMEM); 9475 } 9476 9477 return &jc->css; 9478 } 9479 9480 static void perf_cgroup_css_free(struct cgroup_subsys_state *css) 9481 { 9482 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css); 9483 9484 free_percpu(jc->info); 9485 kfree(jc); 9486 } 9487 9488 static int __perf_cgroup_move(void *info) 9489 { 9490 struct task_struct *task = info; 9491 rcu_read_lock(); 9492 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); 9493 rcu_read_unlock(); 9494 return 0; 9495 } 9496 9497 static void perf_cgroup_attach(struct cgroup_taskset *tset) 9498 { 9499 struct task_struct *task; 9500 struct cgroup_subsys_state *css; 9501 9502 cgroup_taskset_for_each(task, css, tset) 9503 task_function_call(task, __perf_cgroup_move, task); 9504 } 9505 9506 struct cgroup_subsys perf_event_cgrp_subsys = { 9507 .css_alloc = perf_cgroup_css_alloc, 9508 .css_free = perf_cgroup_css_free, 9509 .attach = perf_cgroup_attach, 9510 }; 9511 #endif /* CONFIG_CGROUP_PERF */ 9512