1 /* 2 * Performance events core code: 3 * 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 8 * 9 * For licensing details see kernel-base/COPYING 10 */ 11 12 #include <linux/fs.h> 13 #include <linux/mm.h> 14 #include <linux/cpu.h> 15 #include <linux/smp.h> 16 #include <linux/idr.h> 17 #include <linux/file.h> 18 #include <linux/poll.h> 19 #include <linux/slab.h> 20 #include <linux/hash.h> 21 #include <linux/tick.h> 22 #include <linux/sysfs.h> 23 #include <linux/dcache.h> 24 #include <linux/percpu.h> 25 #include <linux/ptrace.h> 26 #include <linux/reboot.h> 27 #include <linux/vmstat.h> 28 #include <linux/device.h> 29 #include <linux/export.h> 30 #include <linux/vmalloc.h> 31 #include <linux/hardirq.h> 32 #include <linux/rculist.h> 33 #include <linux/uaccess.h> 34 #include <linux/syscalls.h> 35 #include <linux/anon_inodes.h> 36 #include <linux/kernel_stat.h> 37 #include <linux/cgroup.h> 38 #include <linux/perf_event.h> 39 #include <linux/trace_events.h> 40 #include <linux/hw_breakpoint.h> 41 #include <linux/mm_types.h> 42 #include <linux/module.h> 43 #include <linux/mman.h> 44 #include <linux/compat.h> 45 #include <linux/bpf.h> 46 #include <linux/filter.h> 47 48 #include "internal.h" 49 50 #include <asm/irq_regs.h> 51 52 static struct workqueue_struct *perf_wq; 53 54 typedef int (*remote_function_f)(void *); 55 56 struct remote_function_call { 57 struct task_struct *p; 58 remote_function_f func; 59 void *info; 60 int ret; 61 }; 62 63 static void remote_function(void *data) 64 { 65 struct remote_function_call *tfc = data; 66 struct task_struct *p = tfc->p; 67 68 if (p) { 69 tfc->ret = -EAGAIN; 70 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 71 return; 72 } 73 74 tfc->ret = tfc->func(tfc->info); 75 } 76 77 /** 78 * task_function_call - call a function on the cpu on which a task runs 79 * @p: the task to evaluate 80 * @func: the function to be called 81 * @info: the function call argument 82 * 83 * Calls the function @func when the task is currently running. This might 84 * be on the current CPU, which just calls the function directly 85 * 86 * returns: @func return value, or 87 * -ESRCH - when the process isn't running 88 * -EAGAIN - when the process moved away 89 */ 90 static int 91 task_function_call(struct task_struct *p, remote_function_f func, void *info) 92 { 93 struct remote_function_call data = { 94 .p = p, 95 .func = func, 96 .info = info, 97 .ret = -ESRCH, /* No such (running) process */ 98 }; 99 100 if (task_curr(p)) 101 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 102 103 return data.ret; 104 } 105 106 /** 107 * cpu_function_call - call a function on the cpu 108 * @func: the function to be called 109 * @info: the function call argument 110 * 111 * Calls the function @func on the remote cpu. 
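 *
 * A minimal usage sketch (illustrative only; the names below are
 * hypothetical and not part of this file):
 *
 *	static int read_remote(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *		return 0;
 *	}
 *
 *	int val, err;
 *
 *	err = cpu_function_call(2, read_remote, &val);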
112 * 113 * returns: @func return value or -ENXIO when the cpu is offline 114 */ 115 static int cpu_function_call(int cpu, remote_function_f func, void *info) 116 { 117 struct remote_function_call data = { 118 .p = NULL, 119 .func = func, 120 .info = info, 121 .ret = -ENXIO, /* No such CPU */ 122 }; 123 124 smp_call_function_single(cpu, remote_function, &data, 1); 125 126 return data.ret; 127 } 128 129 #define EVENT_OWNER_KERNEL ((void *) -1) 130 131 static bool is_kernel_event(struct perf_event *event) 132 { 133 return event->owner == EVENT_OWNER_KERNEL; 134 } 135 136 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ 137 PERF_FLAG_FD_OUTPUT |\ 138 PERF_FLAG_PID_CGROUP |\ 139 PERF_FLAG_FD_CLOEXEC) 140 141 /* 142 * branch priv levels that need permission checks 143 */ 144 #define PERF_SAMPLE_BRANCH_PERM_PLM \ 145 (PERF_SAMPLE_BRANCH_KERNEL |\ 146 PERF_SAMPLE_BRANCH_HV) 147 148 enum event_type_t { 149 EVENT_FLEXIBLE = 0x1, 150 EVENT_PINNED = 0x2, 151 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 152 }; 153 154 /* 155 * perf_sched_events : >0 events exist 156 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 157 */ 158 struct static_key_deferred perf_sched_events __read_mostly; 159 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 160 static DEFINE_PER_CPU(int, perf_sched_cb_usages); 161 162 static atomic_t nr_mmap_events __read_mostly; 163 static atomic_t nr_comm_events __read_mostly; 164 static atomic_t nr_task_events __read_mostly; 165 static atomic_t nr_freq_events __read_mostly; 166 167 static LIST_HEAD(pmus); 168 static DEFINE_MUTEX(pmus_lock); 169 static struct srcu_struct pmus_srcu; 170 171 /* 172 * perf event paranoia level: 173 * -1 - not paranoid at all 174 * 0 - disallow raw tracepoint access for unpriv 175 * 1 - disallow cpu events for unpriv 176 * 2 - disallow kernel profiling for unpriv 177 */ 178 int sysctl_perf_event_paranoid __read_mostly = 1; 179 180 /* Minimum for 512 kiB + 1 user control page */ 181 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ 182 183 /* 184 * max perf event sample rate 185 */ 186 #define DEFAULT_MAX_SAMPLE_RATE 100000 187 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) 188 #define DEFAULT_CPU_TIME_MAX_PERCENT 25 189 190 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; 191 192 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); 193 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; 194 195 static int perf_sample_allowed_ns __read_mostly = 196 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; 197 198 void update_perf_cpu_limits(void) 199 { 200 u64 tmp = perf_sample_period_ns; 201 202 tmp *= sysctl_perf_cpu_time_max_percent; 203 do_div(tmp, 100); 204 ACCESS_ONCE(perf_sample_allowed_ns) = tmp; 205 } 206 207 static int perf_rotate_context(struct perf_cpu_context *cpuctx); 208 209 int perf_proc_update_handler(struct ctl_table *table, int write, 210 void __user *buffer, size_t *lenp, 211 loff_t *ppos) 212 { 213 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 214 215 if (ret || !write) 216 return ret; 217 218 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 219 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 220 update_perf_cpu_limits(); 221 222 return 0; 223 } 224 225 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; 226 227 int perf_cpu_time_max_percent_handler(struct ctl_table 
*table, int write, 228 void __user *buffer, size_t *lenp, 229 loff_t *ppos) 230 { 231 int ret = proc_dointvec(table, write, buffer, lenp, ppos); 232 233 if (ret || !write) 234 return ret; 235 236 update_perf_cpu_limits(); 237 238 return 0; 239 } 240 241 /* 242 * perf samples are done in some very critical code paths (NMIs). 243 * If they take too much CPU time, the system can lock up and not 244 * get any real work done. This will drop the sample rate when 245 * we detect that events are taking too long. 246 */ 247 #define NR_ACCUMULATED_SAMPLES 128 248 static DEFINE_PER_CPU(u64, running_sample_length); 249 250 static void perf_duration_warn(struct irq_work *w) 251 { 252 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); 253 u64 avg_local_sample_len; 254 u64 local_samples_len; 255 256 local_samples_len = __this_cpu_read(running_sample_length); 257 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 258 259 printk_ratelimited(KERN_WARNING 260 "perf interrupt took too long (%lld > %lld), lowering " 261 "kernel.perf_event_max_sample_rate to %d\n", 262 avg_local_sample_len, allowed_ns >> 1, 263 sysctl_perf_event_sample_rate); 264 } 265 266 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn); 267 268 void perf_sample_event_took(u64 sample_len_ns) 269 { 270 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); 271 u64 avg_local_sample_len; 272 u64 local_samples_len; 273 274 if (allowed_ns == 0) 275 return; 276 277 /* decay the counter by 1 average sample */ 278 local_samples_len = __this_cpu_read(running_sample_length); 279 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; 280 local_samples_len += sample_len_ns; 281 __this_cpu_write(running_sample_length, local_samples_len); 282 283 /* 284 * note: this will be biased artificially low until we have 285 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us 286 * from having to maintain a count.
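 *
 * As a rough illustration (not a guarantee): with
 * NR_ACCUMULATED_SAMPLES == 128, each invocation of this function
 * discards 1/128th of the running sum and then adds the new sample, so
 *
 *	avg_local_sample_len ~= running_sample_length / 128
 *
 * behaves like an exponential moving average of recent sample lengths.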
287 */ 288 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 289 290 if (avg_local_sample_len <= allowed_ns) 291 return; 292 293 if (max_samples_per_tick <= 1) 294 return; 295 296 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2); 297 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; 298 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 299 300 update_perf_cpu_limits(); 301 302 if (!irq_work_queue(&perf_duration_work)) { 303 early_printk("perf interrupt took too long (%lld > %lld), lowering " 304 "kernel.perf_event_max_sample_rate to %d\n", 305 avg_local_sample_len, allowed_ns >> 1, 306 sysctl_perf_event_sample_rate); 307 } 308 } 309 310 static atomic64_t perf_event_id; 311 312 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 313 enum event_type_t event_type); 314 315 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 316 enum event_type_t event_type, 317 struct task_struct *task); 318 319 static void update_context_time(struct perf_event_context *ctx); 320 static u64 perf_event_time(struct perf_event *event); 321 322 void __weak perf_event_print_debug(void) { } 323 324 extern __weak const char *perf_pmu_name(void) 325 { 326 return "pmu"; 327 } 328 329 static inline u64 perf_clock(void) 330 { 331 return local_clock(); 332 } 333 334 static inline u64 perf_event_clock(struct perf_event *event) 335 { 336 return event->clock(); 337 } 338 339 static inline struct perf_cpu_context * 340 __get_cpu_context(struct perf_event_context *ctx) 341 { 342 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); 343 } 344 345 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, 346 struct perf_event_context *ctx) 347 { 348 raw_spin_lock(&cpuctx->ctx.lock); 349 if (ctx) 350 raw_spin_lock(&ctx->lock); 351 } 352 353 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, 354 struct perf_event_context *ctx) 355 { 356 if (ctx) 357 raw_spin_unlock(&ctx->lock); 358 raw_spin_unlock(&cpuctx->ctx.lock); 359 } 360 361 #ifdef CONFIG_CGROUP_PERF 362 363 static inline bool 364 perf_cgroup_match(struct perf_event *event) 365 { 366 struct perf_event_context *ctx = event->ctx; 367 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 368 369 /* @event doesn't care about cgroup */ 370 if (!event->cgrp) 371 return true; 372 373 /* wants specific cgroup scope but @cpuctx isn't associated with any */ 374 if (!cpuctx->cgrp) 375 return false; 376 377 /* 378 * Cgroup scoping is recursive. An event enabled for a cgroup is 379 * also enabled for all its descendant cgroups. If @cpuctx's 380 * cgroup is a descendant of @event's (the test covers identity 381 * case), it's a match. 
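 *
 * For example (hypothetical paths): an event bound to cgroup /A also
 * matches while the CPU's current cgroup is /A/B or /A/B/C, but not
 * while it is /D.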
382 */ 383 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, 384 event->cgrp->css.cgroup); 385 } 386 387 static inline void perf_detach_cgroup(struct perf_event *event) 388 { 389 css_put(&event->cgrp->css); 390 event->cgrp = NULL; 391 } 392 393 static inline int is_cgroup_event(struct perf_event *event) 394 { 395 return event->cgrp != NULL; 396 } 397 398 static inline u64 perf_cgroup_event_time(struct perf_event *event) 399 { 400 struct perf_cgroup_info *t; 401 402 t = per_cpu_ptr(event->cgrp->info, event->cpu); 403 return t->time; 404 } 405 406 static inline void __update_cgrp_time(struct perf_cgroup *cgrp) 407 { 408 struct perf_cgroup_info *info; 409 u64 now; 410 411 now = perf_clock(); 412 413 info = this_cpu_ptr(cgrp->info); 414 415 info->time += now - info->timestamp; 416 info->timestamp = now; 417 } 418 419 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 420 { 421 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 422 if (cgrp_out) 423 __update_cgrp_time(cgrp_out); 424 } 425 426 static inline void update_cgrp_time_from_event(struct perf_event *event) 427 { 428 struct perf_cgroup *cgrp; 429 430 /* 431 * ensure we access cgroup data only when needed and 432 * when we know the cgroup is pinned (css_get) 433 */ 434 if (!is_cgroup_event(event)) 435 return; 436 437 cgrp = perf_cgroup_from_task(current); 438 /* 439 * Do not update time when cgroup is not active 440 */ 441 if (cgrp == event->cgrp) 442 __update_cgrp_time(event->cgrp); 443 } 444 445 static inline void 446 perf_cgroup_set_timestamp(struct task_struct *task, 447 struct perf_event_context *ctx) 448 { 449 struct perf_cgroup *cgrp; 450 struct perf_cgroup_info *info; 451 452 /* 453 * ctx->lock held by caller 454 * ensure we do not access cgroup data 455 * unless we have the cgroup pinned (css_get) 456 */ 457 if (!task || !ctx->nr_cgroups) 458 return; 459 460 cgrp = perf_cgroup_from_task(task); 461 info = this_cpu_ptr(cgrp->info); 462 info->timestamp = ctx->timestamp; 463 } 464 465 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ 466 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ 467 468 /* 469 * reschedule events based on the cgroup constraint of task. 470 * 471 * mode SWOUT : schedule out everything 472 * mode SWIN : schedule in based on cgroup for next 473 */ 474 void perf_cgroup_switch(struct task_struct *task, int mode) 475 { 476 struct perf_cpu_context *cpuctx; 477 struct pmu *pmu; 478 unsigned long flags; 479 480 /* 481 * disable interrupts to avoid geting nr_cgroup 482 * changes via __perf_event_disable(). Also 483 * avoids preemption. 484 */ 485 local_irq_save(flags); 486 487 /* 488 * we reschedule only in the presence of cgroup 489 * constrained events. 490 */ 491 rcu_read_lock(); 492 493 list_for_each_entry_rcu(pmu, &pmus, entry) { 494 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 495 if (cpuctx->unique_pmu != pmu) 496 continue; /* ensure we process each cpuctx once */ 497 498 /* 499 * perf_cgroup_events says at least one 500 * context on this CPU has cgroup events. 501 * 502 * ctx->nr_cgroups reports the number of cgroup 503 * events for a context. 
504 */ 505 if (cpuctx->ctx.nr_cgroups > 0) { 506 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 507 perf_pmu_disable(cpuctx->ctx.pmu); 508 509 if (mode & PERF_CGROUP_SWOUT) { 510 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 511 /* 512 * must not be done before ctxswout due 513 * to event_filter_match() in event_sched_out() 514 */ 515 cpuctx->cgrp = NULL; 516 } 517 518 if (mode & PERF_CGROUP_SWIN) { 519 WARN_ON_ONCE(cpuctx->cgrp); 520 /* 521 * set cgrp before ctxsw in to allow 522 * event_filter_match() to not have to pass 523 * task around 524 */ 525 cpuctx->cgrp = perf_cgroup_from_task(task); 526 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); 527 } 528 perf_pmu_enable(cpuctx->ctx.pmu); 529 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 530 } 531 } 532 533 rcu_read_unlock(); 534 535 local_irq_restore(flags); 536 } 537 538 static inline void perf_cgroup_sched_out(struct task_struct *task, 539 struct task_struct *next) 540 { 541 struct perf_cgroup *cgrp1; 542 struct perf_cgroup *cgrp2 = NULL; 543 544 /* 545 * we come here when we know perf_cgroup_events > 0 546 */ 547 cgrp1 = perf_cgroup_from_task(task); 548 549 /* 550 * next is NULL when called from perf_event_enable_on_exec() 551 * that will systematically cause a cgroup_switch() 552 */ 553 if (next) 554 cgrp2 = perf_cgroup_from_task(next); 555 556 /* 557 * only schedule out current cgroup events if we know 558 * that we are switching to a different cgroup. Otherwise, 559 * do not touch the cgroup events. 560 */ 561 if (cgrp1 != cgrp2) 562 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 563 } 564 565 static inline void perf_cgroup_sched_in(struct task_struct *prev, 566 struct task_struct *task) 567 { 568 struct perf_cgroup *cgrp1; 569 struct perf_cgroup *cgrp2 = NULL; 570 571 /* 572 * we come here when we know perf_cgroup_events > 0 573 */ 574 cgrp1 = perf_cgroup_from_task(task); 575 576 /* prev can never be NULL */ 577 cgrp2 = perf_cgroup_from_task(prev); 578 579 /* 580 * only need to schedule in cgroup events if we are changing 581 * cgroup during ctxsw. Cgroup events were not scheduled 582 * out during that ctxsw if that was not the case.
583 */ 584 if (cgrp1 != cgrp2) 585 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 586 } 587 588 static inline int perf_cgroup_connect(int fd, struct perf_event *event, 589 struct perf_event_attr *attr, 590 struct perf_event *group_leader) 591 { 592 struct perf_cgroup *cgrp; 593 struct cgroup_subsys_state *css; 594 struct fd f = fdget(fd); 595 int ret = 0; 596 597 if (!f.file) 598 return -EBADF; 599 600 css = css_tryget_online_from_dir(f.file->f_path.dentry, 601 &perf_event_cgrp_subsys); 602 if (IS_ERR(css)) { 603 ret = PTR_ERR(css); 604 goto out; 605 } 606 607 cgrp = container_of(css, struct perf_cgroup, css); 608 event->cgrp = cgrp; 609 610 /* 611 * all events in a group must monitor 612 * the same cgroup because a task belongs 613 * to only one perf cgroup at a time 614 */ 615 if (group_leader && group_leader->cgrp != cgrp) { 616 perf_detach_cgroup(event); 617 ret = -EINVAL; 618 } 619 out: 620 fdput(f); 621 return ret; 622 } 623 624 static inline void 625 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 626 { 627 struct perf_cgroup_info *t; 628 t = per_cpu_ptr(event->cgrp->info, event->cpu); 629 event->shadow_ctx_time = now - t->timestamp; 630 } 631 632 static inline void 633 perf_cgroup_defer_enabled(struct perf_event *event) 634 { 635 /* 636 * when the current task's perf cgroup does not match 637 * the event's, we need to remember to call the 638 * perf_mark_enable() function the first time a task with 639 * a matching perf cgroup is scheduled in. 640 */ 641 if (is_cgroup_event(event) && !perf_cgroup_match(event)) 642 event->cgrp_defer_enabled = 1; 643 } 644 645 static inline void 646 perf_cgroup_mark_enabled(struct perf_event *event, 647 struct perf_event_context *ctx) 648 { 649 struct perf_event *sub; 650 u64 tstamp = perf_event_time(event); 651 652 if (!event->cgrp_defer_enabled) 653 return; 654 655 event->cgrp_defer_enabled = 0; 656 657 event->tstamp_enabled = tstamp - event->total_time_enabled; 658 list_for_each_entry(sub, &event->sibling_list, group_entry) { 659 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { 660 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 661 sub->cgrp_defer_enabled = 0; 662 } 663 } 664 } 665 #else /* !CONFIG_CGROUP_PERF */ 666 667 static inline bool 668 perf_cgroup_match(struct perf_event *event) 669 { 670 return true; 671 } 672 673 static inline void perf_detach_cgroup(struct perf_event *event) 674 {} 675 676 static inline int is_cgroup_event(struct perf_event *event) 677 { 678 return 0; 679 } 680 681 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) 682 { 683 return 0; 684 } 685 686 static inline void update_cgrp_time_from_event(struct perf_event *event) 687 { 688 } 689 690 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 691 { 692 } 693 694 static inline void perf_cgroup_sched_out(struct task_struct *task, 695 struct task_struct *next) 696 { 697 } 698 699 static inline void perf_cgroup_sched_in(struct task_struct *prev, 700 struct task_struct *task) 701 { 702 } 703 704 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, 705 struct perf_event_attr *attr, 706 struct perf_event *group_leader) 707 { 708 return -EINVAL; 709 } 710 711 static inline void 712 perf_cgroup_set_timestamp(struct task_struct *task, 713 struct perf_event_context *ctx) 714 { 715 } 716 717 void 718 perf_cgroup_switch(struct task_struct *task, struct task_struct *next) 719 { 720 } 721 722 static inline void 723 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 724 { 725 } 726 727 
static inline u64 perf_cgroup_event_time(struct perf_event *event) 728 { 729 return 0; 730 } 731 732 static inline void 733 perf_cgroup_defer_enabled(struct perf_event *event) 734 { 735 } 736 737 static inline void 738 perf_cgroup_mark_enabled(struct perf_event *event, 739 struct perf_event_context *ctx) 740 { 741 } 742 #endif 743 744 /* 745 * set default to be dependent on timer tick just 746 * like original code 747 */ 748 #define PERF_CPU_HRTIMER (1000 / HZ) 749 /* 750 * function must be called with interrupts disbled 751 */ 752 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) 753 { 754 struct perf_cpu_context *cpuctx; 755 int rotations = 0; 756 757 WARN_ON(!irqs_disabled()); 758 759 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); 760 rotations = perf_rotate_context(cpuctx); 761 762 raw_spin_lock(&cpuctx->hrtimer_lock); 763 if (rotations) 764 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); 765 else 766 cpuctx->hrtimer_active = 0; 767 raw_spin_unlock(&cpuctx->hrtimer_lock); 768 769 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; 770 } 771 772 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) 773 { 774 struct hrtimer *timer = &cpuctx->hrtimer; 775 struct pmu *pmu = cpuctx->ctx.pmu; 776 u64 interval; 777 778 /* no multiplexing needed for SW PMU */ 779 if (pmu->task_ctx_nr == perf_sw_context) 780 return; 781 782 /* 783 * check default is sane, if not set then force to 784 * default interval (1/tick) 785 */ 786 interval = pmu->hrtimer_interval_ms; 787 if (interval < 1) 788 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; 789 790 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); 791 792 raw_spin_lock_init(&cpuctx->hrtimer_lock); 793 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 794 timer->function = perf_mux_hrtimer_handler; 795 } 796 797 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) 798 { 799 struct hrtimer *timer = &cpuctx->hrtimer; 800 struct pmu *pmu = cpuctx->ctx.pmu; 801 unsigned long flags; 802 803 /* not for SW PMU */ 804 if (pmu->task_ctx_nr == perf_sw_context) 805 return 0; 806 807 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); 808 if (!cpuctx->hrtimer_active) { 809 cpuctx->hrtimer_active = 1; 810 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); 811 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 812 } 813 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); 814 815 return 0; 816 } 817 818 void perf_pmu_disable(struct pmu *pmu) 819 { 820 int *count = this_cpu_ptr(pmu->pmu_disable_count); 821 if (!(*count)++) 822 pmu->pmu_disable(pmu); 823 } 824 825 void perf_pmu_enable(struct pmu *pmu) 826 { 827 int *count = this_cpu_ptr(pmu->pmu_disable_count); 828 if (!--(*count)) 829 pmu->pmu_enable(pmu); 830 } 831 832 static DEFINE_PER_CPU(struct list_head, active_ctx_list); 833 834 /* 835 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and 836 * perf_event_task_tick() are fully serialized because they're strictly cpu 837 * affine and perf_event_ctx{activate,deactivate} are called with IRQs 838 * disabled, while perf_event_task_tick is called from IRQ context. 
839 */ 840 static void perf_event_ctx_activate(struct perf_event_context *ctx) 841 { 842 struct list_head *head = this_cpu_ptr(&active_ctx_list); 843 844 WARN_ON(!irqs_disabled()); 845 846 WARN_ON(!list_empty(&ctx->active_ctx_list)); 847 848 list_add(&ctx->active_ctx_list, head); 849 } 850 851 static void perf_event_ctx_deactivate(struct perf_event_context *ctx) 852 { 853 WARN_ON(!irqs_disabled()); 854 855 WARN_ON(list_empty(&ctx->active_ctx_list)); 856 857 list_del_init(&ctx->active_ctx_list); 858 } 859 860 static void get_ctx(struct perf_event_context *ctx) 861 { 862 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); 863 } 864 865 static void free_ctx(struct rcu_head *head) 866 { 867 struct perf_event_context *ctx; 868 869 ctx = container_of(head, struct perf_event_context, rcu_head); 870 kfree(ctx->task_ctx_data); 871 kfree(ctx); 872 } 873 874 static void put_ctx(struct perf_event_context *ctx) 875 { 876 if (atomic_dec_and_test(&ctx->refcount)) { 877 if (ctx->parent_ctx) 878 put_ctx(ctx->parent_ctx); 879 if (ctx->task) 880 put_task_struct(ctx->task); 881 call_rcu(&ctx->rcu_head, free_ctx); 882 } 883 } 884 885 /* 886 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and 887 * perf_pmu_migrate_context() we need some magic. 888 * 889 * Those places that change perf_event::ctx will hold both 890 * perf_event_ctx::mutex of the 'old' and 'new' ctx value. 891 * 892 * Lock ordering is by mutex address. There are two other sites where 893 * perf_event_context::mutex nests and those are: 894 * 895 * - perf_event_exit_task_context() [ child , 0 ] 896 * __perf_event_exit_task() 897 * sync_child_event() 898 * put_event() [ parent, 1 ] 899 * 900 * - perf_event_init_context() [ parent, 0 ] 901 * inherit_task_group() 902 * inherit_group() 903 * inherit_event() 904 * perf_event_alloc() 905 * perf_init_event() 906 * perf_try_init_event() [ child , 1 ] 907 * 908 * While it appears there is an obvious deadlock here -- the parent and child 909 * nesting levels are inverted between the two. This is in fact safe because 910 * life-time rules separate them. That is an exiting task cannot fork, and a 911 * spawning task cannot (yet) exit. 912 * 913 * But remember that that these are parent<->child context relations, and 914 * migration does not affect children, therefore these two orderings should not 915 * interact. 916 * 917 * The change in perf_event::ctx does not affect children (as claimed above) 918 * because the sys_perf_event_open() case will install a new event and break 919 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only 920 * concerned with cpuctx and that doesn't have children. 921 * 922 * The places that change perf_event::ctx will issue: 923 * 924 * perf_remove_from_context(); 925 * synchronize_rcu(); 926 * perf_install_in_context(); 927 * 928 * to affect the change. The remove_from_context() + synchronize_rcu() should 929 * quiesce the event, after which we can install it in the new location. This 930 * means that only external vectors (perf_fops, prctl) can perturb the event 931 * while in transit. Therefore all such accessors should also acquire 932 * perf_event_context::mutex to serialize against this. 933 * 934 * However; because event->ctx can change while we're waiting to acquire 935 * ctx->mutex we must be careful and use the below perf_event_ctx_lock() 936 * function. 
937 * 938 * Lock order: 939 * task_struct::perf_event_mutex 940 * perf_event_context::mutex 941 * perf_event_context::lock 942 * perf_event::child_mutex; 943 * perf_event::mmap_mutex 944 * mmap_sem 945 */ 946 static struct perf_event_context * 947 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) 948 { 949 struct perf_event_context *ctx; 950 951 again: 952 rcu_read_lock(); 953 ctx = ACCESS_ONCE(event->ctx); 954 if (!atomic_inc_not_zero(&ctx->refcount)) { 955 rcu_read_unlock(); 956 goto again; 957 } 958 rcu_read_unlock(); 959 960 mutex_lock_nested(&ctx->mutex, nesting); 961 if (event->ctx != ctx) { 962 mutex_unlock(&ctx->mutex); 963 put_ctx(ctx); 964 goto again; 965 } 966 967 return ctx; 968 } 969 970 static inline struct perf_event_context * 971 perf_event_ctx_lock(struct perf_event *event) 972 { 973 return perf_event_ctx_lock_nested(event, 0); 974 } 975 976 static void perf_event_ctx_unlock(struct perf_event *event, 977 struct perf_event_context *ctx) 978 { 979 mutex_unlock(&ctx->mutex); 980 put_ctx(ctx); 981 } 982 983 /* 984 * This must be done under the ctx->lock, such as to serialize against 985 * context_equiv(), therefore we cannot call put_ctx() since that might end up 986 * calling scheduler related locks and ctx->lock nests inside those. 987 */ 988 static __must_check struct perf_event_context * 989 unclone_ctx(struct perf_event_context *ctx) 990 { 991 struct perf_event_context *parent_ctx = ctx->parent_ctx; 992 993 lockdep_assert_held(&ctx->lock); 994 995 if (parent_ctx) 996 ctx->parent_ctx = NULL; 997 ctx->generation++; 998 999 return parent_ctx; 1000 } 1001 1002 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) 1003 { 1004 /* 1005 * only top level events have the pid namespace they were created in 1006 */ 1007 if (event->parent) 1008 event = event->parent; 1009 1010 return task_tgid_nr_ns(p, event->ns); 1011 } 1012 1013 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) 1014 { 1015 /* 1016 * only top level events have the pid namespace they were created in 1017 */ 1018 if (event->parent) 1019 event = event->parent; 1020 1021 return task_pid_nr_ns(p, event->ns); 1022 } 1023 1024 /* 1025 * If we inherit events we want to return the parent event id 1026 * to userspace. 1027 */ 1028 static u64 primary_event_id(struct perf_event *event) 1029 { 1030 u64 id = event->id; 1031 1032 if (event->parent) 1033 id = event->parent->id; 1034 1035 return id; 1036 } 1037 1038 /* 1039 * Get the perf_event_context for a task and lock it. 1040 * This has to cope with with the fact that until it is locked, 1041 * the context could get moved to another task. 1042 */ 1043 static struct perf_event_context * 1044 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) 1045 { 1046 struct perf_event_context *ctx; 1047 1048 retry: 1049 /* 1050 * One of the few rules of preemptible RCU is that one cannot do 1051 * rcu_read_unlock() while holding a scheduler (or nested) lock when 1052 * part of the read side critical section was preemptible -- see 1053 * rcu_read_unlock_special(). 1054 * 1055 * Since ctx->lock nests under rq->lock we must ensure the entire read 1056 * side critical section is non-preemptible. 
1057 */ 1058 preempt_disable(); 1059 rcu_read_lock(); 1060 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); 1061 if (ctx) { 1062 /* 1063 * If this context is a clone of another, it might 1064 * get swapped for another underneath us by 1065 * perf_event_task_sched_out, though the 1066 * rcu_read_lock() protects us from any context 1067 * getting freed. Lock the context and check if it 1068 * got swapped before we could get the lock, and retry 1069 * if so. If we locked the right context, then it 1070 * can't get swapped on us any more. 1071 */ 1072 raw_spin_lock_irqsave(&ctx->lock, *flags); 1073 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { 1074 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1075 rcu_read_unlock(); 1076 preempt_enable(); 1077 goto retry; 1078 } 1079 1080 if (!atomic_inc_not_zero(&ctx->refcount)) { 1081 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1082 ctx = NULL; 1083 } 1084 } 1085 rcu_read_unlock(); 1086 preempt_enable(); 1087 return ctx; 1088 } 1089 1090 /* 1091 * Get the context for a task and increment its pin_count so it 1092 * can't get swapped to another task. This also increments its 1093 * reference count so that the context can't get freed. 1094 */ 1095 static struct perf_event_context * 1096 perf_pin_task_context(struct task_struct *task, int ctxn) 1097 { 1098 struct perf_event_context *ctx; 1099 unsigned long flags; 1100 1101 ctx = perf_lock_task_context(task, ctxn, &flags); 1102 if (ctx) { 1103 ++ctx->pin_count; 1104 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1105 } 1106 return ctx; 1107 } 1108 1109 static void perf_unpin_context(struct perf_event_context *ctx) 1110 { 1111 unsigned long flags; 1112 1113 raw_spin_lock_irqsave(&ctx->lock, flags); 1114 --ctx->pin_count; 1115 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1116 } 1117 1118 /* 1119 * Update the record of the current time in a context. 1120 */ 1121 static void update_context_time(struct perf_event_context *ctx) 1122 { 1123 u64 now = perf_clock(); 1124 1125 ctx->time += now - ctx->timestamp; 1126 ctx->timestamp = now; 1127 } 1128 1129 static u64 perf_event_time(struct perf_event *event) 1130 { 1131 struct perf_event_context *ctx = event->ctx; 1132 1133 if (is_cgroup_event(event)) 1134 return perf_cgroup_event_time(event); 1135 1136 return ctx ? ctx->time : 0; 1137 } 1138 1139 /* 1140 * Update the total_time_enabled and total_time_running fields for a event. 1141 * The caller of this function needs to hold the ctx->lock. 1142 */ 1143 static void update_event_times(struct perf_event *event) 1144 { 1145 struct perf_event_context *ctx = event->ctx; 1146 u64 run_end; 1147 1148 if (event->state < PERF_EVENT_STATE_INACTIVE || 1149 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 1150 return; 1151 /* 1152 * in cgroup mode, time_enabled represents 1153 * the time the event was enabled AND active 1154 * tasks were in the monitored cgroup. This is 1155 * independent of the activity of the context as 1156 * there may be a mix of cgroup and non-cgroup events. 1157 * 1158 * That is why we treat cgroup events differently 1159 * here. 
1160 */ 1161 if (is_cgroup_event(event)) 1162 run_end = perf_cgroup_event_time(event); 1163 else if (ctx->is_active) 1164 run_end = ctx->time; 1165 else 1166 run_end = event->tstamp_stopped; 1167 1168 event->total_time_enabled = run_end - event->tstamp_enabled; 1169 1170 if (event->state == PERF_EVENT_STATE_INACTIVE) 1171 run_end = event->tstamp_stopped; 1172 else 1173 run_end = perf_event_time(event); 1174 1175 event->total_time_running = run_end - event->tstamp_running; 1176 1177 } 1178 1179 /* 1180 * Update total_time_enabled and total_time_running for all events in a group. 1181 */ 1182 static void update_group_times(struct perf_event *leader) 1183 { 1184 struct perf_event *event; 1185 1186 update_event_times(leader); 1187 list_for_each_entry(event, &leader->sibling_list, group_entry) 1188 update_event_times(event); 1189 } 1190 1191 static struct list_head * 1192 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) 1193 { 1194 if (event->attr.pinned) 1195 return &ctx->pinned_groups; 1196 else 1197 return &ctx->flexible_groups; 1198 } 1199 1200 /* 1201 * Add an event to the lists for its context. 1202 * Must be called with ctx->mutex and ctx->lock held. 1203 */ 1204 static void 1205 list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1206 { 1207 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1208 event->attach_state |= PERF_ATTACH_CONTEXT; 1209 1210 /* 1211 * If we're a stand alone event or group leader, we go to the context 1212 * list, group events are kept attached to the group so that 1213 * perf_group_detach can, at all times, locate all siblings. 1214 */ 1215 if (event->group_leader == event) { 1216 struct list_head *list; 1217 1218 if (is_software_event(event)) 1219 event->group_flags |= PERF_GROUP_SOFTWARE; 1220 1221 list = ctx_group_list(event, ctx); 1222 list_add_tail(&event->group_entry, list); 1223 } 1224 1225 if (is_cgroup_event(event)) 1226 ctx->nr_cgroups++; 1227 1228 list_add_rcu(&event->event_entry, &ctx->event_list); 1229 ctx->nr_events++; 1230 if (event->attr.inherit_stat) 1231 ctx->nr_stat++; 1232 1233 ctx->generation++; 1234 } 1235 1236 /* 1237 * Initialize event state based on the perf_event_attr::disabled. 1238 */ 1239 static inline void perf_event__state_init(struct perf_event *event) 1240 { 1241 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : 1242 PERF_EVENT_STATE_INACTIVE; 1243 } 1244 1245 /* 1246 * Called at perf_event creation and when events are attached/detached from a 1247 * group.
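 *
 * Worked example (illustrative values only): with
 *
 *	read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *		      PERF_FORMAT_ID | PERF_FORMAT_GROUP
 *
 * and a leader that has two siblings, the function below computes
 *
 *	entry = 8 (value) + 8 (id)		= 16
 *	size  = 8 (time_enabled) + 8 (nr)	= 16
 *	nr    = 1 + 2 siblings			= 3
 *	read_size = 16 + 16 * 3			= 64 bytes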
1248 */ 1249 static void perf_event__read_size(struct perf_event *event) 1250 { 1251 int entry = sizeof(u64); /* value */ 1252 int size = 0; 1253 int nr = 1; 1254 1255 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1256 size += sizeof(u64); 1257 1258 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1259 size += sizeof(u64); 1260 1261 if (event->attr.read_format & PERF_FORMAT_ID) 1262 entry += sizeof(u64); 1263 1264 if (event->attr.read_format & PERF_FORMAT_GROUP) { 1265 nr += event->group_leader->nr_siblings; 1266 size += sizeof(u64); 1267 } 1268 1269 size += entry * nr; 1270 event->read_size = size; 1271 } 1272 1273 static void perf_event__header_size(struct perf_event *event) 1274 { 1275 struct perf_sample_data *data; 1276 u64 sample_type = event->attr.sample_type; 1277 u16 size = 0; 1278 1279 perf_event__read_size(event); 1280 1281 if (sample_type & PERF_SAMPLE_IP) 1282 size += sizeof(data->ip); 1283 1284 if (sample_type & PERF_SAMPLE_ADDR) 1285 size += sizeof(data->addr); 1286 1287 if (sample_type & PERF_SAMPLE_PERIOD) 1288 size += sizeof(data->period); 1289 1290 if (sample_type & PERF_SAMPLE_WEIGHT) 1291 size += sizeof(data->weight); 1292 1293 if (sample_type & PERF_SAMPLE_READ) 1294 size += event->read_size; 1295 1296 if (sample_type & PERF_SAMPLE_DATA_SRC) 1297 size += sizeof(data->data_src.val); 1298 1299 if (sample_type & PERF_SAMPLE_TRANSACTION) 1300 size += sizeof(data->txn); 1301 1302 event->header_size = size; 1303 } 1304 1305 static void perf_event__id_header_size(struct perf_event *event) 1306 { 1307 struct perf_sample_data *data; 1308 u64 sample_type = event->attr.sample_type; 1309 u16 size = 0; 1310 1311 if (sample_type & PERF_SAMPLE_TID) 1312 size += sizeof(data->tid_entry); 1313 1314 if (sample_type & PERF_SAMPLE_TIME) 1315 size += sizeof(data->time); 1316 1317 if (sample_type & PERF_SAMPLE_IDENTIFIER) 1318 size += sizeof(data->id); 1319 1320 if (sample_type & PERF_SAMPLE_ID) 1321 size += sizeof(data->id); 1322 1323 if (sample_type & PERF_SAMPLE_STREAM_ID) 1324 size += sizeof(data->stream_id); 1325 1326 if (sample_type & PERF_SAMPLE_CPU) 1327 size += sizeof(data->cpu_entry); 1328 1329 event->id_header_size = size; 1330 } 1331 1332 static void perf_group_attach(struct perf_event *event) 1333 { 1334 struct perf_event *group_leader = event->group_leader, *pos; 1335 1336 /* 1337 * We can have double attach due to group movement in perf_event_open. 1338 */ 1339 if (event->attach_state & PERF_ATTACH_GROUP) 1340 return; 1341 1342 event->attach_state |= PERF_ATTACH_GROUP; 1343 1344 if (group_leader == event) 1345 return; 1346 1347 WARN_ON_ONCE(group_leader->ctx != event->ctx); 1348 1349 if (group_leader->group_flags & PERF_GROUP_SOFTWARE && 1350 !is_software_event(event)) 1351 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; 1352 1353 list_add_tail(&event->group_entry, &group_leader->sibling_list); 1354 group_leader->nr_siblings++; 1355 1356 perf_event__header_size(group_leader); 1357 1358 list_for_each_entry(pos, &group_leader->sibling_list, group_entry) 1359 perf_event__header_size(pos); 1360 } 1361 1362 /* 1363 * Remove a event from the lists for its context. 1364 * Must be called with ctx->mutex and ctx->lock held. 1365 */ 1366 static void 1367 list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1368 { 1369 struct perf_cpu_context *cpuctx; 1370 1371 WARN_ON_ONCE(event->ctx != ctx); 1372 lockdep_assert_held(&ctx->lock); 1373 1374 /* 1375 * We can have double detach due to exit/hot-unplug + close. 
1376 */ 1377 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) 1378 return; 1379 1380 event->attach_state &= ~PERF_ATTACH_CONTEXT; 1381 1382 if (is_cgroup_event(event)) { 1383 ctx->nr_cgroups--; 1384 cpuctx = __get_cpu_context(ctx); 1385 /* 1386 * if there are no more cgroup events 1387 * then clear cgrp to avoid stale pointer 1388 * in update_cgrp_time_from_cpuctx() 1389 */ 1390 if (!ctx->nr_cgroups) 1391 cpuctx->cgrp = NULL; 1392 } 1393 1394 ctx->nr_events--; 1395 if (event->attr.inherit_stat) 1396 ctx->nr_stat--; 1397 1398 list_del_rcu(&event->event_entry); 1399 1400 if (event->group_leader == event) 1401 list_del_init(&event->group_entry); 1402 1403 update_group_times(event); 1404 1405 /* 1406 * If event was in error state, then keep it 1407 * that way, otherwise bogus counts will be 1408 * returned on read(). The only way to get out 1409 * of error state is by explicit re-enabling 1410 * of the event 1411 */ 1412 if (event->state > PERF_EVENT_STATE_OFF) 1413 event->state = PERF_EVENT_STATE_OFF; 1414 1415 ctx->generation++; 1416 } 1417 1418 static void perf_group_detach(struct perf_event *event) 1419 { 1420 struct perf_event *sibling, *tmp; 1421 struct list_head *list = NULL; 1422 1423 /* 1424 * We can have double detach due to exit/hot-unplug + close. 1425 */ 1426 if (!(event->attach_state & PERF_ATTACH_GROUP)) 1427 return; 1428 1429 event->attach_state &= ~PERF_ATTACH_GROUP; 1430 1431 /* 1432 * If this is a sibling, remove it from its group. 1433 */ 1434 if (event->group_leader != event) { 1435 list_del_init(&event->group_entry); 1436 event->group_leader->nr_siblings--; 1437 goto out; 1438 } 1439 1440 if (!list_empty(&event->group_entry)) 1441 list = &event->group_entry; 1442 1443 /* 1444 * If this was a group event with sibling events then 1445 * upgrade the siblings to singleton events by adding them 1446 * to whatever list we are on. 1447 */ 1448 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { 1449 if (list) 1450 list_move_tail(&sibling->group_entry, list); 1451 sibling->group_leader = sibling; 1452 1453 /* Inherit group flags from the previous leader */ 1454 sibling->group_flags = event->group_flags; 1455 1456 WARN_ON_ONCE(sibling->ctx != event->ctx); 1457 } 1458 1459 out: 1460 perf_event__header_size(event->group_leader); 1461 1462 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) 1463 perf_event__header_size(tmp); 1464 } 1465 1466 /* 1467 * User event without the task. 1468 */ 1469 static bool is_orphaned_event(struct perf_event *event) 1470 { 1471 return event && !is_kernel_event(event) && !event->owner; 1472 } 1473 1474 /* 1475 * Event has a parent but the parent's task finished and it's 1476 * alive only because of children holding a reference. 1477 */ 1478 static bool is_orphaned_child(struct perf_event *event) 1479 { 1480 return is_orphaned_event(event->parent); 1481 } 1482 1483 static void orphans_remove_work(struct work_struct *work); 1484 1485 static void schedule_orphans_remove(struct perf_event_context *ctx) 1486 { 1487 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) 1488 return; 1489 1490 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { 1491 get_ctx(ctx); 1492 ctx->orphans_remove_sched = true; 1493 } 1494 } 1495 1496 static int __init perf_workqueue_init(void) 1497 { 1498 perf_wq = create_singlethread_workqueue("perf"); 1499 WARN(!perf_wq, "failed to create perf workqueue\n"); 1500 return perf_wq ?
0 : -1; 1501 } 1502 1503 core_initcall(perf_workqueue_init); 1504 1505 static inline int pmu_filter_match(struct perf_event *event) 1506 { 1507 struct pmu *pmu = event->pmu; 1508 return pmu->filter_match ? pmu->filter_match(event) : 1; 1509 } 1510 1511 static inline int 1512 event_filter_match(struct perf_event *event) 1513 { 1514 return (event->cpu == -1 || event->cpu == smp_processor_id()) 1515 && perf_cgroup_match(event) && pmu_filter_match(event); 1516 } 1517 1518 static void 1519 event_sched_out(struct perf_event *event, 1520 struct perf_cpu_context *cpuctx, 1521 struct perf_event_context *ctx) 1522 { 1523 u64 tstamp = perf_event_time(event); 1524 u64 delta; 1525 1526 WARN_ON_ONCE(event->ctx != ctx); 1527 lockdep_assert_held(&ctx->lock); 1528 1529 /* 1530 * An event which could not be activated because of 1531 * filter mismatch still needs to have its timings 1532 * maintained, otherwise bogus information is return 1533 * via read() for time_enabled, time_running: 1534 */ 1535 if (event->state == PERF_EVENT_STATE_INACTIVE 1536 && !event_filter_match(event)) { 1537 delta = tstamp - event->tstamp_stopped; 1538 event->tstamp_running += delta; 1539 event->tstamp_stopped = tstamp; 1540 } 1541 1542 if (event->state != PERF_EVENT_STATE_ACTIVE) 1543 return; 1544 1545 perf_pmu_disable(event->pmu); 1546 1547 event->state = PERF_EVENT_STATE_INACTIVE; 1548 if (event->pending_disable) { 1549 event->pending_disable = 0; 1550 event->state = PERF_EVENT_STATE_OFF; 1551 } 1552 event->tstamp_stopped = tstamp; 1553 event->pmu->del(event, 0); 1554 event->oncpu = -1; 1555 1556 if (!is_software_event(event)) 1557 cpuctx->active_oncpu--; 1558 if (!--ctx->nr_active) 1559 perf_event_ctx_deactivate(ctx); 1560 if (event->attr.freq && event->attr.sample_freq) 1561 ctx->nr_freq--; 1562 if (event->attr.exclusive || !cpuctx->active_oncpu) 1563 cpuctx->exclusive = 0; 1564 1565 if (is_orphaned_child(event)) 1566 schedule_orphans_remove(ctx); 1567 1568 perf_pmu_enable(event->pmu); 1569 } 1570 1571 static void 1572 group_sched_out(struct perf_event *group_event, 1573 struct perf_cpu_context *cpuctx, 1574 struct perf_event_context *ctx) 1575 { 1576 struct perf_event *event; 1577 int state = group_event->state; 1578 1579 event_sched_out(group_event, cpuctx, ctx); 1580 1581 /* 1582 * Schedule out siblings (if any): 1583 */ 1584 list_for_each_entry(event, &group_event->sibling_list, group_entry) 1585 event_sched_out(event, cpuctx, ctx); 1586 1587 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) 1588 cpuctx->exclusive = 0; 1589 } 1590 1591 struct remove_event { 1592 struct perf_event *event; 1593 bool detach_group; 1594 }; 1595 1596 /* 1597 * Cross CPU call to remove a performance event 1598 * 1599 * We disable the event on the hardware level first. After that we 1600 * remove it from the context list. 1601 */ 1602 static int __perf_remove_from_context(void *info) 1603 { 1604 struct remove_event *re = info; 1605 struct perf_event *event = re->event; 1606 struct perf_event_context *ctx = event->ctx; 1607 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1608 1609 raw_spin_lock(&ctx->lock); 1610 event_sched_out(event, cpuctx, ctx); 1611 if (re->detach_group) 1612 perf_group_detach(event); 1613 list_del_event(event, ctx); 1614 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1615 ctx->is_active = 0; 1616 cpuctx->task_ctx = NULL; 1617 } 1618 raw_spin_unlock(&ctx->lock); 1619 1620 return 0; 1621 } 1622 1623 1624 /* 1625 * Remove the event from a task's (or a CPU's) list of events. 
1626 * 1627 * CPU events are removed with a smp call. For task events we only 1628 * call when the task is on a CPU. 1629 * 1630 * If event->ctx is a cloned context, callers must make sure that 1631 * every task struct that event->ctx->task could possibly point to 1632 * remains valid. This is OK when called from perf_release since 1633 * that only calls us on the top-level context, which can't be a clone. 1634 * When called from perf_event_exit_task, it's OK because the 1635 * context has been detached from its task. 1636 */ 1637 static void perf_remove_from_context(struct perf_event *event, bool detach_group) 1638 { 1639 struct perf_event_context *ctx = event->ctx; 1640 struct task_struct *task = ctx->task; 1641 struct remove_event re = { 1642 .event = event, 1643 .detach_group = detach_group, 1644 }; 1645 1646 lockdep_assert_held(&ctx->mutex); 1647 1648 if (!task) { 1649 /* 1650 * Per cpu events are removed via an smp call. The removal can 1651 * fail if the CPU is currently offline, but in that case we 1652 * already called __perf_remove_from_context from 1653 * perf_event_exit_cpu. 1654 */ 1655 cpu_function_call(event->cpu, __perf_remove_from_context, &re); 1656 return; 1657 } 1658 1659 retry: 1660 if (!task_function_call(task, __perf_remove_from_context, &re)) 1661 return; 1662 1663 raw_spin_lock_irq(&ctx->lock); 1664 /* 1665 * If we failed to find a running task, but find the context active now 1666 * that we've acquired the ctx->lock, retry. 1667 */ 1668 if (ctx->is_active) { 1669 raw_spin_unlock_irq(&ctx->lock); 1670 /* 1671 * Reload the task pointer, it might have been changed by 1672 * a concurrent perf_event_context_sched_out(). 1673 */ 1674 task = ctx->task; 1675 goto retry; 1676 } 1677 1678 /* 1679 * Since the task isn't running, its safe to remove the event, us 1680 * holding the ctx->lock ensures the task won't get scheduled in. 1681 */ 1682 if (detach_group) 1683 perf_group_detach(event); 1684 list_del_event(event, ctx); 1685 raw_spin_unlock_irq(&ctx->lock); 1686 } 1687 1688 /* 1689 * Cross CPU call to disable a performance event 1690 */ 1691 int __perf_event_disable(void *info) 1692 { 1693 struct perf_event *event = info; 1694 struct perf_event_context *ctx = event->ctx; 1695 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1696 1697 /* 1698 * If this is a per-task event, need to check whether this 1699 * event's task is the current task on this cpu. 1700 * 1701 * Can trigger due to concurrent perf_event_context_sched_out() 1702 * flipping contexts around. 1703 */ 1704 if (ctx->task && cpuctx->task_ctx != ctx) 1705 return -EINVAL; 1706 1707 raw_spin_lock(&ctx->lock); 1708 1709 /* 1710 * If the event is on, turn it off. 1711 * If it is in error state, leave it in error state. 1712 */ 1713 if (event->state >= PERF_EVENT_STATE_INACTIVE) { 1714 update_context_time(ctx); 1715 update_cgrp_time_from_event(event); 1716 update_group_times(event); 1717 if (event == event->group_leader) 1718 group_sched_out(event, cpuctx, ctx); 1719 else 1720 event_sched_out(event, cpuctx, ctx); 1721 event->state = PERF_EVENT_STATE_OFF; 1722 } 1723 1724 raw_spin_unlock(&ctx->lock); 1725 1726 return 0; 1727 } 1728 1729 /* 1730 * Disable a event. 1731 * 1732 * If event->ctx is a cloned context, callers must make sure that 1733 * every task struct that event->ctx->task could possibly point to 1734 * remains valid. 
This condition is satisfied when called through 1735 * perf_event_for_each_child or perf_event_for_each because they 1736 * hold the top-level event's child_mutex, so any descendant that 1737 * goes to exit will block in sync_child_event. 1738 * When called from perf_pending_event it's OK because event->ctx 1739 * is the current context on this CPU and preemption is disabled, 1740 * hence we can't get into perf_event_task_sched_out for this context. 1741 */ 1742 static void _perf_event_disable(struct perf_event *event) 1743 { 1744 struct perf_event_context *ctx = event->ctx; 1745 struct task_struct *task = ctx->task; 1746 1747 if (!task) { 1748 /* 1749 * Disable the event on the cpu that it's on 1750 */ 1751 cpu_function_call(event->cpu, __perf_event_disable, event); 1752 return; 1753 } 1754 1755 retry: 1756 if (!task_function_call(task, __perf_event_disable, event)) 1757 return; 1758 1759 raw_spin_lock_irq(&ctx->lock); 1760 /* 1761 * If the event is still active, we need to retry the cross-call. 1762 */ 1763 if (event->state == PERF_EVENT_STATE_ACTIVE) { 1764 raw_spin_unlock_irq(&ctx->lock); 1765 /* 1766 * Reload the task pointer, it might have been changed by 1767 * a concurrent perf_event_context_sched_out(). 1768 */ 1769 task = ctx->task; 1770 goto retry; 1771 } 1772 1773 /* 1774 * Since we have the lock this context can't be scheduled 1775 * in, so we can change the state safely. 1776 */ 1777 if (event->state == PERF_EVENT_STATE_INACTIVE) { 1778 update_group_times(event); 1779 event->state = PERF_EVENT_STATE_OFF; 1780 } 1781 raw_spin_unlock_irq(&ctx->lock); 1782 } 1783 1784 /* 1785 * Strictly speaking kernel users cannot create groups and therefore this 1786 * interface does not need the perf_event_ctx_lock() magic. 1787 */ 1788 void perf_event_disable(struct perf_event *event) 1789 { 1790 struct perf_event_context *ctx; 1791 1792 ctx = perf_event_ctx_lock(event); 1793 _perf_event_disable(event); 1794 perf_event_ctx_unlock(event, ctx); 1795 } 1796 EXPORT_SYMBOL_GPL(perf_event_disable); 1797 1798 static void perf_set_shadow_time(struct perf_event *event, 1799 struct perf_event_context *ctx, 1800 u64 tstamp) 1801 { 1802 /* 1803 * use the correct time source for the time snapshot 1804 * 1805 * We could get by without this by leveraging the 1806 * fact that to get to this function, the caller 1807 * has most likely already called update_context_time() 1808 * and update_cgrp_time_xx() and thus both timestamps 1809 * are identical (or very close). Given that tstamp is 1810 * already adjusted for cgroup, we could say that: 1811 * tstamp - ctx->timestamp 1812 * is equivalent to 1813 * tstamp - cgrp->timestamp. 1814 * 1815 * Then, in perf_output_read(), the calculation would 1816 * work with no changes because: 1817 * - event is guaranteed scheduled in 1818 * - not scheduled out in between 1819 * - thus the timestamp would be the same 1820 * 1821 * But this is a bit hairy. 1822 * 1823 * So instead, we have an explicit cgroup call to remain 1824 * within the same time source all along. We believe it 1825 * is cleaner and simpler to understand.
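 *
 * In short (sketch of the intent, not a behavioural change):
 *
 *	shadow_ctx_time = tstamp - timestamp of the time source
 *
 * where the time source is the cgroup clock for cgroup events and
 * ctx->timestamp otherwise, matching the two branches below.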
1826 */ 1827 if (is_cgroup_event(event)) 1828 perf_cgroup_set_shadow_time(event, tstamp); 1829 else 1830 event->shadow_ctx_time = tstamp - ctx->timestamp; 1831 } 1832 1833 #define MAX_INTERRUPTS (~0ULL) 1834 1835 static void perf_log_throttle(struct perf_event *event, int enable); 1836 static void perf_log_itrace_start(struct perf_event *event); 1837 1838 static int 1839 event_sched_in(struct perf_event *event, 1840 struct perf_cpu_context *cpuctx, 1841 struct perf_event_context *ctx) 1842 { 1843 u64 tstamp = perf_event_time(event); 1844 int ret = 0; 1845 1846 lockdep_assert_held(&ctx->lock); 1847 1848 if (event->state <= PERF_EVENT_STATE_OFF) 1849 return 0; 1850 1851 event->state = PERF_EVENT_STATE_ACTIVE; 1852 event->oncpu = smp_processor_id(); 1853 1854 /* 1855 * Unthrottle events, since we scheduled we might have missed several 1856 * ticks already, also for a heavily scheduling task there is little 1857 * guarantee it'll get a tick in a timely manner. 1858 */ 1859 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { 1860 perf_log_throttle(event, 1); 1861 event->hw.interrupts = 0; 1862 } 1863 1864 /* 1865 * The new state must be visible before we turn it on in the hardware: 1866 */ 1867 smp_wmb(); 1868 1869 perf_pmu_disable(event->pmu); 1870 1871 event->tstamp_running += tstamp - event->tstamp_stopped; 1872 1873 perf_set_shadow_time(event, ctx, tstamp); 1874 1875 perf_log_itrace_start(event); 1876 1877 if (event->pmu->add(event, PERF_EF_START)) { 1878 event->state = PERF_EVENT_STATE_INACTIVE; 1879 event->oncpu = -1; 1880 ret = -EAGAIN; 1881 goto out; 1882 } 1883 1884 if (!is_software_event(event)) 1885 cpuctx->active_oncpu++; 1886 if (!ctx->nr_active++) 1887 perf_event_ctx_activate(ctx); 1888 if (event->attr.freq && event->attr.sample_freq) 1889 ctx->nr_freq++; 1890 1891 if (event->attr.exclusive) 1892 cpuctx->exclusive = 1; 1893 1894 if (is_orphaned_child(event)) 1895 schedule_orphans_remove(ctx); 1896 1897 out: 1898 perf_pmu_enable(event->pmu); 1899 1900 return ret; 1901 } 1902 1903 static int 1904 group_sched_in(struct perf_event *group_event, 1905 struct perf_cpu_context *cpuctx, 1906 struct perf_event_context *ctx) 1907 { 1908 struct perf_event *event, *partial_group = NULL; 1909 struct pmu *pmu = ctx->pmu; 1910 u64 now = ctx->time; 1911 bool simulate = false; 1912 1913 if (group_event->state == PERF_EVENT_STATE_OFF) 1914 return 0; 1915 1916 pmu->start_txn(pmu); 1917 1918 if (event_sched_in(group_event, cpuctx, ctx)) { 1919 pmu->cancel_txn(pmu); 1920 perf_mux_hrtimer_restart(cpuctx); 1921 return -EAGAIN; 1922 } 1923 1924 /* 1925 * Schedule in siblings as one group (if any): 1926 */ 1927 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1928 if (event_sched_in(event, cpuctx, ctx)) { 1929 partial_group = event; 1930 goto group_error; 1931 } 1932 } 1933 1934 if (!pmu->commit_txn(pmu)) 1935 return 0; 1936 1937 group_error: 1938 /* 1939 * Groups can be scheduled in as one unit only, so undo any 1940 * partial group before returning: 1941 * The events up to the failed event are scheduled out normally, 1942 * tstamp_stopped will be updated. 1943 * 1944 * The failed events and the remaining siblings need to have 1945 * their timings updated as if they had gone thru event_sched_in() 1946 * and event_sched_out(). This is required to get consistent timings 1947 * across the group. 
This also takes care of the case where the group 1948 * could never be scheduled by ensuring tstamp_stopped is set to mark 1949 * the time the event was actually stopped, such that time delta 1950 * calculation in update_event_times() is correct. 1951 */ 1952 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1953 if (event == partial_group) 1954 simulate = true; 1955 1956 if (simulate) { 1957 event->tstamp_running += now - event->tstamp_stopped; 1958 event->tstamp_stopped = now; 1959 } else { 1960 event_sched_out(event, cpuctx, ctx); 1961 } 1962 } 1963 event_sched_out(group_event, cpuctx, ctx); 1964 1965 pmu->cancel_txn(pmu); 1966 1967 perf_mux_hrtimer_restart(cpuctx); 1968 1969 return -EAGAIN; 1970 } 1971 1972 /* 1973 * Work out whether we can put this event group on the CPU now. 1974 */ 1975 static int group_can_go_on(struct perf_event *event, 1976 struct perf_cpu_context *cpuctx, 1977 int can_add_hw) 1978 { 1979 /* 1980 * Groups consisting entirely of software events can always go on. 1981 */ 1982 if (event->group_flags & PERF_GROUP_SOFTWARE) 1983 return 1; 1984 /* 1985 * If an exclusive group is already on, no other hardware 1986 * events can go on. 1987 */ 1988 if (cpuctx->exclusive) 1989 return 0; 1990 /* 1991 * If this group is exclusive and there are already 1992 * events on the CPU, it can't go on. 1993 */ 1994 if (event->attr.exclusive && cpuctx->active_oncpu) 1995 return 0; 1996 /* 1997 * Otherwise, try to add it if all previous groups were able 1998 * to go on. 1999 */ 2000 return can_add_hw; 2001 } 2002 2003 static void add_event_to_ctx(struct perf_event *event, 2004 struct perf_event_context *ctx) 2005 { 2006 u64 tstamp = perf_event_time(event); 2007 2008 list_add_event(event, ctx); 2009 perf_group_attach(event); 2010 event->tstamp_enabled = tstamp; 2011 event->tstamp_running = tstamp; 2012 event->tstamp_stopped = tstamp; 2013 } 2014 2015 static void task_ctx_sched_out(struct perf_event_context *ctx); 2016 static void 2017 ctx_sched_in(struct perf_event_context *ctx, 2018 struct perf_cpu_context *cpuctx, 2019 enum event_type_t event_type, 2020 struct task_struct *task); 2021 2022 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2023 struct perf_event_context *ctx, 2024 struct task_struct *task) 2025 { 2026 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); 2027 if (ctx) 2028 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); 2029 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); 2030 if (ctx) 2031 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); 2032 } 2033 2034 /* 2035 * Cross CPU call to install and enable a performance event 2036 * 2037 * Must be called with ctx->mutex held 2038 */ 2039 static int __perf_install_in_context(void *info) 2040 { 2041 struct perf_event *event = info; 2042 struct perf_event_context *ctx = event->ctx; 2043 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2044 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2045 struct task_struct *task = current; 2046 2047 perf_ctx_lock(cpuctx, task_ctx); 2048 perf_pmu_disable(cpuctx->ctx.pmu); 2049 2050 /* 2051 * If there was an active task_ctx schedule it out. 2052 */ 2053 if (task_ctx) 2054 task_ctx_sched_out(task_ctx); 2055 2056 /* 2057 * If the context we're installing events in is not the 2058 * active task_ctx, flip them. 
2059 */ 2060 if (ctx->task && task_ctx != ctx) { 2061 if (task_ctx) 2062 raw_spin_unlock(&task_ctx->lock); 2063 raw_spin_lock(&ctx->lock); 2064 task_ctx = ctx; 2065 } 2066 2067 if (task_ctx) { 2068 cpuctx->task_ctx = task_ctx; 2069 task = task_ctx->task; 2070 } 2071 2072 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2073 2074 update_context_time(ctx); 2075 /* 2076 * update cgrp time only if current cgrp 2077 * matches event->cgrp. Must be done before 2078 * calling add_event_to_ctx() 2079 */ 2080 update_cgrp_time_from_event(event); 2081 2082 add_event_to_ctx(event, ctx); 2083 2084 /* 2085 * Schedule everything back in 2086 */ 2087 perf_event_sched_in(cpuctx, task_ctx, task); 2088 2089 perf_pmu_enable(cpuctx->ctx.pmu); 2090 perf_ctx_unlock(cpuctx, task_ctx); 2091 2092 return 0; 2093 } 2094 2095 /* 2096 * Attach a performance event to a context 2097 * 2098 * First we add the event to the list with the hardware enable bit 2099 * in event->hw_config cleared. 2100 * 2101 * If the event is attached to a task which is on a CPU we use a smp 2102 * call to enable it in the task context. The task might have been 2103 * scheduled away, but we check this in the smp call again. 2104 */ 2105 static void 2106 perf_install_in_context(struct perf_event_context *ctx, 2107 struct perf_event *event, 2108 int cpu) 2109 { 2110 struct task_struct *task = ctx->task; 2111 2112 lockdep_assert_held(&ctx->mutex); 2113 2114 event->ctx = ctx; 2115 if (event->cpu != -1) 2116 event->cpu = cpu; 2117 2118 if (!task) { 2119 /* 2120 * Per cpu events are installed via an smp call and 2121 * the install is always successful. 2122 */ 2123 cpu_function_call(cpu, __perf_install_in_context, event); 2124 return; 2125 } 2126 2127 retry: 2128 if (!task_function_call(task, __perf_install_in_context, event)) 2129 return; 2130 2131 raw_spin_lock_irq(&ctx->lock); 2132 /* 2133 * If we failed to find a running task, but find the context active now 2134 * that we've acquired the ctx->lock, retry. 2135 */ 2136 if (ctx->is_active) { 2137 raw_spin_unlock_irq(&ctx->lock); 2138 /* 2139 * Reload the task pointer, it might have been changed by 2140 * a concurrent perf_event_context_sched_out(). 2141 */ 2142 task = ctx->task; 2143 goto retry; 2144 } 2145 2146 /* 2147 * Since the task isn't running, its safe to add the event, us holding 2148 * the ctx->lock ensures the task won't get scheduled in. 2149 */ 2150 add_event_to_ctx(event, ctx); 2151 raw_spin_unlock_irq(&ctx->lock); 2152 } 2153 2154 /* 2155 * Put a event into inactive state and update time fields. 2156 * Enabling the leader of a group effectively enables all 2157 * the group members that aren't explicitly disabled, so we 2158 * have to update their ->tstamp_enabled also. 2159 * Note: this works for group members as well as group leaders 2160 * since the non-leader members' sibling_lists will be empty. 
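 *
 * For illustration only (not part of this file): the usual userspace way to
 * exploit this is to create the group leader disabled, add the siblings with
 * group_fd pointing at the leader, and then enable only the leader; the whole
 * group is then scheduled (and counts) as one unit. A minimal sketch, assuming
 * a plain perf_event_open() syscall invocation:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled	= 1,			// leader starts off
 *	};
 *	int leader  = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 0;				// sibling follows the leader
 *	int sibling = syscall(__NR_perf_event_open, &attr, 0, -1, leader, 0);
 *
 *	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);	// enables the whole group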
2161 */ 2162 static void __perf_event_mark_enabled(struct perf_event *event) 2163 { 2164 struct perf_event *sub; 2165 u64 tstamp = perf_event_time(event); 2166 2167 event->state = PERF_EVENT_STATE_INACTIVE; 2168 event->tstamp_enabled = tstamp - event->total_time_enabled; 2169 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2170 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2171 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2172 } 2173 } 2174 2175 /* 2176 * Cross CPU call to enable a performance event 2177 */ 2178 static int __perf_event_enable(void *info) 2179 { 2180 struct perf_event *event = info; 2181 struct perf_event_context *ctx = event->ctx; 2182 struct perf_event *leader = event->group_leader; 2183 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2184 int err; 2185 2186 /* 2187 * There's a time window between 'ctx->is_active' check 2188 * in perf_event_enable function and this place having: 2189 * - IRQs on 2190 * - ctx->lock unlocked 2191 * 2192 * where the task could be killed and 'ctx' deactivated 2193 * by perf_event_exit_task. 2194 */ 2195 if (!ctx->is_active) 2196 return -EINVAL; 2197 2198 raw_spin_lock(&ctx->lock); 2199 update_context_time(ctx); 2200 2201 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2202 goto unlock; 2203 2204 /* 2205 * set current task's cgroup time reference point 2206 */ 2207 perf_cgroup_set_timestamp(current, ctx); 2208 2209 __perf_event_mark_enabled(event); 2210 2211 if (!event_filter_match(event)) { 2212 if (is_cgroup_event(event)) 2213 perf_cgroup_defer_enabled(event); 2214 goto unlock; 2215 } 2216 2217 /* 2218 * If the event is in a group and isn't the group leader, 2219 * then don't put it on unless the group is on. 2220 */ 2221 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2222 goto unlock; 2223 2224 if (!group_can_go_on(event, cpuctx, 1)) { 2225 err = -EEXIST; 2226 } else { 2227 if (event == leader) 2228 err = group_sched_in(event, cpuctx, ctx); 2229 else 2230 err = event_sched_in(event, cpuctx, ctx); 2231 } 2232 2233 if (err) { 2234 /* 2235 * If this event can't go on and it's part of a 2236 * group, then the whole group has to come off. 2237 */ 2238 if (leader != event) { 2239 group_sched_out(leader, cpuctx, ctx); 2240 perf_mux_hrtimer_restart(cpuctx); 2241 } 2242 if (leader->attr.pinned) { 2243 update_group_times(leader); 2244 leader->state = PERF_EVENT_STATE_ERROR; 2245 } 2246 } 2247 2248 unlock: 2249 raw_spin_unlock(&ctx->lock); 2250 2251 return 0; 2252 } 2253 2254 /* 2255 * Enable a event. 2256 * 2257 * If event->ctx is a cloned context, callers must make sure that 2258 * every task struct that event->ctx->task could possibly point to 2259 * remains valid. This condition is satisfied when called through 2260 * perf_event_for_each_child or perf_event_for_each as described 2261 * for perf_event_disable. 2262 */ 2263 static void _perf_event_enable(struct perf_event *event) 2264 { 2265 struct perf_event_context *ctx = event->ctx; 2266 struct task_struct *task = ctx->task; 2267 2268 if (!task) { 2269 /* 2270 * Enable the event on the cpu that it's on 2271 */ 2272 cpu_function_call(event->cpu, __perf_event_enable, event); 2273 return; 2274 } 2275 2276 raw_spin_lock_irq(&ctx->lock); 2277 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2278 goto out; 2279 2280 /* 2281 * If the event is in error state, clear that first. 
2282 * That way, if we see the event in error state below, we 2283 * know that it has gone back into error state, as distinct 2284 * from the task having been scheduled away before the 2285 * cross-call arrived. 2286 */ 2287 if (event->state == PERF_EVENT_STATE_ERROR) 2288 event->state = PERF_EVENT_STATE_OFF; 2289 2290 retry: 2291 if (!ctx->is_active) { 2292 __perf_event_mark_enabled(event); 2293 goto out; 2294 } 2295 2296 raw_spin_unlock_irq(&ctx->lock); 2297 2298 if (!task_function_call(task, __perf_event_enable, event)) 2299 return; 2300 2301 raw_spin_lock_irq(&ctx->lock); 2302 2303 /* 2304 * If the context is active and the event is still off, 2305 * we need to retry the cross-call. 2306 */ 2307 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2308 /* 2309 * task could have been flipped by a concurrent 2310 * perf_event_context_sched_out() 2311 */ 2312 task = ctx->task; 2313 goto retry; 2314 } 2315 2316 out: 2317 raw_spin_unlock_irq(&ctx->lock); 2318 } 2319 2320 /* 2321 * See perf_event_disable(); 2322 */ 2323 void perf_event_enable(struct perf_event *event) 2324 { 2325 struct perf_event_context *ctx; 2326 2327 ctx = perf_event_ctx_lock(event); 2328 _perf_event_enable(event); 2329 perf_event_ctx_unlock(event, ctx); 2330 } 2331 EXPORT_SYMBOL_GPL(perf_event_enable); 2332 2333 static int _perf_event_refresh(struct perf_event *event, int refresh) 2334 { 2335 /* 2336 * not supported on inherited events 2337 */ 2338 if (event->attr.inherit || !is_sampling_event(event)) 2339 return -EINVAL; 2340 2341 atomic_add(refresh, &event->event_limit); 2342 _perf_event_enable(event); 2343 2344 return 0; 2345 } 2346 2347 /* 2348 * See perf_event_disable() 2349 */ 2350 int perf_event_refresh(struct perf_event *event, int refresh) 2351 { 2352 struct perf_event_context *ctx; 2353 int ret; 2354 2355 ctx = perf_event_ctx_lock(event); 2356 ret = _perf_event_refresh(event, refresh); 2357 perf_event_ctx_unlock(event, ctx); 2358 2359 return ret; 2360 } 2361 EXPORT_SYMBOL_GPL(perf_event_refresh); 2362 2363 static void ctx_sched_out(struct perf_event_context *ctx, 2364 struct perf_cpu_context *cpuctx, 2365 enum event_type_t event_type) 2366 { 2367 struct perf_event *event; 2368 int is_active = ctx->is_active; 2369 2370 ctx->is_active &= ~event_type; 2371 if (likely(!ctx->nr_events)) 2372 return; 2373 2374 update_context_time(ctx); 2375 update_cgrp_time_from_cpuctx(cpuctx); 2376 if (!ctx->nr_active) 2377 return; 2378 2379 perf_pmu_disable(ctx->pmu); 2380 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2381 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2382 group_sched_out(event, cpuctx, ctx); 2383 } 2384 2385 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2386 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2387 group_sched_out(event, cpuctx, ctx); 2388 } 2389 perf_pmu_enable(ctx->pmu); 2390 } 2391 2392 /* 2393 * Test whether two contexts are equivalent, i.e. whether they have both been 2394 * cloned from the same version of the same context. 2395 * 2396 * Equivalence is measured using a generation number in the context that is 2397 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2398 * and list_del_event(). 
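 *
 * For example (illustrative numbers only): if a parent context was at
 * generation 7 when it was cloned for a child, the child records
 * parent_gen == 7. On a later context switch the two contexts are still
 * interchangeable only if the parent is *still* at generation 7 and neither
 * side has been pinned or uncloned; any list_add_event() or list_del_event()
 * in between bumps the generation and defeats the swap optimization.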
2399 */ 2400 static int context_equiv(struct perf_event_context *ctx1, 2401 struct perf_event_context *ctx2) 2402 { 2403 lockdep_assert_held(&ctx1->lock); 2404 lockdep_assert_held(&ctx2->lock); 2405 2406 /* Pinning disables the swap optimization */ 2407 if (ctx1->pin_count || ctx2->pin_count) 2408 return 0; 2409 2410 /* If ctx1 is the parent of ctx2 */ 2411 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2412 return 1; 2413 2414 /* If ctx2 is the parent of ctx1 */ 2415 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2416 return 1; 2417 2418 /* 2419 * If ctx1 and ctx2 have the same parent; we flatten the parent 2420 * hierarchy, see perf_event_init_context(). 2421 */ 2422 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2423 ctx1->parent_gen == ctx2->parent_gen) 2424 return 1; 2425 2426 /* Unmatched */ 2427 return 0; 2428 } 2429 2430 static void __perf_event_sync_stat(struct perf_event *event, 2431 struct perf_event *next_event) 2432 { 2433 u64 value; 2434 2435 if (!event->attr.inherit_stat) 2436 return; 2437 2438 /* 2439 * Update the event value, we cannot use perf_event_read() 2440 * because we're in the middle of a context switch and have IRQs 2441 * disabled, which upsets smp_call_function_single(), however 2442 * we know the event must be on the current CPU, therefore we 2443 * don't need to use it. 2444 */ 2445 switch (event->state) { 2446 case PERF_EVENT_STATE_ACTIVE: 2447 event->pmu->read(event); 2448 /* fall-through */ 2449 2450 case PERF_EVENT_STATE_INACTIVE: 2451 update_event_times(event); 2452 break; 2453 2454 default: 2455 break; 2456 } 2457 2458 /* 2459 * In order to keep per-task stats reliable we need to flip the event 2460 * values when we flip the contexts. 2461 */ 2462 value = local64_read(&next_event->count); 2463 value = local64_xchg(&event->count, value); 2464 local64_set(&next_event->count, value); 2465 2466 swap(event->total_time_enabled, next_event->total_time_enabled); 2467 swap(event->total_time_running, next_event->total_time_running); 2468 2469 /* 2470 * Since we swizzled the values, update the user visible data too. 
2471 */ 2472 perf_event_update_userpage(event); 2473 perf_event_update_userpage(next_event); 2474 } 2475 2476 static void perf_event_sync_stat(struct perf_event_context *ctx, 2477 struct perf_event_context *next_ctx) 2478 { 2479 struct perf_event *event, *next_event; 2480 2481 if (!ctx->nr_stat) 2482 return; 2483 2484 update_context_time(ctx); 2485 2486 event = list_first_entry(&ctx->event_list, 2487 struct perf_event, event_entry); 2488 2489 next_event = list_first_entry(&next_ctx->event_list, 2490 struct perf_event, event_entry); 2491 2492 while (&event->event_entry != &ctx->event_list && 2493 &next_event->event_entry != &next_ctx->event_list) { 2494 2495 __perf_event_sync_stat(event, next_event); 2496 2497 event = list_next_entry(event, event_entry); 2498 next_event = list_next_entry(next_event, event_entry); 2499 } 2500 } 2501 2502 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2503 struct task_struct *next) 2504 { 2505 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2506 struct perf_event_context *next_ctx; 2507 struct perf_event_context *parent, *next_parent; 2508 struct perf_cpu_context *cpuctx; 2509 int do_switch = 1; 2510 2511 if (likely(!ctx)) 2512 return; 2513 2514 cpuctx = __get_cpu_context(ctx); 2515 if (!cpuctx->task_ctx) 2516 return; 2517 2518 rcu_read_lock(); 2519 next_ctx = next->perf_event_ctxp[ctxn]; 2520 if (!next_ctx) 2521 goto unlock; 2522 2523 parent = rcu_dereference(ctx->parent_ctx); 2524 next_parent = rcu_dereference(next_ctx->parent_ctx); 2525 2526 /* If neither context have a parent context; they cannot be clones. */ 2527 if (!parent && !next_parent) 2528 goto unlock; 2529 2530 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2531 /* 2532 * Looks like the two contexts are clones, so we might be 2533 * able to optimize the context switch. We lock both 2534 * contexts and check that they are clones under the 2535 * lock (including re-checking that neither has been 2536 * uncloned in the meantime). It doesn't matter which 2537 * order we take the locks because no other cpu could 2538 * be trying to lock both of these tasks. 2539 */ 2540 raw_spin_lock(&ctx->lock); 2541 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2542 if (context_equiv(ctx, next_ctx)) { 2543 /* 2544 * XXX do we need a memory barrier of sorts 2545 * wrt to rcu_dereference() of perf_event_ctxp 2546 */ 2547 task->perf_event_ctxp[ctxn] = next_ctx; 2548 next->perf_event_ctxp[ctxn] = ctx; 2549 ctx->task = next; 2550 next_ctx->task = task; 2551 2552 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2553 2554 do_switch = 0; 2555 2556 perf_event_sync_stat(ctx, next_ctx); 2557 } 2558 raw_spin_unlock(&next_ctx->lock); 2559 raw_spin_unlock(&ctx->lock); 2560 } 2561 unlock: 2562 rcu_read_unlock(); 2563 2564 if (do_switch) { 2565 raw_spin_lock(&ctx->lock); 2566 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2567 cpuctx->task_ctx = NULL; 2568 raw_spin_unlock(&ctx->lock); 2569 } 2570 } 2571 2572 void perf_sched_cb_dec(struct pmu *pmu) 2573 { 2574 this_cpu_dec(perf_sched_cb_usages); 2575 } 2576 2577 void perf_sched_cb_inc(struct pmu *pmu) 2578 { 2579 this_cpu_inc(perf_sched_cb_usages); 2580 } 2581 2582 /* 2583 * This function provides the context switch callback to the lower code 2584 * layer. It is invoked ONLY when the context switch callback is enabled. 
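 *
 * A PMU opts in by bumping the per-cpu counter around events that need it
 * and by providing a ->sched_task() callback. A rough sketch of what a
 * driver might do (names are made up; real drivers such as the x86 LBR code
 * follow this pattern from their pmu::add()/pmu::del() paths):
 *
 *	static void mypmu_sched_task(struct perf_event_context *ctx,
 *				     bool sched_in)
 *	{
 *		// save or restore per-task hardware state here
 *	}
 *
 *	// in pmu::add(), for an event that needs ctx-switch work:
 *	perf_sched_cb_inc(event->ctx->pmu);
 *	// in pmu::del(), for the same event:
 *	perf_sched_cb_dec(event->ctx->pmu);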
2585 */ 2586 static void perf_pmu_sched_task(struct task_struct *prev, 2587 struct task_struct *next, 2588 bool sched_in) 2589 { 2590 struct perf_cpu_context *cpuctx; 2591 struct pmu *pmu; 2592 unsigned long flags; 2593 2594 if (prev == next) 2595 return; 2596 2597 local_irq_save(flags); 2598 2599 rcu_read_lock(); 2600 2601 list_for_each_entry_rcu(pmu, &pmus, entry) { 2602 if (pmu->sched_task) { 2603 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2604 2605 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2606 2607 perf_pmu_disable(pmu); 2608 2609 pmu->sched_task(cpuctx->task_ctx, sched_in); 2610 2611 perf_pmu_enable(pmu); 2612 2613 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2614 } 2615 } 2616 2617 rcu_read_unlock(); 2618 2619 local_irq_restore(flags); 2620 } 2621 2622 #define for_each_task_context_nr(ctxn) \ 2623 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2624 2625 /* 2626 * Called from scheduler to remove the events of the current task, 2627 * with interrupts disabled. 2628 * 2629 * We stop each event and update the event value in event->count. 2630 * 2631 * This does not protect us against NMI, but disable() 2632 * sets the disabled bit in the control field of event _before_ 2633 * accessing the event control register. If a NMI hits, then it will 2634 * not restart the event. 2635 */ 2636 void __perf_event_task_sched_out(struct task_struct *task, 2637 struct task_struct *next) 2638 { 2639 int ctxn; 2640 2641 if (__this_cpu_read(perf_sched_cb_usages)) 2642 perf_pmu_sched_task(task, next, false); 2643 2644 for_each_task_context_nr(ctxn) 2645 perf_event_context_sched_out(task, ctxn, next); 2646 2647 /* 2648 * if cgroup events exist on this CPU, then we need 2649 * to check if we have to switch out PMU state. 2650 * cgroup event are system-wide mode only 2651 */ 2652 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2653 perf_cgroup_sched_out(task, next); 2654 } 2655 2656 static void task_ctx_sched_out(struct perf_event_context *ctx) 2657 { 2658 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2659 2660 if (!cpuctx->task_ctx) 2661 return; 2662 2663 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2664 return; 2665 2666 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2667 cpuctx->task_ctx = NULL; 2668 } 2669 2670 /* 2671 * Called with IRQs disabled 2672 */ 2673 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2674 enum event_type_t event_type) 2675 { 2676 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2677 } 2678 2679 static void 2680 ctx_pinned_sched_in(struct perf_event_context *ctx, 2681 struct perf_cpu_context *cpuctx) 2682 { 2683 struct perf_event *event; 2684 2685 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2686 if (event->state <= PERF_EVENT_STATE_OFF) 2687 continue; 2688 if (!event_filter_match(event)) 2689 continue; 2690 2691 /* may need to reset tstamp_enabled */ 2692 if (is_cgroup_event(event)) 2693 perf_cgroup_mark_enabled(event, ctx); 2694 2695 if (group_can_go_on(event, cpuctx, 1)) 2696 group_sched_in(event, cpuctx, ctx); 2697 2698 /* 2699 * If this pinned group hasn't been scheduled, 2700 * put it in error state. 
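 *
 * Userspace notices this as read() returning 0 (EOF) on the event fd, see
 * perf_read_hw() further down. Roughly:
 *
 *	u64 val;
 *	if (read(fd, &val, sizeof(val)) == 0)
 *		;	// pinned event could not be scheduled and is dead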
2701 */ 2702 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2703 update_group_times(event); 2704 event->state = PERF_EVENT_STATE_ERROR; 2705 } 2706 } 2707 } 2708 2709 static void 2710 ctx_flexible_sched_in(struct perf_event_context *ctx, 2711 struct perf_cpu_context *cpuctx) 2712 { 2713 struct perf_event *event; 2714 int can_add_hw = 1; 2715 2716 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2717 /* Ignore events in OFF or ERROR state */ 2718 if (event->state <= PERF_EVENT_STATE_OFF) 2719 continue; 2720 /* 2721 * Listen to the 'cpu' scheduling filter constraint 2722 * of events: 2723 */ 2724 if (!event_filter_match(event)) 2725 continue; 2726 2727 /* may need to reset tstamp_enabled */ 2728 if (is_cgroup_event(event)) 2729 perf_cgroup_mark_enabled(event, ctx); 2730 2731 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2732 if (group_sched_in(event, cpuctx, ctx)) 2733 can_add_hw = 0; 2734 } 2735 } 2736 } 2737 2738 static void 2739 ctx_sched_in(struct perf_event_context *ctx, 2740 struct perf_cpu_context *cpuctx, 2741 enum event_type_t event_type, 2742 struct task_struct *task) 2743 { 2744 u64 now; 2745 int is_active = ctx->is_active; 2746 2747 ctx->is_active |= event_type; 2748 if (likely(!ctx->nr_events)) 2749 return; 2750 2751 now = perf_clock(); 2752 ctx->timestamp = now; 2753 perf_cgroup_set_timestamp(task, ctx); 2754 /* 2755 * First go through the list and put on any pinned groups 2756 * in order to give them the best chance of going on. 2757 */ 2758 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2759 ctx_pinned_sched_in(ctx, cpuctx); 2760 2761 /* Then walk through the lower prio flexible groups */ 2762 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2763 ctx_flexible_sched_in(ctx, cpuctx); 2764 } 2765 2766 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2767 enum event_type_t event_type, 2768 struct task_struct *task) 2769 { 2770 struct perf_event_context *ctx = &cpuctx->ctx; 2771 2772 ctx_sched_in(ctx, cpuctx, event_type, task); 2773 } 2774 2775 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2776 struct task_struct *task) 2777 { 2778 struct perf_cpu_context *cpuctx; 2779 2780 cpuctx = __get_cpu_context(ctx); 2781 if (cpuctx->task_ctx == ctx) 2782 return; 2783 2784 perf_ctx_lock(cpuctx, ctx); 2785 perf_pmu_disable(ctx->pmu); 2786 /* 2787 * We want to keep the following priority order: 2788 * cpu pinned (that don't need to move), task pinned, 2789 * cpu flexible, task flexible. 2790 */ 2791 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2792 2793 if (ctx->nr_events) 2794 cpuctx->task_ctx = ctx; 2795 2796 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2797 2798 perf_pmu_enable(ctx->pmu); 2799 perf_ctx_unlock(cpuctx, ctx); 2800 } 2801 2802 /* 2803 * Called from scheduler to add the events of the current task 2804 * with interrupts disabled. 2805 * 2806 * We restore the event value and then enable it. 2807 * 2808 * This does not protect us against NMI, but enable() 2809 * sets the enabled bit in the control field of event _before_ 2810 * accessing the event control register. If a NMI hits, then it will 2811 * keep the event running. 
2812 */ 2813 void __perf_event_task_sched_in(struct task_struct *prev, 2814 struct task_struct *task) 2815 { 2816 struct perf_event_context *ctx; 2817 int ctxn; 2818 2819 for_each_task_context_nr(ctxn) { 2820 ctx = task->perf_event_ctxp[ctxn]; 2821 if (likely(!ctx)) 2822 continue; 2823 2824 perf_event_context_sched_in(ctx, task); 2825 } 2826 /* 2827 * if cgroup events exist on this CPU, then we need 2828 * to check if we have to switch in PMU state. 2829 * cgroup event are system-wide mode only 2830 */ 2831 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2832 perf_cgroup_sched_in(prev, task); 2833 2834 if (__this_cpu_read(perf_sched_cb_usages)) 2835 perf_pmu_sched_task(prev, task, true); 2836 } 2837 2838 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2839 { 2840 u64 frequency = event->attr.sample_freq; 2841 u64 sec = NSEC_PER_SEC; 2842 u64 divisor, dividend; 2843 2844 int count_fls, nsec_fls, frequency_fls, sec_fls; 2845 2846 count_fls = fls64(count); 2847 nsec_fls = fls64(nsec); 2848 frequency_fls = fls64(frequency); 2849 sec_fls = 30; 2850 2851 /* 2852 * We got @count in @nsec, with a target of sample_freq HZ 2853 * the target period becomes: 2854 * 2855 * @count * 10^9 2856 * period = ------------------- 2857 * @nsec * sample_freq 2858 * 2859 */ 2860 2861 /* 2862 * Reduce accuracy by one bit such that @a and @b converge 2863 * to a similar magnitude. 2864 */ 2865 #define REDUCE_FLS(a, b) \ 2866 do { \ 2867 if (a##_fls > b##_fls) { \ 2868 a >>= 1; \ 2869 a##_fls--; \ 2870 } else { \ 2871 b >>= 1; \ 2872 b##_fls--; \ 2873 } \ 2874 } while (0) 2875 2876 /* 2877 * Reduce accuracy until either term fits in a u64, then proceed with 2878 * the other, so that finally we can do a u64/u64 division. 2879 */ 2880 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2881 REDUCE_FLS(nsec, frequency); 2882 REDUCE_FLS(sec, count); 2883 } 2884 2885 if (count_fls + sec_fls > 64) { 2886 divisor = nsec * frequency; 2887 2888 while (count_fls + sec_fls > 64) { 2889 REDUCE_FLS(count, sec); 2890 divisor >>= 1; 2891 } 2892 2893 dividend = count * sec; 2894 } else { 2895 dividend = count * sec; 2896 2897 while (nsec_fls + frequency_fls > 64) { 2898 REDUCE_FLS(nsec, frequency); 2899 dividend >>= 1; 2900 } 2901 2902 divisor = nsec * frequency; 2903 } 2904 2905 if (!divisor) 2906 return dividend; 2907 2908 return div64_u64(dividend, divisor); 2909 } 2910 2911 static DEFINE_PER_CPU(int, perf_throttled_count); 2912 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2913 2914 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2915 { 2916 struct hw_perf_event *hwc = &event->hw; 2917 s64 period, sample_period; 2918 s64 delta; 2919 2920 period = perf_calculate_period(event, nsec, count); 2921 2922 delta = (s64)(period - hwc->sample_period); 2923 delta = (delta + 7) / 8; /* low pass filter */ 2924 2925 sample_period = hwc->sample_period + delta; 2926 2927 if (!sample_period) 2928 sample_period = 1; 2929 2930 hwc->sample_period = sample_period; 2931 2932 if (local64_read(&hwc->period_left) > 8*sample_period) { 2933 if (disable) 2934 event->pmu->stop(event, PERF_EF_UPDATE); 2935 2936 local64_set(&hwc->period_left, 0); 2937 2938 if (disable) 2939 event->pmu->start(event, PERF_EF_RELOAD); 2940 } 2941 } 2942 2943 /* 2944 * combine freq adjustment with unthrottling to avoid two passes over the 2945 * events. At the same time, make sure, having freq events does not change 2946 * the rate of unthrottling as that would introduce bias. 
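 *
 * Worked example for the period recomputation driven from here (numbers are
 * illustrative only): with attr.sample_freq = 1000 Hz, suppose the event
 * counted delta = 3,000,000 events over the last tick of nsec = 4,000,000 ns.
 * perf_calculate_period() then targets
 *
 *	period = count * NSEC_PER_SEC / (nsec * sample_freq)
 *	       = 3e6 * 1e9 / (4e6 * 1e3) = 750,000 events per sample,
 *
 * and perf_adjust_period() moves hwc->sample_period one eighth of the way
 * from its current value towards that target (the "low pass filter" above).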
2947 */ 2948 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2949 int needs_unthr) 2950 { 2951 struct perf_event *event; 2952 struct hw_perf_event *hwc; 2953 u64 now, period = TICK_NSEC; 2954 s64 delta; 2955 2956 /* 2957 * only need to iterate over all events iff: 2958 * - context have events in frequency mode (needs freq adjust) 2959 * - there are events to unthrottle on this cpu 2960 */ 2961 if (!(ctx->nr_freq || needs_unthr)) 2962 return; 2963 2964 raw_spin_lock(&ctx->lock); 2965 perf_pmu_disable(ctx->pmu); 2966 2967 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2968 if (event->state != PERF_EVENT_STATE_ACTIVE) 2969 continue; 2970 2971 if (!event_filter_match(event)) 2972 continue; 2973 2974 perf_pmu_disable(event->pmu); 2975 2976 hwc = &event->hw; 2977 2978 if (hwc->interrupts == MAX_INTERRUPTS) { 2979 hwc->interrupts = 0; 2980 perf_log_throttle(event, 1); 2981 event->pmu->start(event, 0); 2982 } 2983 2984 if (!event->attr.freq || !event->attr.sample_freq) 2985 goto next; 2986 2987 /* 2988 * stop the event and update event->count 2989 */ 2990 event->pmu->stop(event, PERF_EF_UPDATE); 2991 2992 now = local64_read(&event->count); 2993 delta = now - hwc->freq_count_stamp; 2994 hwc->freq_count_stamp = now; 2995 2996 /* 2997 * restart the event 2998 * reload only if value has changed 2999 * we have stopped the event so tell that 3000 * to perf_adjust_period() to avoid stopping it 3001 * twice. 3002 */ 3003 if (delta > 0) 3004 perf_adjust_period(event, period, delta, false); 3005 3006 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3007 next: 3008 perf_pmu_enable(event->pmu); 3009 } 3010 3011 perf_pmu_enable(ctx->pmu); 3012 raw_spin_unlock(&ctx->lock); 3013 } 3014 3015 /* 3016 * Round-robin a context's events: 3017 */ 3018 static void rotate_ctx(struct perf_event_context *ctx) 3019 { 3020 /* 3021 * Rotate the first entry last of non-pinned groups. Rotation might be 3022 * disabled by the inheritance code. 
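 *
 * Rotation is what implements multiplexing when more events are requested
 * than the hardware can count at once. Each flexible group then only runs
 * part of the time, which is why tools scale the raw value by the ratio of
 * the two times reported alongside it, e.g.:
 *
 *	estimated = count * time_enabled / time_running;
 *
 * (time_enabled and time_running come from PERF_FORMAT_TOTAL_TIME_ENABLED /
 * PERF_FORMAT_TOTAL_TIME_RUNNING, or from the mmap()ed user page.)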
3023 */ 3024 if (!ctx->rotate_disable) 3025 list_rotate_left(&ctx->flexible_groups); 3026 } 3027 3028 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3029 { 3030 struct perf_event_context *ctx = NULL; 3031 int rotate = 0; 3032 3033 if (cpuctx->ctx.nr_events) { 3034 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3035 rotate = 1; 3036 } 3037 3038 ctx = cpuctx->task_ctx; 3039 if (ctx && ctx->nr_events) { 3040 if (ctx->nr_events != ctx->nr_active) 3041 rotate = 1; 3042 } 3043 3044 if (!rotate) 3045 goto done; 3046 3047 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3048 perf_pmu_disable(cpuctx->ctx.pmu); 3049 3050 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3051 if (ctx) 3052 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3053 3054 rotate_ctx(&cpuctx->ctx); 3055 if (ctx) 3056 rotate_ctx(ctx); 3057 3058 perf_event_sched_in(cpuctx, ctx, current); 3059 3060 perf_pmu_enable(cpuctx->ctx.pmu); 3061 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3062 done: 3063 3064 return rotate; 3065 } 3066 3067 #ifdef CONFIG_NO_HZ_FULL 3068 bool perf_event_can_stop_tick(void) 3069 { 3070 if (atomic_read(&nr_freq_events) || 3071 __this_cpu_read(perf_throttled_count)) 3072 return false; 3073 else 3074 return true; 3075 } 3076 #endif 3077 3078 void perf_event_task_tick(void) 3079 { 3080 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3081 struct perf_event_context *ctx, *tmp; 3082 int throttled; 3083 3084 WARN_ON(!irqs_disabled()); 3085 3086 __this_cpu_inc(perf_throttled_seq); 3087 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3088 3089 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3090 perf_adjust_freq_unthr_context(ctx, throttled); 3091 } 3092 3093 static int event_enable_on_exec(struct perf_event *event, 3094 struct perf_event_context *ctx) 3095 { 3096 if (!event->attr.enable_on_exec) 3097 return 0; 3098 3099 event->attr.enable_on_exec = 0; 3100 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3101 return 0; 3102 3103 __perf_event_mark_enabled(event); 3104 3105 return 1; 3106 } 3107 3108 /* 3109 * Enable all of a task's events that have been marked enable-on-exec. 3110 * This expects task == current. 3111 */ 3112 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3113 { 3114 struct perf_event_context *clone_ctx = NULL; 3115 struct perf_event *event; 3116 unsigned long flags; 3117 int enabled = 0; 3118 int ret; 3119 3120 local_irq_save(flags); 3121 if (!ctx || !ctx->nr_events) 3122 goto out; 3123 3124 /* 3125 * We must ctxsw out cgroup events to avoid conflict 3126 * when invoking perf_task_event_sched_in() later on 3127 * in this function. Otherwise we end up trying to 3128 * ctxswin cgroup events which are already scheduled 3129 * in. 3130 */ 3131 perf_cgroup_sched_out(current, NULL); 3132 3133 raw_spin_lock(&ctx->lock); 3134 task_ctx_sched_out(ctx); 3135 3136 list_for_each_entry(event, &ctx->event_list, event_entry) { 3137 ret = event_enable_on_exec(event, ctx); 3138 if (ret) 3139 enabled = 1; 3140 } 3141 3142 /* 3143 * Unclone this context if we enabled any event. 
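 *
 * (attr.enable_on_exec, handled just above, is what lets a tool open
 * counters on a task before exec() and have them start counting only once
 * the new program image runs. Illustrative userspace sketch:
 *
 *	attr.disabled       = 1;
 *	attr.enable_on_exec = 1;
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	execvp(argv[0], argv);		// counting starts here
 *
 * which is why the context may need to be uncloned at this point.)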
3144 */ 3145 if (enabled) 3146 clone_ctx = unclone_ctx(ctx); 3147 3148 raw_spin_unlock(&ctx->lock); 3149 3150 /* 3151 * Also calls ctxswin for cgroup events, if any: 3152 */ 3153 perf_event_context_sched_in(ctx, ctx->task); 3154 out: 3155 local_irq_restore(flags); 3156 3157 if (clone_ctx) 3158 put_ctx(clone_ctx); 3159 } 3160 3161 void perf_event_exec(void) 3162 { 3163 struct perf_event_context *ctx; 3164 int ctxn; 3165 3166 rcu_read_lock(); 3167 for_each_task_context_nr(ctxn) { 3168 ctx = current->perf_event_ctxp[ctxn]; 3169 if (!ctx) 3170 continue; 3171 3172 perf_event_enable_on_exec(ctx); 3173 } 3174 rcu_read_unlock(); 3175 } 3176 3177 /* 3178 * Cross CPU call to read the hardware event 3179 */ 3180 static void __perf_event_read(void *info) 3181 { 3182 struct perf_event *event = info; 3183 struct perf_event_context *ctx = event->ctx; 3184 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3185 3186 /* 3187 * If this is a task context, we need to check whether it is 3188 * the current task context of this cpu. If not it has been 3189 * scheduled out before the smp call arrived. In that case 3190 * event->count would have been updated to a recent sample 3191 * when the event was scheduled out. 3192 */ 3193 if (ctx->task && cpuctx->task_ctx != ctx) 3194 return; 3195 3196 raw_spin_lock(&ctx->lock); 3197 if (ctx->is_active) { 3198 update_context_time(ctx); 3199 update_cgrp_time_from_event(event); 3200 } 3201 update_event_times(event); 3202 if (event->state == PERF_EVENT_STATE_ACTIVE) 3203 event->pmu->read(event); 3204 raw_spin_unlock(&ctx->lock); 3205 } 3206 3207 static inline u64 perf_event_count(struct perf_event *event) 3208 { 3209 if (event->pmu->count) 3210 return event->pmu->count(event); 3211 3212 return __perf_event_count(event); 3213 } 3214 3215 static u64 perf_event_read(struct perf_event *event) 3216 { 3217 /* 3218 * If event is enabled and currently active on a CPU, update the 3219 * value in the event structure: 3220 */ 3221 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3222 smp_call_function_single(event->oncpu, 3223 __perf_event_read, event, 1); 3224 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3225 struct perf_event_context *ctx = event->ctx; 3226 unsigned long flags; 3227 3228 raw_spin_lock_irqsave(&ctx->lock, flags); 3229 /* 3230 * may read while context is not active 3231 * (e.g., thread is blocked), in that case 3232 * we cannot update context time 3233 */ 3234 if (ctx->is_active) { 3235 update_context_time(ctx); 3236 update_cgrp_time_from_event(event); 3237 } 3238 update_event_times(event); 3239 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3240 } 3241 3242 return perf_event_count(event); 3243 } 3244 3245 /* 3246 * Initialize the perf_event context in a task_struct: 3247 */ 3248 static void __perf_event_init_context(struct perf_event_context *ctx) 3249 { 3250 raw_spin_lock_init(&ctx->lock); 3251 mutex_init(&ctx->mutex); 3252 INIT_LIST_HEAD(&ctx->active_ctx_list); 3253 INIT_LIST_HEAD(&ctx->pinned_groups); 3254 INIT_LIST_HEAD(&ctx->flexible_groups); 3255 INIT_LIST_HEAD(&ctx->event_list); 3256 atomic_set(&ctx->refcount, 1); 3257 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3258 } 3259 3260 static struct perf_event_context * 3261 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3262 { 3263 struct perf_event_context *ctx; 3264 3265 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3266 if (!ctx) 3267 return NULL; 3268 3269 __perf_event_init_context(ctx); 3270 if (task) { 3271 ctx->task = task; 3272 
get_task_struct(task); 3273 } 3274 ctx->pmu = pmu; 3275 3276 return ctx; 3277 } 3278 3279 static struct task_struct * 3280 find_lively_task_by_vpid(pid_t vpid) 3281 { 3282 struct task_struct *task; 3283 int err; 3284 3285 rcu_read_lock(); 3286 if (!vpid) 3287 task = current; 3288 else 3289 task = find_task_by_vpid(vpid); 3290 if (task) 3291 get_task_struct(task); 3292 rcu_read_unlock(); 3293 3294 if (!task) 3295 return ERR_PTR(-ESRCH); 3296 3297 /* Reuse ptrace permission checks for now. */ 3298 err = -EACCES; 3299 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3300 goto errout; 3301 3302 return task; 3303 errout: 3304 put_task_struct(task); 3305 return ERR_PTR(err); 3306 3307 } 3308 3309 /* 3310 * Returns a matching context with refcount and pincount. 3311 */ 3312 static struct perf_event_context * 3313 find_get_context(struct pmu *pmu, struct task_struct *task, 3314 struct perf_event *event) 3315 { 3316 struct perf_event_context *ctx, *clone_ctx = NULL; 3317 struct perf_cpu_context *cpuctx; 3318 void *task_ctx_data = NULL; 3319 unsigned long flags; 3320 int ctxn, err; 3321 int cpu = event->cpu; 3322 3323 if (!task) { 3324 /* Must be root to operate on a CPU event: */ 3325 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3326 return ERR_PTR(-EACCES); 3327 3328 /* 3329 * We could be clever and allow to attach a event to an 3330 * offline CPU and activate it when the CPU comes up, but 3331 * that's for later. 3332 */ 3333 if (!cpu_online(cpu)) 3334 return ERR_PTR(-ENODEV); 3335 3336 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3337 ctx = &cpuctx->ctx; 3338 get_ctx(ctx); 3339 ++ctx->pin_count; 3340 3341 return ctx; 3342 } 3343 3344 err = -EINVAL; 3345 ctxn = pmu->task_ctx_nr; 3346 if (ctxn < 0) 3347 goto errout; 3348 3349 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3350 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3351 if (!task_ctx_data) { 3352 err = -ENOMEM; 3353 goto errout; 3354 } 3355 } 3356 3357 retry: 3358 ctx = perf_lock_task_context(task, ctxn, &flags); 3359 if (ctx) { 3360 clone_ctx = unclone_ctx(ctx); 3361 ++ctx->pin_count; 3362 3363 if (task_ctx_data && !ctx->task_ctx_data) { 3364 ctx->task_ctx_data = task_ctx_data; 3365 task_ctx_data = NULL; 3366 } 3367 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3368 3369 if (clone_ctx) 3370 put_ctx(clone_ctx); 3371 } else { 3372 ctx = alloc_perf_context(pmu, task); 3373 err = -ENOMEM; 3374 if (!ctx) 3375 goto errout; 3376 3377 if (task_ctx_data) { 3378 ctx->task_ctx_data = task_ctx_data; 3379 task_ctx_data = NULL; 3380 } 3381 3382 err = 0; 3383 mutex_lock(&task->perf_event_mutex); 3384 /* 3385 * If it has already passed perf_event_exit_task(). 3386 * we must see PF_EXITING, it takes this mutex too. 
3387 */ 3388 if (task->flags & PF_EXITING) 3389 err = -ESRCH; 3390 else if (task->perf_event_ctxp[ctxn]) 3391 err = -EAGAIN; 3392 else { 3393 get_ctx(ctx); 3394 ++ctx->pin_count; 3395 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3396 } 3397 mutex_unlock(&task->perf_event_mutex); 3398 3399 if (unlikely(err)) { 3400 put_ctx(ctx); 3401 3402 if (err == -EAGAIN) 3403 goto retry; 3404 goto errout; 3405 } 3406 } 3407 3408 kfree(task_ctx_data); 3409 return ctx; 3410 3411 errout: 3412 kfree(task_ctx_data); 3413 return ERR_PTR(err); 3414 } 3415 3416 static void perf_event_free_filter(struct perf_event *event); 3417 static void perf_event_free_bpf_prog(struct perf_event *event); 3418 3419 static void free_event_rcu(struct rcu_head *head) 3420 { 3421 struct perf_event *event; 3422 3423 event = container_of(head, struct perf_event, rcu_head); 3424 if (event->ns) 3425 put_pid_ns(event->ns); 3426 perf_event_free_filter(event); 3427 kfree(event); 3428 } 3429 3430 static void ring_buffer_attach(struct perf_event *event, 3431 struct ring_buffer *rb); 3432 3433 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3434 { 3435 if (event->parent) 3436 return; 3437 3438 if (is_cgroup_event(event)) 3439 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3440 } 3441 3442 static void unaccount_event(struct perf_event *event) 3443 { 3444 if (event->parent) 3445 return; 3446 3447 if (event->attach_state & PERF_ATTACH_TASK) 3448 static_key_slow_dec_deferred(&perf_sched_events); 3449 if (event->attr.mmap || event->attr.mmap_data) 3450 atomic_dec(&nr_mmap_events); 3451 if (event->attr.comm) 3452 atomic_dec(&nr_comm_events); 3453 if (event->attr.task) 3454 atomic_dec(&nr_task_events); 3455 if (event->attr.freq) 3456 atomic_dec(&nr_freq_events); 3457 if (is_cgroup_event(event)) 3458 static_key_slow_dec_deferred(&perf_sched_events); 3459 if (has_branch_stack(event)) 3460 static_key_slow_dec_deferred(&perf_sched_events); 3461 3462 unaccount_event_cpu(event, event->cpu); 3463 } 3464 3465 /* 3466 * The following implement mutual exclusion of events on "exclusive" pmus 3467 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3468 * at a time, so we disallow creating events that might conflict, namely: 3469 * 3470 * 1) cpu-wide events in the presence of per-task events, 3471 * 2) per-task events in the presence of cpu-wide events, 3472 * 3) two matching events on the same context. 3473 * 3474 * The former two cases are handled in the allocation path (perf_event_alloc(), 3475 * __free_event()), the latter -- before the first perf_install_in_context(). 3476 */ 3477 static int exclusive_event_init(struct perf_event *event) 3478 { 3479 struct pmu *pmu = event->pmu; 3480 3481 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3482 return 0; 3483 3484 /* 3485 * Prevent co-existence of per-task and cpu-wide events on the 3486 * same exclusive pmu. 3487 * 3488 * Negative pmu::exclusive_cnt means there are cpu-wide 3489 * events on this "exclusive" pmu, positive means there are 3490 * per-task events. 3491 * 3492 * Since this is called in perf_event_alloc() path, event::ctx 3493 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3494 * to mean "per-task event", because unlike other attach states it 3495 * never gets cleared. 
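 *
 * In other words pmu::exclusive_cnt acts as a signed "which kind is in use"
 * count: three per-task events take it 0 -> +3, after which a cpu-wide event
 * fails its atomic_dec_unless_positive() with -EBUSY (and vice versa for the
 * negative, cpu-wide direction).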
3496 */ 3497 if (event->attach_state & PERF_ATTACH_TASK) { 3498 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3499 return -EBUSY; 3500 } else { 3501 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3502 return -EBUSY; 3503 } 3504 3505 return 0; 3506 } 3507 3508 static void exclusive_event_destroy(struct perf_event *event) 3509 { 3510 struct pmu *pmu = event->pmu; 3511 3512 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3513 return; 3514 3515 /* see comment in exclusive_event_init() */ 3516 if (event->attach_state & PERF_ATTACH_TASK) 3517 atomic_dec(&pmu->exclusive_cnt); 3518 else 3519 atomic_inc(&pmu->exclusive_cnt); 3520 } 3521 3522 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3523 { 3524 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3525 (e1->cpu == e2->cpu || 3526 e1->cpu == -1 || 3527 e2->cpu == -1)) 3528 return true; 3529 return false; 3530 } 3531 3532 /* Called under the same ctx::mutex as perf_install_in_context() */ 3533 static bool exclusive_event_installable(struct perf_event *event, 3534 struct perf_event_context *ctx) 3535 { 3536 struct perf_event *iter_event; 3537 struct pmu *pmu = event->pmu; 3538 3539 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3540 return true; 3541 3542 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3543 if (exclusive_event_match(iter_event, event)) 3544 return false; 3545 } 3546 3547 return true; 3548 } 3549 3550 static void __free_event(struct perf_event *event) 3551 { 3552 if (!event->parent) { 3553 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3554 put_callchain_buffers(); 3555 } 3556 3557 perf_event_free_bpf_prog(event); 3558 3559 if (event->destroy) 3560 event->destroy(event); 3561 3562 if (event->ctx) 3563 put_ctx(event->ctx); 3564 3565 if (event->pmu) { 3566 exclusive_event_destroy(event); 3567 module_put(event->pmu->module); 3568 } 3569 3570 call_rcu(&event->rcu_head, free_event_rcu); 3571 } 3572 3573 static void _free_event(struct perf_event *event) 3574 { 3575 irq_work_sync(&event->pending); 3576 3577 unaccount_event(event); 3578 3579 if (event->rb) { 3580 /* 3581 * Can happen when we close an event with re-directed output. 3582 * 3583 * Since we have a 0 refcount, perf_mmap_close() will skip 3584 * over us; possibly making our ring_buffer_put() the last. 3585 */ 3586 mutex_lock(&event->mmap_mutex); 3587 ring_buffer_attach(event, NULL); 3588 mutex_unlock(&event->mmap_mutex); 3589 } 3590 3591 if (is_cgroup_event(event)) 3592 perf_detach_cgroup(event); 3593 3594 __free_event(event); 3595 } 3596 3597 /* 3598 * Used to free events which have a known refcount of 1, such as in error paths 3599 * where the event isn't exposed yet and inherited events. 3600 */ 3601 static void free_event(struct perf_event *event) 3602 { 3603 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3604 "unexpected event refcount: %ld; ptr=%p\n", 3605 atomic_long_read(&event->refcount), event)) { 3606 /* leak to avoid use-after-free */ 3607 return; 3608 } 3609 3610 _free_event(event); 3611 } 3612 3613 /* 3614 * Remove user event from the owner task. 3615 */ 3616 static void perf_remove_from_owner(struct perf_event *event) 3617 { 3618 struct task_struct *owner; 3619 3620 rcu_read_lock(); 3621 owner = ACCESS_ONCE(event->owner); 3622 /* 3623 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3624 * !owner it means the list deletion is complete and we can indeed 3625 * free this event, otherwise we need to serialize on 3626 * owner->perf_event_mutex. 
3627  */
3628 	smp_read_barrier_depends();
3629 	if (owner) {
3630 		/*
3631 		 * Since delayed_put_task_struct() also drops the last
3632 		 * task reference we can safely take a new reference
3633 		 * while holding the rcu_read_lock().
3634 		 */
3635 		get_task_struct(owner);
3636 	}
3637 	rcu_read_unlock();
3638
3639 	if (owner) {
3640 		/*
3641 		 * If we're here through perf_event_exit_task() we're already
3642 		 * holding ctx->mutex which would be an inversion wrt. the
3643 		 * normal lock order.
3644 		 *
3645 		 * However we can safely take this lock because it's the child
3646 		 * ctx->mutex.
3647 		 */
3648 		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3649
3650 		/*
3651 		 * We have to re-check the event->owner field, if it is cleared
3652 		 * we raced with perf_event_exit_task(); acquiring the mutex
3653 		 * ensured they're done, and we can proceed with freeing the
3654 		 * event.
3655 		 */
3656 		if (event->owner)
3657 			list_del_init(&event->owner_entry);
3658 		mutex_unlock(&owner->perf_event_mutex);
3659 		put_task_struct(owner);
3660 	}
3661 }
3662
3663 static void put_event(struct perf_event *event)
3664 {
3665 	struct perf_event_context *ctx;
3666
3667 	if (!atomic_long_dec_and_test(&event->refcount))
3668 		return;
3669
3670 	if (!is_kernel_event(event))
3671 		perf_remove_from_owner(event);
3672
3673 	/*
3674 	 * There are two ways this annotation is useful:
3675 	 *
3676 	 *  1) there is a lock recursion from perf_event_exit_task
3677 	 *     see the comment there.
3678 	 *
3679 	 *  2) there is a lock-inversion with mmap_sem through
3680 	 *     perf_event_read_group(), which takes faults while
3681 	 *     holding ctx->mutex, however this is called after
3682 	 *     the last filedesc died, so there is no possibility
3683 	 *     to trigger the AB-BA case.
3684 	 */
3685 	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3686 	WARN_ON_ONCE(ctx->parent_ctx);
3687 	perf_remove_from_context(event, true);
3688 	perf_event_ctx_unlock(event, ctx);
3689
3690 	_free_event(event);
3691 }
3692
3693 int perf_event_release_kernel(struct perf_event *event)
3694 {
3695 	put_event(event);
3696 	return 0;
3697 }
3698 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3699
3700 /*
3701  * Called when the last reference to the file is gone.
3702  */
3703 static int perf_release(struct inode *inode, struct file *file)
3704 {
3705 	put_event(file->private_data);
3706 	return 0;
3707 }
3708
3709 /*
3710  * Remove all orphaned events from the context.
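 * (That is, inherited child events whose parent event has already gone away,
 * typically because the parent's file descriptor was closed while the child
 * task kept running; schedule_orphans_remove() arms the delayed work below
 * to detach and free them.)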
3711 */ 3712 static void orphans_remove_work(struct work_struct *work) 3713 { 3714 struct perf_event_context *ctx; 3715 struct perf_event *event, *tmp; 3716 3717 ctx = container_of(work, struct perf_event_context, 3718 orphans_remove.work); 3719 3720 mutex_lock(&ctx->mutex); 3721 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3722 struct perf_event *parent_event = event->parent; 3723 3724 if (!is_orphaned_child(event)) 3725 continue; 3726 3727 perf_remove_from_context(event, true); 3728 3729 mutex_lock(&parent_event->child_mutex); 3730 list_del_init(&event->child_list); 3731 mutex_unlock(&parent_event->child_mutex); 3732 3733 free_event(event); 3734 put_event(parent_event); 3735 } 3736 3737 raw_spin_lock_irq(&ctx->lock); 3738 ctx->orphans_remove_sched = false; 3739 raw_spin_unlock_irq(&ctx->lock); 3740 mutex_unlock(&ctx->mutex); 3741 3742 put_ctx(ctx); 3743 } 3744 3745 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3746 { 3747 struct perf_event *child; 3748 u64 total = 0; 3749 3750 *enabled = 0; 3751 *running = 0; 3752 3753 mutex_lock(&event->child_mutex); 3754 total += perf_event_read(event); 3755 *enabled += event->total_time_enabled + 3756 atomic64_read(&event->child_total_time_enabled); 3757 *running += event->total_time_running + 3758 atomic64_read(&event->child_total_time_running); 3759 3760 list_for_each_entry(child, &event->child_list, child_list) { 3761 total += perf_event_read(child); 3762 *enabled += child->total_time_enabled; 3763 *running += child->total_time_running; 3764 } 3765 mutex_unlock(&event->child_mutex); 3766 3767 return total; 3768 } 3769 EXPORT_SYMBOL_GPL(perf_event_read_value); 3770 3771 static int perf_event_read_group(struct perf_event *event, 3772 u64 read_format, char __user *buf) 3773 { 3774 struct perf_event *leader = event->group_leader, *sub; 3775 struct perf_event_context *ctx = leader->ctx; 3776 int n = 0, size = 0, ret; 3777 u64 count, enabled, running; 3778 u64 values[5]; 3779 3780 lockdep_assert_held(&ctx->mutex); 3781 3782 count = perf_event_read_value(leader, &enabled, &running); 3783 3784 values[n++] = 1 + leader->nr_siblings; 3785 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3786 values[n++] = enabled; 3787 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3788 values[n++] = running; 3789 values[n++] = count; 3790 if (read_format & PERF_FORMAT_ID) 3791 values[n++] = primary_event_id(leader); 3792 3793 size = n * sizeof(u64); 3794 3795 if (copy_to_user(buf, values, size)) 3796 return -EFAULT; 3797 3798 ret = size; 3799 3800 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3801 n = 0; 3802 3803 values[n++] = perf_event_read_value(sub, &enabled, &running); 3804 if (read_format & PERF_FORMAT_ID) 3805 values[n++] = primary_event_id(sub); 3806 3807 size = n * sizeof(u64); 3808 3809 if (copy_to_user(buf + ret, values, size)) { 3810 return -EFAULT; 3811 } 3812 3813 ret += size; 3814 } 3815 3816 return ret; 3817 } 3818 3819 static int perf_event_read_one(struct perf_event *event, 3820 u64 read_format, char __user *buf) 3821 { 3822 u64 enabled, running; 3823 u64 values[4]; 3824 int n = 0; 3825 3826 values[n++] = perf_event_read_value(event, &enabled, &running); 3827 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3828 values[n++] = enabled; 3829 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3830 values[n++] = running; 3831 if (read_format & PERF_FORMAT_ID) 3832 values[n++] = primary_event_id(event); 3833 3834 if (copy_to_user(buf, values, n * sizeof(u64))) 3835 return -EFAULT; 
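	/*
	 * Layout note: the copy_to_user() above and the ones in
	 * perf_event_read_group() hand userspace the read() format documented
	 * in perf_event_open(2):
	 *
	 * without PERF_FORMAT_GROUP, one record for this event:
	 *	{ u64 value;
	 *	  { u64 time_enabled; }	// if PERF_FORMAT_TOTAL_TIME_ENABLED
	 *	  { u64 time_running; }	// if PERF_FORMAT_TOTAL_TIME_RUNNING
	 *	  { u64 id;           }	// if PERF_FORMAT_ID
	 *	}
	 *
	 * with PERF_FORMAT_GROUP, one record for the whole group, leader first:
	 *	{ u64 nr;
	 *	  { u64 time_enabled; }	// if PERF_FORMAT_TOTAL_TIME_ENABLED
	 *	  { u64 time_running; }	// if PERF_FORMAT_TOTAL_TIME_RUNNING
	 *	  { u64 value; { u64 id; } } cntr[nr];
	 *	}
	 */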
3836 3837 return n * sizeof(u64); 3838 } 3839 3840 static bool is_event_hup(struct perf_event *event) 3841 { 3842 bool no_children; 3843 3844 if (event->state != PERF_EVENT_STATE_EXIT) 3845 return false; 3846 3847 mutex_lock(&event->child_mutex); 3848 no_children = list_empty(&event->child_list); 3849 mutex_unlock(&event->child_mutex); 3850 return no_children; 3851 } 3852 3853 /* 3854 * Read the performance event - simple non blocking version for now 3855 */ 3856 static ssize_t 3857 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3858 { 3859 u64 read_format = event->attr.read_format; 3860 int ret; 3861 3862 /* 3863 * Return end-of-file for a read on a event that is in 3864 * error state (i.e. because it was pinned but it couldn't be 3865 * scheduled on to the CPU at some point). 3866 */ 3867 if (event->state == PERF_EVENT_STATE_ERROR) 3868 return 0; 3869 3870 if (count < event->read_size) 3871 return -ENOSPC; 3872 3873 WARN_ON_ONCE(event->ctx->parent_ctx); 3874 if (read_format & PERF_FORMAT_GROUP) 3875 ret = perf_event_read_group(event, read_format, buf); 3876 else 3877 ret = perf_event_read_one(event, read_format, buf); 3878 3879 return ret; 3880 } 3881 3882 static ssize_t 3883 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3884 { 3885 struct perf_event *event = file->private_data; 3886 struct perf_event_context *ctx; 3887 int ret; 3888 3889 ctx = perf_event_ctx_lock(event); 3890 ret = perf_read_hw(event, buf, count); 3891 perf_event_ctx_unlock(event, ctx); 3892 3893 return ret; 3894 } 3895 3896 static unsigned int perf_poll(struct file *file, poll_table *wait) 3897 { 3898 struct perf_event *event = file->private_data; 3899 struct ring_buffer *rb; 3900 unsigned int events = POLLHUP; 3901 3902 poll_wait(file, &event->waitq, wait); 3903 3904 if (is_event_hup(event)) 3905 return events; 3906 3907 /* 3908 * Pin the event->rb by taking event->mmap_mutex; otherwise 3909 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3910 */ 3911 mutex_lock(&event->mmap_mutex); 3912 rb = event->rb; 3913 if (rb) 3914 events = atomic_xchg(&rb->poll, 0); 3915 mutex_unlock(&event->mmap_mutex); 3916 return events; 3917 } 3918 3919 static void _perf_event_reset(struct perf_event *event) 3920 { 3921 (void)perf_event_read(event); 3922 local64_set(&event->count, 0); 3923 perf_event_update_userpage(event); 3924 } 3925 3926 /* 3927 * Holding the top-level event's child_mutex means that any 3928 * descendant process that has inherited this event will block 3929 * in sync_child_event if it goes to exit, thus satisfying the 3930 * task existence requirements of perf_event_enable/disable. 
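 *
 * These two iterators back the ioctl() fan-out below: without
 * PERF_IOC_FLAG_GROUP an operation is applied to the event and its inherited
 * children, with the flag it is applied to the whole group (and their
 * children). Illustrative userspace usage (fd from perf_event_open()):
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 *	u64 period = 10000;		// PERF_EVENT_IOC_PERIOD takes a pointer
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);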
3931 */ 3932 static void perf_event_for_each_child(struct perf_event *event, 3933 void (*func)(struct perf_event *)) 3934 { 3935 struct perf_event *child; 3936 3937 WARN_ON_ONCE(event->ctx->parent_ctx); 3938 3939 mutex_lock(&event->child_mutex); 3940 func(event); 3941 list_for_each_entry(child, &event->child_list, child_list) 3942 func(child); 3943 mutex_unlock(&event->child_mutex); 3944 } 3945 3946 static void perf_event_for_each(struct perf_event *event, 3947 void (*func)(struct perf_event *)) 3948 { 3949 struct perf_event_context *ctx = event->ctx; 3950 struct perf_event *sibling; 3951 3952 lockdep_assert_held(&ctx->mutex); 3953 3954 event = event->group_leader; 3955 3956 perf_event_for_each_child(event, func); 3957 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3958 perf_event_for_each_child(sibling, func); 3959 } 3960 3961 static int perf_event_period(struct perf_event *event, u64 __user *arg) 3962 { 3963 struct perf_event_context *ctx = event->ctx; 3964 int ret = 0, active; 3965 u64 value; 3966 3967 if (!is_sampling_event(event)) 3968 return -EINVAL; 3969 3970 if (copy_from_user(&value, arg, sizeof(value))) 3971 return -EFAULT; 3972 3973 if (!value) 3974 return -EINVAL; 3975 3976 raw_spin_lock_irq(&ctx->lock); 3977 if (event->attr.freq) { 3978 if (value > sysctl_perf_event_sample_rate) { 3979 ret = -EINVAL; 3980 goto unlock; 3981 } 3982 3983 event->attr.sample_freq = value; 3984 } else { 3985 event->attr.sample_period = value; 3986 event->hw.sample_period = value; 3987 } 3988 3989 active = (event->state == PERF_EVENT_STATE_ACTIVE); 3990 if (active) { 3991 perf_pmu_disable(ctx->pmu); 3992 event->pmu->stop(event, PERF_EF_UPDATE); 3993 } 3994 3995 local64_set(&event->hw.period_left, 0); 3996 3997 if (active) { 3998 event->pmu->start(event, PERF_EF_RELOAD); 3999 perf_pmu_enable(ctx->pmu); 4000 } 4001 4002 unlock: 4003 raw_spin_unlock_irq(&ctx->lock); 4004 4005 return ret; 4006 } 4007 4008 static const struct file_operations perf_fops; 4009 4010 static inline int perf_fget_light(int fd, struct fd *p) 4011 { 4012 struct fd f = fdget(fd); 4013 if (!f.file) 4014 return -EBADF; 4015 4016 if (f.file->f_op != &perf_fops) { 4017 fdput(f); 4018 return -EBADF; 4019 } 4020 *p = f; 4021 return 0; 4022 } 4023 4024 static int perf_event_set_output(struct perf_event *event, 4025 struct perf_event *output_event); 4026 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4027 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4028 4029 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4030 { 4031 void (*func)(struct perf_event *); 4032 u32 flags = arg; 4033 4034 switch (cmd) { 4035 case PERF_EVENT_IOC_ENABLE: 4036 func = _perf_event_enable; 4037 break; 4038 case PERF_EVENT_IOC_DISABLE: 4039 func = _perf_event_disable; 4040 break; 4041 case PERF_EVENT_IOC_RESET: 4042 func = _perf_event_reset; 4043 break; 4044 4045 case PERF_EVENT_IOC_REFRESH: 4046 return _perf_event_refresh(event, arg); 4047 4048 case PERF_EVENT_IOC_PERIOD: 4049 return perf_event_period(event, (u64 __user *)arg); 4050 4051 case PERF_EVENT_IOC_ID: 4052 { 4053 u64 id = primary_event_id(event); 4054 4055 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 4056 return -EFAULT; 4057 return 0; 4058 } 4059 4060 case PERF_EVENT_IOC_SET_OUTPUT: 4061 { 4062 int ret; 4063 if (arg != -1) { 4064 struct perf_event *output_event; 4065 struct fd output; 4066 ret = perf_fget_light(arg, &output); 4067 if (ret) 4068 return ret; 4069 output_event = 
output.file->private_data; 4070 ret = perf_event_set_output(event, output_event); 4071 fdput(output); 4072 } else { 4073 ret = perf_event_set_output(event, NULL); 4074 } 4075 return ret; 4076 } 4077 4078 case PERF_EVENT_IOC_SET_FILTER: 4079 return perf_event_set_filter(event, (void __user *)arg); 4080 4081 case PERF_EVENT_IOC_SET_BPF: 4082 return perf_event_set_bpf_prog(event, arg); 4083 4084 default: 4085 return -ENOTTY; 4086 } 4087 4088 if (flags & PERF_IOC_FLAG_GROUP) 4089 perf_event_for_each(event, func); 4090 else 4091 perf_event_for_each_child(event, func); 4092 4093 return 0; 4094 } 4095 4096 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4097 { 4098 struct perf_event *event = file->private_data; 4099 struct perf_event_context *ctx; 4100 long ret; 4101 4102 ctx = perf_event_ctx_lock(event); 4103 ret = _perf_ioctl(event, cmd, arg); 4104 perf_event_ctx_unlock(event, ctx); 4105 4106 return ret; 4107 } 4108 4109 #ifdef CONFIG_COMPAT 4110 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4111 unsigned long arg) 4112 { 4113 switch (_IOC_NR(cmd)) { 4114 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 4115 case _IOC_NR(PERF_EVENT_IOC_ID): 4116 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */ 4117 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 4118 cmd &= ~IOCSIZE_MASK; 4119 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 4120 } 4121 break; 4122 } 4123 return perf_ioctl(file, cmd, arg); 4124 } 4125 #else 4126 # define perf_compat_ioctl NULL 4127 #endif 4128 4129 int perf_event_task_enable(void) 4130 { 4131 struct perf_event_context *ctx; 4132 struct perf_event *event; 4133 4134 mutex_lock(¤t->perf_event_mutex); 4135 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { 4136 ctx = perf_event_ctx_lock(event); 4137 perf_event_for_each_child(event, _perf_event_enable); 4138 perf_event_ctx_unlock(event, ctx); 4139 } 4140 mutex_unlock(¤t->perf_event_mutex); 4141 4142 return 0; 4143 } 4144 4145 int perf_event_task_disable(void) 4146 { 4147 struct perf_event_context *ctx; 4148 struct perf_event *event; 4149 4150 mutex_lock(¤t->perf_event_mutex); 4151 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { 4152 ctx = perf_event_ctx_lock(event); 4153 perf_event_for_each_child(event, _perf_event_disable); 4154 perf_event_ctx_unlock(event, ctx); 4155 } 4156 mutex_unlock(¤t->perf_event_mutex); 4157 4158 return 0; 4159 } 4160 4161 static int perf_event_index(struct perf_event *event) 4162 { 4163 if (event->hw.state & PERF_HES_STOPPED) 4164 return 0; 4165 4166 if (event->state != PERF_EVENT_STATE_ACTIVE) 4167 return 0; 4168 4169 return event->pmu->event_idx(event); 4170 } 4171 4172 static void calc_timer_values(struct perf_event *event, 4173 u64 *now, 4174 u64 *enabled, 4175 u64 *running) 4176 { 4177 u64 ctx_time; 4178 4179 *now = perf_clock(); 4180 ctx_time = event->shadow_ctx_time + *now; 4181 *enabled = ctx_time - event->tstamp_enabled; 4182 *running = ctx_time - event->tstamp_running; 4183 } 4184 4185 static void perf_event_init_userpage(struct perf_event *event) 4186 { 4187 struct perf_event_mmap_page *userpg; 4188 struct ring_buffer *rb; 4189 4190 rcu_read_lock(); 4191 rb = rcu_dereference(event->rb); 4192 if (!rb) 4193 goto unlock; 4194 4195 userpg = rb->user_page; 4196 4197 /* Allow new userspace to detect that bit 0 is deprecated */ 4198 userpg->cap_bit0_is_deprecated = 1; 4199 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 4200 userpg->data_offset = PAGE_SIZE; 4201 userpg->data_size = perf_data_size(rb); 4202 4203 
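	/*
	 * The fields filled in above (together with those kept up to date by
	 * perf_event_update_userpage() below) are meant to be read lock-free
	 * through the mmap()ed page. Illustrative userspace sketch of the
	 * seqlock-style protocol, assuming "pc" points at the mapping:
	 *
	 *	u32 seq;
	 *	u64 count, offset;
	 *	do {
	 *		seq = pc->lock;
	 *		barrier();
	 *		offset = pc->offset;	// plus rdpmc(pc->index - 1)
	 *					// where cap_user_rdpmc allows it
	 *		barrier();
	 *	} while (pc->lock != seq);
	 *	count = offset;
	 */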
unlock: 4204 rcu_read_unlock(); 4205 } 4206 4207 void __weak arch_perf_update_userpage( 4208 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) 4209 { 4210 } 4211 4212 /* 4213 * Callers need to ensure there can be no nesting of this function, otherwise 4214 * the seqlock logic goes bad. We can not serialize this because the arch 4215 * code calls this from NMI context. 4216 */ 4217 void perf_event_update_userpage(struct perf_event *event) 4218 { 4219 struct perf_event_mmap_page *userpg; 4220 struct ring_buffer *rb; 4221 u64 enabled, running, now; 4222 4223 rcu_read_lock(); 4224 rb = rcu_dereference(event->rb); 4225 if (!rb) 4226 goto unlock; 4227 4228 /* 4229 * compute total_time_enabled, total_time_running 4230 * based on snapshot values taken when the event 4231 * was last scheduled in. 4232 * 4233 * we cannot simply called update_context_time() 4234 * because of locking issue as we can be called in 4235 * NMI context 4236 */ 4237 calc_timer_values(event, &now, &enabled, &running); 4238 4239 userpg = rb->user_page; 4240 /* 4241 * Disable preemption so as to not let the corresponding user-space 4242 * spin too long if we get preempted. 4243 */ 4244 preempt_disable(); 4245 ++userpg->lock; 4246 barrier(); 4247 userpg->index = perf_event_index(event); 4248 userpg->offset = perf_event_count(event); 4249 if (userpg->index) 4250 userpg->offset -= local64_read(&event->hw.prev_count); 4251 4252 userpg->time_enabled = enabled + 4253 atomic64_read(&event->child_total_time_enabled); 4254 4255 userpg->time_running = running + 4256 atomic64_read(&event->child_total_time_running); 4257 4258 arch_perf_update_userpage(event, userpg, now); 4259 4260 barrier(); 4261 ++userpg->lock; 4262 preempt_enable(); 4263 unlock: 4264 rcu_read_unlock(); 4265 } 4266 4267 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4268 { 4269 struct perf_event *event = vma->vm_file->private_data; 4270 struct ring_buffer *rb; 4271 int ret = VM_FAULT_SIGBUS; 4272 4273 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4274 if (vmf->pgoff == 0) 4275 ret = 0; 4276 return ret; 4277 } 4278 4279 rcu_read_lock(); 4280 rb = rcu_dereference(event->rb); 4281 if (!rb) 4282 goto unlock; 4283 4284 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4285 goto unlock; 4286 4287 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4288 if (!vmf->page) 4289 goto unlock; 4290 4291 get_page(vmf->page); 4292 vmf->page->mapping = vma->vm_file->f_mapping; 4293 vmf->page->index = vmf->pgoff; 4294 4295 ret = 0; 4296 unlock: 4297 rcu_read_unlock(); 4298 4299 return ret; 4300 } 4301 4302 static void ring_buffer_attach(struct perf_event *event, 4303 struct ring_buffer *rb) 4304 { 4305 struct ring_buffer *old_rb = NULL; 4306 unsigned long flags; 4307 4308 if (event->rb) { 4309 /* 4310 * Should be impossible, we set this when removing 4311 * event->rb_entry and wait/clear when adding event->rb_entry. 
4312 */ 4313 WARN_ON_ONCE(event->rcu_pending); 4314 4315 old_rb = event->rb; 4316 spin_lock_irqsave(&old_rb->event_lock, flags); 4317 list_del_rcu(&event->rb_entry); 4318 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4319 4320 event->rcu_batches = get_state_synchronize_rcu(); 4321 event->rcu_pending = 1; 4322 } 4323 4324 if (rb) { 4325 if (event->rcu_pending) { 4326 cond_synchronize_rcu(event->rcu_batches); 4327 event->rcu_pending = 0; 4328 } 4329 4330 spin_lock_irqsave(&rb->event_lock, flags); 4331 list_add_rcu(&event->rb_entry, &rb->event_list); 4332 spin_unlock_irqrestore(&rb->event_lock, flags); 4333 } 4334 4335 rcu_assign_pointer(event->rb, rb); 4336 4337 if (old_rb) { 4338 ring_buffer_put(old_rb); 4339 /* 4340 * Since we detached before setting the new rb, so that we 4341 * could attach the new rb, we could have missed a wakeup. 4342 * Provide it now. 4343 */ 4344 wake_up_all(&event->waitq); 4345 } 4346 } 4347 4348 static void ring_buffer_wakeup(struct perf_event *event) 4349 { 4350 struct ring_buffer *rb; 4351 4352 rcu_read_lock(); 4353 rb = rcu_dereference(event->rb); 4354 if (rb) { 4355 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4356 wake_up_all(&event->waitq); 4357 } 4358 rcu_read_unlock(); 4359 } 4360 4361 static void rb_free_rcu(struct rcu_head *rcu_head) 4362 { 4363 struct ring_buffer *rb; 4364 4365 rb = container_of(rcu_head, struct ring_buffer, rcu_head); 4366 rb_free(rb); 4367 } 4368 4369 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4370 { 4371 struct ring_buffer *rb; 4372 4373 rcu_read_lock(); 4374 rb = rcu_dereference(event->rb); 4375 if (rb) { 4376 if (!atomic_inc_not_zero(&rb->refcount)) 4377 rb = NULL; 4378 } 4379 rcu_read_unlock(); 4380 4381 return rb; 4382 } 4383 4384 void ring_buffer_put(struct ring_buffer *rb) 4385 { 4386 if (!atomic_dec_and_test(&rb->refcount)) 4387 return; 4388 4389 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4390 4391 call_rcu(&rb->rcu_head, rb_free_rcu); 4392 } 4393 4394 static void perf_mmap_open(struct vm_area_struct *vma) 4395 { 4396 struct perf_event *event = vma->vm_file->private_data; 4397 4398 atomic_inc(&event->mmap_count); 4399 atomic_inc(&event->rb->mmap_count); 4400 4401 if (vma->vm_pgoff) 4402 atomic_inc(&event->rb->aux_mmap_count); 4403 4404 if (event->pmu->event_mapped) 4405 event->pmu->event_mapped(event); 4406 } 4407 4408 /* 4409 * A buffer can be mmap()ed multiple times; either directly through the same 4410 * event, or through other events by use of perf_event_set_output(). 4411 * 4412 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4413 * the buffer here, where we still have a VM context. This means we need 4414 * to detach all events redirecting to us. 4415 */ 4416 static void perf_mmap_close(struct vm_area_struct *vma) 4417 { 4418 struct perf_event *event = vma->vm_file->private_data; 4419 4420 struct ring_buffer *rb = ring_buffer_get(event); 4421 struct user_struct *mmap_user = rb->mmap_user; 4422 int mmap_locked = rb->mmap_locked; 4423 unsigned long size = perf_data_size(rb); 4424 4425 if (event->pmu->event_unmapped) 4426 event->pmu->event_unmapped(event); 4427 4428 /* 4429 * rb->aux_mmap_count will always drop before rb->mmap_count and 4430 * event->mmap_count, so it is ok to use event->mmap_mutex to 4431 * serialize with perf_mmap here. 
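	 *
	 * (The AUX region, if present, is therefore handled first below: its
	 *  pages are un-accounted and rb_free_aux() runs as soon as the last
	 *  AUX mapping goes away, independent of the main buffer teardown.)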
4432 */ 4433 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4434 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4435 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4436 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4437 4438 rb_free_aux(rb); 4439 mutex_unlock(&event->mmap_mutex); 4440 } 4441 4442 atomic_dec(&rb->mmap_count); 4443 4444 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4445 goto out_put; 4446 4447 ring_buffer_attach(event, NULL); 4448 mutex_unlock(&event->mmap_mutex); 4449 4450 /* If there's still other mmap()s of this buffer, we're done. */ 4451 if (atomic_read(&rb->mmap_count)) 4452 goto out_put; 4453 4454 /* 4455 * No other mmap()s, detach from all other events that might redirect 4456 * into the now unreachable buffer. Somewhat complicated by the 4457 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4458 */ 4459 again: 4460 rcu_read_lock(); 4461 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4462 if (!atomic_long_inc_not_zero(&event->refcount)) { 4463 /* 4464 * This event is en-route to free_event() which will 4465 * detach it and remove it from the list. 4466 */ 4467 continue; 4468 } 4469 rcu_read_unlock(); 4470 4471 mutex_lock(&event->mmap_mutex); 4472 /* 4473 * Check we didn't race with perf_event_set_output() which can 4474 * swizzle the rb from under us while we were waiting to 4475 * acquire mmap_mutex. 4476 * 4477 * If we find a different rb; ignore this event, a next 4478 * iteration will no longer find it on the list. We have to 4479 * still restart the iteration to make sure we're not now 4480 * iterating the wrong list. 4481 */ 4482 if (event->rb == rb) 4483 ring_buffer_attach(event, NULL); 4484 4485 mutex_unlock(&event->mmap_mutex); 4486 put_event(event); 4487 4488 /* 4489 * Restart the iteration; either we're on the wrong list or 4490 * destroyed its integrity by doing a deletion. 4491 */ 4492 goto again; 4493 } 4494 rcu_read_unlock(); 4495 4496 /* 4497 * It could be there's still a few 0-ref events on the list; they'll 4498 * get cleaned up by free_event() -- they'll also still have their 4499 * ref on the rb and will free it whenever they are done with it. 4500 * 4501 * Aside from that, this buffer is 'fully' detached and unmapped, 4502 * undo the VM accounting. 4503 */ 4504 4505 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4506 vma->vm_mm->pinned_vm -= mmap_locked; 4507 free_uid(mmap_user); 4508 4509 out_put: 4510 ring_buffer_put(rb); /* could be last */ 4511 } 4512 4513 static const struct vm_operations_struct perf_mmap_vmops = { 4514 .open = perf_mmap_open, 4515 .close = perf_mmap_close, /* non mergable */ 4516 .fault = perf_mmap_fault, 4517 .page_mkwrite = perf_mmap_fault, 4518 }; 4519 4520 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4521 { 4522 struct perf_event *event = file->private_data; 4523 unsigned long user_locked, user_lock_limit; 4524 struct user_struct *user = current_user(); 4525 unsigned long locked, lock_limit; 4526 struct ring_buffer *rb = NULL; 4527 unsigned long vma_size; 4528 unsigned long nr_pages; 4529 long user_extra = 0, extra = 0; 4530 int ret = 0, flags = 0; 4531 4532 /* 4533 * Don't allow mmap() of inherited per-task counters. This would 4534 * create a performance issue due to all children writing to the 4535 * same rb. 
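	 *
	 * (Concretely: attr.inherit together with cpu == -1 is rejected with
	 *  -EINVAL just below; the combination would have every child task
	 *  funnelling samples into one shared buffer.)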
4536 */ 4537 if (event->cpu == -1 && event->attr.inherit) 4538 return -EINVAL; 4539 4540 if (!(vma->vm_flags & VM_SHARED)) 4541 return -EINVAL; 4542 4543 vma_size = vma->vm_end - vma->vm_start; 4544 4545 if (vma->vm_pgoff == 0) { 4546 nr_pages = (vma_size / PAGE_SIZE) - 1; 4547 } else { 4548 /* 4549 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4550 * mapped, all subsequent mappings should have the same size 4551 * and offset. Must be above the normal perf buffer. 4552 */ 4553 u64 aux_offset, aux_size; 4554 4555 if (!event->rb) 4556 return -EINVAL; 4557 4558 nr_pages = vma_size / PAGE_SIZE; 4559 4560 mutex_lock(&event->mmap_mutex); 4561 ret = -EINVAL; 4562 4563 rb = event->rb; 4564 if (!rb) 4565 goto aux_unlock; 4566 4567 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4568 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4569 4570 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4571 goto aux_unlock; 4572 4573 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4574 goto aux_unlock; 4575 4576 /* already mapped with a different offset */ 4577 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4578 goto aux_unlock; 4579 4580 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4581 goto aux_unlock; 4582 4583 /* already mapped with a different size */ 4584 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4585 goto aux_unlock; 4586 4587 if (!is_power_of_2(nr_pages)) 4588 goto aux_unlock; 4589 4590 if (!atomic_inc_not_zero(&rb->mmap_count)) 4591 goto aux_unlock; 4592 4593 if (rb_has_aux(rb)) { 4594 atomic_inc(&rb->aux_mmap_count); 4595 ret = 0; 4596 goto unlock; 4597 } 4598 4599 atomic_set(&rb->aux_mmap_count, 1); 4600 user_extra = nr_pages; 4601 4602 goto accounting; 4603 } 4604 4605 /* 4606 * If we have rb pages ensure they're a power-of-two number, so we 4607 * can do bitmasks instead of modulo. 4608 */ 4609 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4610 return -EINVAL; 4611 4612 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4613 return -EINVAL; 4614 4615 WARN_ON_ONCE(event->ctx->parent_ctx); 4616 again: 4617 mutex_lock(&event->mmap_mutex); 4618 if (event->rb) { 4619 if (event->rb->nr_pages != nr_pages) { 4620 ret = -EINVAL; 4621 goto unlock; 4622 } 4623 4624 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4625 /* 4626 * Raced against perf_mmap_close() through 4627 * perf_event_set_output(). Try again, hope for better 4628 * luck. 4629 */ 4630 mutex_unlock(&event->mmap_mutex); 4631 goto again; 4632 } 4633 4634 goto unlock; 4635 } 4636 4637 user_extra = nr_pages + 1; 4638 4639 accounting: 4640 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4641 4642 /* 4643 * Increase the limit linearly with more CPUs: 4644 */ 4645 user_lock_limit *= num_online_cpus(); 4646 4647 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4648 4649 if (user_locked > user_lock_limit) 4650 extra = user_locked - user_lock_limit; 4651 4652 lock_limit = rlimit(RLIMIT_MEMLOCK); 4653 lock_limit >>= PAGE_SHIFT; 4654 locked = vma->vm_mm->pinned_vm + extra; 4655 4656 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4657 !capable(CAP_IPC_LOCK)) { 4658 ret = -EPERM; 4659 goto unlock; 4660 } 4661 4662 WARN_ON(!rb && event->rb); 4663 4664 if (vma->vm_flags & VM_WRITE) 4665 flags |= RING_BUFFER_WRITABLE; 4666 4667 if (!rb) { 4668 rb = rb_alloc(nr_pages, 4669 event->attr.watermark ? 
event->attr.wakeup_watermark : 0, 4670 event->cpu, flags); 4671 4672 if (!rb) { 4673 ret = -ENOMEM; 4674 goto unlock; 4675 } 4676 4677 atomic_set(&rb->mmap_count, 1); 4678 rb->mmap_user = get_current_user(); 4679 rb->mmap_locked = extra; 4680 4681 ring_buffer_attach(event, rb); 4682 4683 perf_event_init_userpage(event); 4684 perf_event_update_userpage(event); 4685 } else { 4686 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4687 event->attr.aux_watermark, flags); 4688 if (!ret) 4689 rb->aux_mmap_locked = extra; 4690 } 4691 4692 unlock: 4693 if (!ret) { 4694 atomic_long_add(user_extra, &user->locked_vm); 4695 vma->vm_mm->pinned_vm += extra; 4696 4697 atomic_inc(&event->mmap_count); 4698 } else if (rb) { 4699 atomic_dec(&rb->mmap_count); 4700 } 4701 aux_unlock: 4702 mutex_unlock(&event->mmap_mutex); 4703 4704 /* 4705 * Since pinned accounting is per vm we cannot allow fork() to copy our 4706 * vma. 4707 */ 4708 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4709 vma->vm_ops = &perf_mmap_vmops; 4710 4711 if (event->pmu->event_mapped) 4712 event->pmu->event_mapped(event); 4713 4714 return ret; 4715 } 4716 4717 static int perf_fasync(int fd, struct file *filp, int on) 4718 { 4719 struct inode *inode = file_inode(filp); 4720 struct perf_event *event = filp->private_data; 4721 int retval; 4722 4723 mutex_lock(&inode->i_mutex); 4724 retval = fasync_helper(fd, filp, on, &event->fasync); 4725 mutex_unlock(&inode->i_mutex); 4726 4727 if (retval < 0) 4728 return retval; 4729 4730 return 0; 4731 } 4732 4733 static const struct file_operations perf_fops = { 4734 .llseek = no_llseek, 4735 .release = perf_release, 4736 .read = perf_read, 4737 .poll = perf_poll, 4738 .unlocked_ioctl = perf_ioctl, 4739 .compat_ioctl = perf_compat_ioctl, 4740 .mmap = perf_mmap, 4741 .fasync = perf_fasync, 4742 }; 4743 4744 /* 4745 * Perf event wakeup 4746 * 4747 * If there's data, ensure we set the poll() state and publish everything 4748 * to user-space before waking everybody up. 4749 */ 4750 4751 void perf_event_wakeup(struct perf_event *event) 4752 { 4753 ring_buffer_wakeup(event); 4754 4755 if (event->pending_kill) { 4756 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4757 event->pending_kill = 0; 4758 } 4759 } 4760 4761 static void perf_pending_event(struct irq_work *entry) 4762 { 4763 struct perf_event *event = container_of(entry, 4764 struct perf_event, pending); 4765 int rctx; 4766 4767 rctx = perf_swevent_get_recursion_context(); 4768 /* 4769 * If we 'fail' here, that's OK, it means recursion is already disabled 4770 * and we won't recurse 'further'. 4771 */ 4772 4773 if (event->pending_disable) { 4774 event->pending_disable = 0; 4775 __perf_event_disable(event); 4776 } 4777 4778 if (event->pending_wakeup) { 4779 event->pending_wakeup = 0; 4780 perf_event_wakeup(event); 4781 } 4782 4783 if (rctx >= 0) 4784 perf_swevent_put_recursion_context(rctx); 4785 } 4786 4787 /* 4788 * We assume there is only KVM supporting the callbacks. 4789 * Later on, we might change it to a list if there is 4790 * another virtualization implementation supporting the callbacks. 
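	 *
	 * Illustrative registration by a hypervisor module (a sketch, not
	 * code from this file; the kvm_* callback names are placeholders):
	 *
	 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
	 *		.is_in_guest	= kvm_is_in_guest,
	 *		.is_user_mode	= kvm_is_user_mode,
	 *		.get_guest_ip	= kvm_get_guest_ip,
	 *	};
	 *
	 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
	 *
	 * with a matching perf_unregister_guest_info_callbacks() on exit.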
4791 */ 4792 struct perf_guest_info_callbacks *perf_guest_cbs; 4793 4794 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4795 { 4796 perf_guest_cbs = cbs; 4797 return 0; 4798 } 4799 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 4800 4801 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4802 { 4803 perf_guest_cbs = NULL; 4804 return 0; 4805 } 4806 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 4807 4808 static void 4809 perf_output_sample_regs(struct perf_output_handle *handle, 4810 struct pt_regs *regs, u64 mask) 4811 { 4812 int bit; 4813 4814 for_each_set_bit(bit, (const unsigned long *) &mask, 4815 sizeof(mask) * BITS_PER_BYTE) { 4816 u64 val; 4817 4818 val = perf_reg_value(regs, bit); 4819 perf_output_put(handle, val); 4820 } 4821 } 4822 4823 static void perf_sample_regs_user(struct perf_regs *regs_user, 4824 struct pt_regs *regs, 4825 struct pt_regs *regs_user_copy) 4826 { 4827 if (user_mode(regs)) { 4828 regs_user->abi = perf_reg_abi(current); 4829 regs_user->regs = regs; 4830 } else if (current->mm) { 4831 perf_get_regs_user(regs_user, regs, regs_user_copy); 4832 } else { 4833 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 4834 regs_user->regs = NULL; 4835 } 4836 } 4837 4838 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 4839 struct pt_regs *regs) 4840 { 4841 regs_intr->regs = regs; 4842 regs_intr->abi = perf_reg_abi(current); 4843 } 4844 4845 4846 /* 4847 * Get remaining task size from user stack pointer. 4848 * 4849 * It would be better to take the stack vma map and limit this more 4850 * precisely, but there's no way to get it safely under interrupt, 4851 * so we use TASK_SIZE as the limit. 4852 */ 4853 static u64 perf_ustack_task_size(struct pt_regs *regs) 4854 { 4855 unsigned long addr = perf_user_stack_pointer(regs); 4856 4857 if (!addr || addr >= TASK_SIZE) 4858 return 0; 4859 4860 return TASK_SIZE - addr; 4861 } 4862 4863 static u16 4864 perf_sample_ustack_size(u16 stack_size, u16 header_size, 4865 struct pt_regs *regs) 4866 { 4867 u64 task_size; 4868 4869 /* No regs, no stack pointer, no dump. */ 4870 if (!regs) 4871 return 0; 4872 4873 /* 4874 * Check that the requested stack size fits into the: 4875 * - TASK_SIZE 4876 * If it doesn't, we limit the size to TASK_SIZE. 4877 * 4878 * - remaining sample size 4879 * If it doesn't, we shrink the stack size to 4880 * fit the remaining sample size. 4881 */ 4882 4883 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 4884 stack_size = min(stack_size, (u16) task_size); 4885 4886 /* Current header size plus static size and dynamic size. */ 4887 header_size += 2 * sizeof(u64); 4888 4889 /* Does the current stack dump size still fit? */ 4890 if ((u16) (header_size + stack_size) < header_size) { 4891 /* 4892 * If we would overflow the maximum sample size, 4893 * shrink the stack dump size so that it fits.
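	 *
	 * (header_size + stack_size is evaluated in u16 in the check above;
	 *  a wrap-around means the dump could no longer be described by the
	 *  16-bit size fields, so clamp stack_size to whatever still fits.)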
4894 */ 4895 stack_size = USHRT_MAX - header_size - sizeof(u64); 4896 stack_size = round_up(stack_size, sizeof(u64)); 4897 } 4898 4899 return stack_size; 4900 } 4901 4902 static void 4903 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 4904 struct pt_regs *regs) 4905 { 4906 /* Case of a kernel thread, nothing to dump */ 4907 if (!regs) { 4908 u64 size = 0; 4909 perf_output_put(handle, size); 4910 } else { 4911 unsigned long sp; 4912 unsigned int rem; 4913 u64 dyn_size; 4914 4915 /* 4916 * We dump: 4917 * static size 4918 * - the size requested by user or the best one we can fit 4919 * in to the sample max size 4920 * data 4921 * - user stack dump data 4922 * dynamic size 4923 * - the actual dumped size 4924 */ 4925 4926 /* Static size. */ 4927 perf_output_put(handle, dump_size); 4928 4929 /* Data. */ 4930 sp = perf_user_stack_pointer(regs); 4931 rem = __output_copy_user(handle, (void *) sp, dump_size); 4932 dyn_size = dump_size - rem; 4933 4934 perf_output_skip(handle, rem); 4935 4936 /* Dynamic size. */ 4937 perf_output_put(handle, dyn_size); 4938 } 4939 } 4940 4941 static void __perf_event_header__init_id(struct perf_event_header *header, 4942 struct perf_sample_data *data, 4943 struct perf_event *event) 4944 { 4945 u64 sample_type = event->attr.sample_type; 4946 4947 data->type = sample_type; 4948 header->size += event->id_header_size; 4949 4950 if (sample_type & PERF_SAMPLE_TID) { 4951 /* namespace issues */ 4952 data->tid_entry.pid = perf_event_pid(event, current); 4953 data->tid_entry.tid = perf_event_tid(event, current); 4954 } 4955 4956 if (sample_type & PERF_SAMPLE_TIME) 4957 data->time = perf_event_clock(event); 4958 4959 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 4960 data->id = primary_event_id(event); 4961 4962 if (sample_type & PERF_SAMPLE_STREAM_ID) 4963 data->stream_id = event->id; 4964 4965 if (sample_type & PERF_SAMPLE_CPU) { 4966 data->cpu_entry.cpu = raw_smp_processor_id(); 4967 data->cpu_entry.reserved = 0; 4968 } 4969 } 4970 4971 void perf_event_header__init_id(struct perf_event_header *header, 4972 struct perf_sample_data *data, 4973 struct perf_event *event) 4974 { 4975 if (event->attr.sample_id_all) 4976 __perf_event_header__init_id(header, data, event); 4977 } 4978 4979 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 4980 struct perf_sample_data *data) 4981 { 4982 u64 sample_type = data->type; 4983 4984 if (sample_type & PERF_SAMPLE_TID) 4985 perf_output_put(handle, data->tid_entry); 4986 4987 if (sample_type & PERF_SAMPLE_TIME) 4988 perf_output_put(handle, data->time); 4989 4990 if (sample_type & PERF_SAMPLE_ID) 4991 perf_output_put(handle, data->id); 4992 4993 if (sample_type & PERF_SAMPLE_STREAM_ID) 4994 perf_output_put(handle, data->stream_id); 4995 4996 if (sample_type & PERF_SAMPLE_CPU) 4997 perf_output_put(handle, data->cpu_entry); 4998 4999 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5000 perf_output_put(handle, data->id); 5001 } 5002 5003 void perf_event__output_id_sample(struct perf_event *event, 5004 struct perf_output_handle *handle, 5005 struct perf_sample_data *sample) 5006 { 5007 if (event->attr.sample_id_all) 5008 __perf_event__output_id_sample(handle, sample); 5009 } 5010 5011 static void perf_output_read_one(struct perf_output_handle *handle, 5012 struct perf_event *event, 5013 u64 enabled, u64 running) 5014 { 5015 u64 read_format = event->attr.read_format; 5016 u64 values[4]; 5017 int n = 0; 5018 5019 values[n++] = perf_event_count(event); 5020 if (read_format & 
PERF_FORMAT_TOTAL_TIME_ENABLED) { 5021 values[n++] = enabled + 5022 atomic64_read(&event->child_total_time_enabled); 5023 } 5024 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5025 values[n++] = running + 5026 atomic64_read(&event->child_total_time_running); 5027 } 5028 if (read_format & PERF_FORMAT_ID) 5029 values[n++] = primary_event_id(event); 5030 5031 __output_copy(handle, values, n * sizeof(u64)); 5032 } 5033 5034 /* 5035 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5036 */ 5037 static void perf_output_read_group(struct perf_output_handle *handle, 5038 struct perf_event *event, 5039 u64 enabled, u64 running) 5040 { 5041 struct perf_event *leader = event->group_leader, *sub; 5042 u64 read_format = event->attr.read_format; 5043 u64 values[5]; 5044 int n = 0; 5045 5046 values[n++] = 1 + leader->nr_siblings; 5047 5048 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5049 values[n++] = enabled; 5050 5051 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5052 values[n++] = running; 5053 5054 if (leader != event) 5055 leader->pmu->read(leader); 5056 5057 values[n++] = perf_event_count(leader); 5058 if (read_format & PERF_FORMAT_ID) 5059 values[n++] = primary_event_id(leader); 5060 5061 __output_copy(handle, values, n * sizeof(u64)); 5062 5063 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5064 n = 0; 5065 5066 if ((sub != event) && 5067 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5068 sub->pmu->read(sub); 5069 5070 values[n++] = perf_event_count(sub); 5071 if (read_format & PERF_FORMAT_ID) 5072 values[n++] = primary_event_id(sub); 5073 5074 __output_copy(handle, values, n * sizeof(u64)); 5075 } 5076 } 5077 5078 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5079 PERF_FORMAT_TOTAL_TIME_RUNNING) 5080 5081 static void perf_output_read(struct perf_output_handle *handle, 5082 struct perf_event *event) 5083 { 5084 u64 enabled = 0, running = 0, now; 5085 u64 read_format = event->attr.read_format; 5086 5087 /* 5088 * compute total_time_enabled, total_time_running 5089 * based on snapshot values taken when the event 5090 * was last scheduled in. 
5091 * 5092 * we cannot simply called update_context_time() 5093 * because of locking issue as we are called in 5094 * NMI context 5095 */ 5096 if (read_format & PERF_FORMAT_TOTAL_TIMES) 5097 calc_timer_values(event, &now, &enabled, &running); 5098 5099 if (event->attr.read_format & PERF_FORMAT_GROUP) 5100 perf_output_read_group(handle, event, enabled, running); 5101 else 5102 perf_output_read_one(handle, event, enabled, running); 5103 } 5104 5105 void perf_output_sample(struct perf_output_handle *handle, 5106 struct perf_event_header *header, 5107 struct perf_sample_data *data, 5108 struct perf_event *event) 5109 { 5110 u64 sample_type = data->type; 5111 5112 perf_output_put(handle, *header); 5113 5114 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5115 perf_output_put(handle, data->id); 5116 5117 if (sample_type & PERF_SAMPLE_IP) 5118 perf_output_put(handle, data->ip); 5119 5120 if (sample_type & PERF_SAMPLE_TID) 5121 perf_output_put(handle, data->tid_entry); 5122 5123 if (sample_type & PERF_SAMPLE_TIME) 5124 perf_output_put(handle, data->time); 5125 5126 if (sample_type & PERF_SAMPLE_ADDR) 5127 perf_output_put(handle, data->addr); 5128 5129 if (sample_type & PERF_SAMPLE_ID) 5130 perf_output_put(handle, data->id); 5131 5132 if (sample_type & PERF_SAMPLE_STREAM_ID) 5133 perf_output_put(handle, data->stream_id); 5134 5135 if (sample_type & PERF_SAMPLE_CPU) 5136 perf_output_put(handle, data->cpu_entry); 5137 5138 if (sample_type & PERF_SAMPLE_PERIOD) 5139 perf_output_put(handle, data->period); 5140 5141 if (sample_type & PERF_SAMPLE_READ) 5142 perf_output_read(handle, event); 5143 5144 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5145 if (data->callchain) { 5146 int size = 1; 5147 5148 if (data->callchain) 5149 size += data->callchain->nr; 5150 5151 size *= sizeof(u64); 5152 5153 __output_copy(handle, data->callchain, size); 5154 } else { 5155 u64 nr = 0; 5156 perf_output_put(handle, nr); 5157 } 5158 } 5159 5160 if (sample_type & PERF_SAMPLE_RAW) { 5161 if (data->raw) { 5162 perf_output_put(handle, data->raw->size); 5163 __output_copy(handle, data->raw->data, 5164 data->raw->size); 5165 } else { 5166 struct { 5167 u32 size; 5168 u32 data; 5169 } raw = { 5170 .size = sizeof(u32), 5171 .data = 0, 5172 }; 5173 perf_output_put(handle, raw); 5174 } 5175 } 5176 5177 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5178 if (data->br_stack) { 5179 size_t size; 5180 5181 size = data->br_stack->nr 5182 * sizeof(struct perf_branch_entry); 5183 5184 perf_output_put(handle, data->br_stack->nr); 5185 perf_output_copy(handle, data->br_stack->entries, size); 5186 } else { 5187 /* 5188 * we always store at least the value of nr 5189 */ 5190 u64 nr = 0; 5191 perf_output_put(handle, nr); 5192 } 5193 } 5194 5195 if (sample_type & PERF_SAMPLE_REGS_USER) { 5196 u64 abi = data->regs_user.abi; 5197 5198 /* 5199 * If there are no regs to dump, notice it through 5200 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 
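	 *
	 * Resulting record layout (sketch):
	 *	u64 abi;	PERF_SAMPLE_REGS_ABI_{NONE,32,64}
	 *	u64 regs[];	one value per bit set in attr.sample_regs_user,
	 *			emitted only when abi is non-zero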
5201 */ 5202 perf_output_put(handle, abi); 5203 5204 if (abi) { 5205 u64 mask = event->attr.sample_regs_user; 5206 perf_output_sample_regs(handle, 5207 data->regs_user.regs, 5208 mask); 5209 } 5210 } 5211 5212 if (sample_type & PERF_SAMPLE_STACK_USER) { 5213 perf_output_sample_ustack(handle, 5214 data->stack_user_size, 5215 data->regs_user.regs); 5216 } 5217 5218 if (sample_type & PERF_SAMPLE_WEIGHT) 5219 perf_output_put(handle, data->weight); 5220 5221 if (sample_type & PERF_SAMPLE_DATA_SRC) 5222 perf_output_put(handle, data->data_src.val); 5223 5224 if (sample_type & PERF_SAMPLE_TRANSACTION) 5225 perf_output_put(handle, data->txn); 5226 5227 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5228 u64 abi = data->regs_intr.abi; 5229 /* 5230 * If there are no regs to dump, notice it through 5231 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5232 */ 5233 perf_output_put(handle, abi); 5234 5235 if (abi) { 5236 u64 mask = event->attr.sample_regs_intr; 5237 5238 perf_output_sample_regs(handle, 5239 data->regs_intr.regs, 5240 mask); 5241 } 5242 } 5243 5244 if (!event->attr.watermark) { 5245 int wakeup_events = event->attr.wakeup_events; 5246 5247 if (wakeup_events) { 5248 struct ring_buffer *rb = handle->rb; 5249 int events = local_inc_return(&rb->events); 5250 5251 if (events >= wakeup_events) { 5252 local_sub(wakeup_events, &rb->events); 5253 local_inc(&rb->wakeup); 5254 } 5255 } 5256 } 5257 } 5258 5259 void perf_prepare_sample(struct perf_event_header *header, 5260 struct perf_sample_data *data, 5261 struct perf_event *event, 5262 struct pt_regs *regs) 5263 { 5264 u64 sample_type = event->attr.sample_type; 5265 5266 header->type = PERF_RECORD_SAMPLE; 5267 header->size = sizeof(*header) + event->header_size; 5268 5269 header->misc = 0; 5270 header->misc |= perf_misc_flags(regs); 5271 5272 __perf_event_header__init_id(header, data, event); 5273 5274 if (sample_type & PERF_SAMPLE_IP) 5275 data->ip = perf_instruction_pointer(regs); 5276 5277 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5278 int size = 1; 5279 5280 data->callchain = perf_callchain(event, regs); 5281 5282 if (data->callchain) 5283 size += data->callchain->nr; 5284 5285 header->size += size * sizeof(u64); 5286 } 5287 5288 if (sample_type & PERF_SAMPLE_RAW) { 5289 int size = sizeof(u32); 5290 5291 if (data->raw) 5292 size += data->raw->size; 5293 else 5294 size += sizeof(u32); 5295 5296 WARN_ON_ONCE(size & (sizeof(u64)-1)); 5297 header->size += size; 5298 } 5299 5300 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5301 int size = sizeof(u64); /* nr */ 5302 if (data->br_stack) { 5303 size += data->br_stack->nr 5304 * sizeof(struct perf_branch_entry); 5305 } 5306 header->size += size; 5307 } 5308 5309 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 5310 perf_sample_regs_user(&data->regs_user, regs, 5311 &data->regs_user_copy); 5312 5313 if (sample_type & PERF_SAMPLE_REGS_USER) { 5314 /* regs dump ABI info */ 5315 int size = sizeof(u64); 5316 5317 if (data->regs_user.regs) { 5318 u64 mask = event->attr.sample_regs_user; 5319 size += hweight64(mask) * sizeof(u64); 5320 } 5321 5322 header->size += size; 5323 } 5324 5325 if (sample_type & PERF_SAMPLE_STACK_USER) { 5326 /* 5327 * Either the PERF_SAMPLE_STACK_USER bit needs to always be 5328 * processed as the last one, or an additional check must be 5329 * added whenever a new sample type is introduced, because we 5330 * could eat up the rest of the sample size.
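	 *
	 * Space is reserved here for the layout later written by
	 * perf_output_sample_ustack():
	 *	u64  size;	requested (possibly clamped) dump size
	 *	char data[size];
	 *	u64  dyn_size;	bytes actually copied (only when size != 0)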
5331 */ 5332 u16 stack_size = event->attr.sample_stack_user; 5333 u16 size = sizeof(u64); 5334 5335 stack_size = perf_sample_ustack_size(stack_size, header->size, 5336 data->regs_user.regs); 5337 5338 /* 5339 * If there is something to dump, add space for the dump 5340 * itself and for the field that tells the dynamic size, 5341 * which is how many have been actually dumped. 5342 */ 5343 if (stack_size) 5344 size += sizeof(u64) + stack_size; 5345 5346 data->stack_user_size = stack_size; 5347 header->size += size; 5348 } 5349 5350 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5351 /* regs dump ABI info */ 5352 int size = sizeof(u64); 5353 5354 perf_sample_regs_intr(&data->regs_intr, regs); 5355 5356 if (data->regs_intr.regs) { 5357 u64 mask = event->attr.sample_regs_intr; 5358 5359 size += hweight64(mask) * sizeof(u64); 5360 } 5361 5362 header->size += size; 5363 } 5364 } 5365 5366 void perf_event_output(struct perf_event *event, 5367 struct perf_sample_data *data, 5368 struct pt_regs *regs) 5369 { 5370 struct perf_output_handle handle; 5371 struct perf_event_header header; 5372 5373 /* protect the callchain buffers */ 5374 rcu_read_lock(); 5375 5376 perf_prepare_sample(&header, data, event, regs); 5377 5378 if (perf_output_begin(&handle, event, header.size)) 5379 goto exit; 5380 5381 perf_output_sample(&handle, &header, data, event); 5382 5383 perf_output_end(&handle); 5384 5385 exit: 5386 rcu_read_unlock(); 5387 } 5388 5389 /* 5390 * read event_id 5391 */ 5392 5393 struct perf_read_event { 5394 struct perf_event_header header; 5395 5396 u32 pid; 5397 u32 tid; 5398 }; 5399 5400 static void 5401 perf_event_read_event(struct perf_event *event, 5402 struct task_struct *task) 5403 { 5404 struct perf_output_handle handle; 5405 struct perf_sample_data sample; 5406 struct perf_read_event read_event = { 5407 .header = { 5408 .type = PERF_RECORD_READ, 5409 .misc = 0, 5410 .size = sizeof(read_event) + event->read_size, 5411 }, 5412 .pid = perf_event_pid(event, task), 5413 .tid = perf_event_tid(event, task), 5414 }; 5415 int ret; 5416 5417 perf_event_header__init_id(&read_event.header, &sample, event); 5418 ret = perf_output_begin(&handle, event, read_event.header.size); 5419 if (ret) 5420 return; 5421 5422 perf_output_put(&handle, read_event); 5423 perf_output_read(&handle, event); 5424 perf_event__output_id_sample(event, &handle, &sample); 5425 5426 perf_output_end(&handle); 5427 } 5428 5429 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5430 5431 static void 5432 perf_event_aux_ctx(struct perf_event_context *ctx, 5433 perf_event_aux_output_cb output, 5434 void *data) 5435 { 5436 struct perf_event *event; 5437 5438 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5439 if (event->state < PERF_EVENT_STATE_INACTIVE) 5440 continue; 5441 if (!event_filter_match(event)) 5442 continue; 5443 output(event, data); 5444 } 5445 } 5446 5447 static void 5448 perf_event_aux(perf_event_aux_output_cb output, void *data, 5449 struct perf_event_context *task_ctx) 5450 { 5451 struct perf_cpu_context *cpuctx; 5452 struct perf_event_context *ctx; 5453 struct pmu *pmu; 5454 int ctxn; 5455 5456 rcu_read_lock(); 5457 list_for_each_entry_rcu(pmu, &pmus, entry) { 5458 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5459 if (cpuctx->unique_pmu != pmu) 5460 goto next; 5461 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5462 if (task_ctx) 5463 goto next; 5464 ctxn = pmu->task_ctx_nr; 5465 if (ctxn < 0) 5466 goto next; 5467 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5468 if 
(ctx) 5469 perf_event_aux_ctx(ctx, output, data); 5470 next: 5471 put_cpu_ptr(pmu->pmu_cpu_context); 5472 } 5473 5474 if (task_ctx) { 5475 preempt_disable(); 5476 perf_event_aux_ctx(task_ctx, output, data); 5477 preempt_enable(); 5478 } 5479 rcu_read_unlock(); 5480 } 5481 5482 /* 5483 * task tracking -- fork/exit 5484 * 5485 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5486 */ 5487 5488 struct perf_task_event { 5489 struct task_struct *task; 5490 struct perf_event_context *task_ctx; 5491 5492 struct { 5493 struct perf_event_header header; 5494 5495 u32 pid; 5496 u32 ppid; 5497 u32 tid; 5498 u32 ptid; 5499 u64 time; 5500 } event_id; 5501 }; 5502 5503 static int perf_event_task_match(struct perf_event *event) 5504 { 5505 return event->attr.comm || event->attr.mmap || 5506 event->attr.mmap2 || event->attr.mmap_data || 5507 event->attr.task; 5508 } 5509 5510 static void perf_event_task_output(struct perf_event *event, 5511 void *data) 5512 { 5513 struct perf_task_event *task_event = data; 5514 struct perf_output_handle handle; 5515 struct perf_sample_data sample; 5516 struct task_struct *task = task_event->task; 5517 int ret, size = task_event->event_id.header.size; 5518 5519 if (!perf_event_task_match(event)) 5520 return; 5521 5522 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5523 5524 ret = perf_output_begin(&handle, event, 5525 task_event->event_id.header.size); 5526 if (ret) 5527 goto out; 5528 5529 task_event->event_id.pid = perf_event_pid(event, task); 5530 task_event->event_id.ppid = perf_event_pid(event, current); 5531 5532 task_event->event_id.tid = perf_event_tid(event, task); 5533 task_event->event_id.ptid = perf_event_tid(event, current); 5534 5535 task_event->event_id.time = perf_event_clock(event); 5536 5537 perf_output_put(&handle, task_event->event_id); 5538 5539 perf_event__output_id_sample(event, &handle, &sample); 5540 5541 perf_output_end(&handle); 5542 out: 5543 task_event->event_id.header.size = size; 5544 } 5545 5546 static void perf_event_task(struct task_struct *task, 5547 struct perf_event_context *task_ctx, 5548 int new) 5549 { 5550 struct perf_task_event task_event; 5551 5552 if (!atomic_read(&nr_comm_events) && 5553 !atomic_read(&nr_mmap_events) && 5554 !atomic_read(&nr_task_events)) 5555 return; 5556 5557 task_event = (struct perf_task_event){ 5558 .task = task, 5559 .task_ctx = task_ctx, 5560 .event_id = { 5561 .header = { 5562 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5563 .misc = 0, 5564 .size = sizeof(task_event.event_id), 5565 }, 5566 /* .pid */ 5567 /* .ppid */ 5568 /* .tid */ 5569 /* .ptid */ 5570 /* .time */ 5571 }, 5572 }; 5573 5574 perf_event_aux(perf_event_task_output, 5575 &task_event, 5576 task_ctx); 5577 } 5578 5579 void perf_event_fork(struct task_struct *task) 5580 { 5581 perf_event_task(task, NULL, 1); 5582 } 5583 5584 /* 5585 * comm tracking 5586 */ 5587 5588 struct perf_comm_event { 5589 struct task_struct *task; 5590 char *comm; 5591 int comm_size; 5592 5593 struct { 5594 struct perf_event_header header; 5595 5596 u32 pid; 5597 u32 tid; 5598 } event_id; 5599 }; 5600 5601 static int perf_event_comm_match(struct perf_event *event) 5602 { 5603 return event->attr.comm; 5604 } 5605 5606 static void perf_event_comm_output(struct perf_event *event, 5607 void *data) 5608 { 5609 struct perf_comm_event *comm_event = data; 5610 struct perf_output_handle handle; 5611 struct perf_sample_data sample; 5612 int size = comm_event->event_id.header.size; 5613 int ret; 5614 5615 if (!perf_event_comm_match(event)) 5616 return; 5617 5618 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5619 ret = perf_output_begin(&handle, event, 5620 comm_event->event_id.header.size); 5621 5622 if (ret) 5623 goto out; 5624 5625 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5626 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5627 5628 perf_output_put(&handle, comm_event->event_id); 5629 __output_copy(&handle, comm_event->comm, 5630 comm_event->comm_size); 5631 5632 perf_event__output_id_sample(event, &handle, &sample); 5633 5634 perf_output_end(&handle); 5635 out: 5636 comm_event->event_id.header.size = size; 5637 } 5638 5639 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5640 { 5641 char comm[TASK_COMM_LEN]; 5642 unsigned int size; 5643 5644 memset(comm, 0, sizeof(comm)); 5645 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5646 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5647 5648 comm_event->comm = comm; 5649 comm_event->comm_size = size; 5650 5651 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5652 5653 perf_event_aux(perf_event_comm_output, 5654 comm_event, 5655 NULL); 5656 } 5657 5658 void perf_event_comm(struct task_struct *task, bool exec) 5659 { 5660 struct perf_comm_event comm_event; 5661 5662 if (!atomic_read(&nr_comm_events)) 5663 return; 5664 5665 comm_event = (struct perf_comm_event){ 5666 .task = task, 5667 /* .comm */ 5668 /* .comm_size */ 5669 .event_id = { 5670 .header = { 5671 .type = PERF_RECORD_COMM, 5672 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 5673 /* .size */ 5674 }, 5675 /* .pid */ 5676 /* .tid */ 5677 }, 5678 }; 5679 5680 perf_event_comm_event(&comm_event); 5681 } 5682 5683 /* 5684 * mmap tracking 5685 */ 5686 5687 struct perf_mmap_event { 5688 struct vm_area_struct *vma; 5689 5690 const char *file_name; 5691 int file_size; 5692 int maj, min; 5693 u64 ino; 5694 u64 ino_generation; 5695 u32 prot, flags; 5696 5697 struct { 5698 struct perf_event_header header; 5699 5700 u32 pid; 5701 u32 tid; 5702 u64 start; 5703 u64 len; 5704 u64 pgoff; 5705 } event_id; 5706 }; 5707 5708 static int perf_event_mmap_match(struct perf_event *event, 5709 void *data) 5710 { 5711 struct perf_mmap_event *mmap_event = data; 5712 struct vm_area_struct *vma = mmap_event->vma; 5713 int executable = vma->vm_flags & VM_EXEC; 5714 5715 return (!executable && event->attr.mmap_data) || 5716 (executable && (event->attr.mmap || event->attr.mmap2)); 5717 } 5718 5719 static void perf_event_mmap_output(struct perf_event *event, 5720 void *data) 5721 { 5722 struct perf_mmap_event *mmap_event = data; 5723 struct perf_output_handle handle; 5724 struct perf_sample_data sample; 5725 int size = mmap_event->event_id.header.size; 5726 int ret; 5727 5728 if (!perf_event_mmap_match(event, data)) 5729 return; 5730 5731 if (event->attr.mmap2) { 5732 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5733 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5734 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5735 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5736 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5737 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5738 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5739 } 5740 5741 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5742 ret = perf_output_begin(&handle, event, 5743 mmap_event->event_id.header.size); 5744 if (ret) 5745 goto out; 5746 5747 mmap_event->event_id.pid = perf_event_pid(event, current); 5748 mmap_event->event_id.tid = perf_event_tid(event, current); 5749 5750 perf_output_put(&handle, mmap_event->event_id); 5751 5752 if (event->attr.mmap2) { 5753 perf_output_put(&handle, mmap_event->maj); 5754 perf_output_put(&handle, mmap_event->min); 5755 perf_output_put(&handle, mmap_event->ino); 5756 perf_output_put(&handle, mmap_event->ino_generation); 5757 perf_output_put(&handle, mmap_event->prot); 5758 perf_output_put(&handle, mmap_event->flags); 5759 } 5760 5761 __output_copy(&handle, mmap_event->file_name, 5762 mmap_event->file_size); 5763 5764 perf_event__output_id_sample(event, &handle, &sample); 5765 5766 perf_output_end(&handle); 5767 out: 5768 mmap_event->event_id.header.size = size; 5769 } 5770 5771 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5772 { 5773 struct vm_area_struct *vma = mmap_event->vma; 5774 struct file *file = vma->vm_file; 5775 int maj = 0, min = 0; 5776 u64 ino = 0, gen = 0; 5777 u32 prot = 0, flags = 0; 5778 unsigned int size; 5779 char tmp[16]; 5780 char *buf = NULL; 5781 char *name; 5782 5783 if (file) { 5784 struct inode *inode; 5785 dev_t dev; 5786 5787 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5788 if (!buf) { 5789 name = "//enomem"; 5790 goto cpy_name; 5791 } 5792 /* 5793 * d_path() works from the end of the rb backwards, so we 5794 * need to add enough zero bytes after the string to handle 5795 * the 64bit alignment we do later. 
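	 *
	 * (Example: a name whose strlen()+1 is 7 is padded out to 8 by the
	 *  alignment loop at got_name below; reserving sizeof(u64) of slack
	 *  here keeps those padding stores inside the buffer.)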
5796 */ 5797 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 5798 if (IS_ERR(name)) { 5799 name = "//toolong"; 5800 goto cpy_name; 5801 } 5802 inode = file_inode(vma->vm_file); 5803 dev = inode->i_sb->s_dev; 5804 ino = inode->i_ino; 5805 gen = inode->i_generation; 5806 maj = MAJOR(dev); 5807 min = MINOR(dev); 5808 5809 if (vma->vm_flags & VM_READ) 5810 prot |= PROT_READ; 5811 if (vma->vm_flags & VM_WRITE) 5812 prot |= PROT_WRITE; 5813 if (vma->vm_flags & VM_EXEC) 5814 prot |= PROT_EXEC; 5815 5816 if (vma->vm_flags & VM_MAYSHARE) 5817 flags = MAP_SHARED; 5818 else 5819 flags = MAP_PRIVATE; 5820 5821 if (vma->vm_flags & VM_DENYWRITE) 5822 flags |= MAP_DENYWRITE; 5823 if (vma->vm_flags & VM_MAYEXEC) 5824 flags |= MAP_EXECUTABLE; 5825 if (vma->vm_flags & VM_LOCKED) 5826 flags |= MAP_LOCKED; 5827 if (vma->vm_flags & VM_HUGETLB) 5828 flags |= MAP_HUGETLB; 5829 5830 goto got_name; 5831 } else { 5832 if (vma->vm_ops && vma->vm_ops->name) { 5833 name = (char *) vma->vm_ops->name(vma); 5834 if (name) 5835 goto cpy_name; 5836 } 5837 5838 name = (char *)arch_vma_name(vma); 5839 if (name) 5840 goto cpy_name; 5841 5842 if (vma->vm_start <= vma->vm_mm->start_brk && 5843 vma->vm_end >= vma->vm_mm->brk) { 5844 name = "[heap]"; 5845 goto cpy_name; 5846 } 5847 if (vma->vm_start <= vma->vm_mm->start_stack && 5848 vma->vm_end >= vma->vm_mm->start_stack) { 5849 name = "[stack]"; 5850 goto cpy_name; 5851 } 5852 5853 name = "//anon"; 5854 goto cpy_name; 5855 } 5856 5857 cpy_name: 5858 strlcpy(tmp, name, sizeof(tmp)); 5859 name = tmp; 5860 got_name: 5861 /* 5862 * Since our buffer works in 8 byte units we need to align our string 5863 * size to a multiple of 8. However, we must guarantee the tail end is 5864 * zero'd out to avoid leaking random bits to userspace. 5865 */ 5866 size = strlen(name)+1; 5867 while (!IS_ALIGNED(size, sizeof(u64))) 5868 name[size++] = '\0'; 5869 5870 mmap_event->file_name = name; 5871 mmap_event->file_size = size; 5872 mmap_event->maj = maj; 5873 mmap_event->min = min; 5874 mmap_event->ino = ino; 5875 mmap_event->ino_generation = gen; 5876 mmap_event->prot = prot; 5877 mmap_event->flags = flags; 5878 5879 if (!(vma->vm_flags & VM_EXEC)) 5880 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 5881 5882 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5883 5884 perf_event_aux(perf_event_mmap_output, 5885 mmap_event, 5886 NULL); 5887 5888 kfree(buf); 5889 } 5890 5891 void perf_event_mmap(struct vm_area_struct *vma) 5892 { 5893 struct perf_mmap_event mmap_event; 5894 5895 if (!atomic_read(&nr_mmap_events)) 5896 return; 5897 5898 mmap_event = (struct perf_mmap_event){ 5899 .vma = vma, 5900 /* .file_name */ 5901 /* .file_size */ 5902 .event_id = { 5903 .header = { 5904 .type = PERF_RECORD_MMAP, 5905 .misc = PERF_RECORD_MISC_USER, 5906 /* .size */ 5907 }, 5908 /* .pid */ 5909 /* .tid */ 5910 .start = vma->vm_start, 5911 .len = vma->vm_end - vma->vm_start, 5912 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 5913 }, 5914 /* .maj (attr_mmap2 only) */ 5915 /* .min (attr_mmap2 only) */ 5916 /* .ino (attr_mmap2 only) */ 5917 /* .ino_generation (attr_mmap2 only) */ 5918 /* .prot (attr_mmap2 only) */ 5919 /* .flags (attr_mmap2 only) */ 5920 }; 5921 5922 perf_event_mmap_event(&mmap_event); 5923 } 5924 5925 void perf_event_aux_event(struct perf_event *event, unsigned long head, 5926 unsigned long size, u64 flags) 5927 { 5928 struct perf_output_handle handle; 5929 struct perf_sample_data sample; 5930 struct perf_aux_event { 5931 struct perf_event_header header; 5932 
u64 offset; 5933 u64 size; 5934 u64 flags; 5935 } rec = { 5936 .header = { 5937 .type = PERF_RECORD_AUX, 5938 .misc = 0, 5939 .size = sizeof(rec), 5940 }, 5941 .offset = head, 5942 .size = size, 5943 .flags = flags, 5944 }; 5945 int ret; 5946 5947 perf_event_header__init_id(&rec.header, &sample, event); 5948 ret = perf_output_begin(&handle, event, rec.header.size); 5949 5950 if (ret) 5951 return; 5952 5953 perf_output_put(&handle, rec); 5954 perf_event__output_id_sample(event, &handle, &sample); 5955 5956 perf_output_end(&handle); 5957 } 5958 5959 /* 5960 * Lost/dropped samples logging 5961 */ 5962 void perf_log_lost_samples(struct perf_event *event, u64 lost) 5963 { 5964 struct perf_output_handle handle; 5965 struct perf_sample_data sample; 5966 int ret; 5967 5968 struct { 5969 struct perf_event_header header; 5970 u64 lost; 5971 } lost_samples_event = { 5972 .header = { 5973 .type = PERF_RECORD_LOST_SAMPLES, 5974 .misc = 0, 5975 .size = sizeof(lost_samples_event), 5976 }, 5977 .lost = lost, 5978 }; 5979 5980 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 5981 5982 ret = perf_output_begin(&handle, event, 5983 lost_samples_event.header.size); 5984 if (ret) 5985 return; 5986 5987 perf_output_put(&handle, lost_samples_event); 5988 perf_event__output_id_sample(event, &handle, &sample); 5989 perf_output_end(&handle); 5990 } 5991 5992 /* 5993 * IRQ throttle logging 5994 */ 5995 5996 static void perf_log_throttle(struct perf_event *event, int enable) 5997 { 5998 struct perf_output_handle handle; 5999 struct perf_sample_data sample; 6000 int ret; 6001 6002 struct { 6003 struct perf_event_header header; 6004 u64 time; 6005 u64 id; 6006 u64 stream_id; 6007 } throttle_event = { 6008 .header = { 6009 .type = PERF_RECORD_THROTTLE, 6010 .misc = 0, 6011 .size = sizeof(throttle_event), 6012 }, 6013 .time = perf_event_clock(event), 6014 .id = primary_event_id(event), 6015 .stream_id = event->id, 6016 }; 6017 6018 if (enable) 6019 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6020 6021 perf_event_header__init_id(&throttle_event.header, &sample, event); 6022 6023 ret = perf_output_begin(&handle, event, 6024 throttle_event.header.size); 6025 if (ret) 6026 return; 6027 6028 perf_output_put(&handle, throttle_event); 6029 perf_event__output_id_sample(event, &handle, &sample); 6030 perf_output_end(&handle); 6031 } 6032 6033 static void perf_log_itrace_start(struct perf_event *event) 6034 { 6035 struct perf_output_handle handle; 6036 struct perf_sample_data sample; 6037 struct perf_aux_event { 6038 struct perf_event_header header; 6039 u32 pid; 6040 u32 tid; 6041 } rec; 6042 int ret; 6043 6044 if (event->parent) 6045 event = event->parent; 6046 6047 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6048 event->hw.itrace_started) 6049 return; 6050 6051 event->hw.itrace_started = 1; 6052 6053 rec.header.type = PERF_RECORD_ITRACE_START; 6054 rec.header.misc = 0; 6055 rec.header.size = sizeof(rec); 6056 rec.pid = perf_event_pid(event, current); 6057 rec.tid = perf_event_tid(event, current); 6058 6059 perf_event_header__init_id(&rec.header, &sample, event); 6060 ret = perf_output_begin(&handle, event, rec.header.size); 6061 6062 if (ret) 6063 return; 6064 6065 perf_output_put(&handle, rec); 6066 perf_event__output_id_sample(event, &handle, &sample); 6067 6068 perf_output_end(&handle); 6069 } 6070 6071 /* 6072 * Generic event overflow handling, sampling. 
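	 *
	 * __perf_event_overflow() below is the common path: it applies
	 * interrupt throttling, re-adjusts the period for freq-based events,
	 * counts down event->event_limit (armed via PERF_EVENT_IOC_REFRESH)
	 * to flag POLL_HUP and schedule a disable when it hits zero, and
	 * finally hands the sample to event->overflow_handler, which
	 * defaults to perf_event_output().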
6073 */ 6074 6075 static int __perf_event_overflow(struct perf_event *event, 6076 int throttle, struct perf_sample_data *data, 6077 struct pt_regs *regs) 6078 { 6079 int events = atomic_read(&event->event_limit); 6080 struct hw_perf_event *hwc = &event->hw; 6081 u64 seq; 6082 int ret = 0; 6083 6084 /* 6085 * Non-sampling counters might still use the PMI to fold short 6086 * hardware counters, ignore those. 6087 */ 6088 if (unlikely(!is_sampling_event(event))) 6089 return 0; 6090 6091 seq = __this_cpu_read(perf_throttled_seq); 6092 if (seq != hwc->interrupts_seq) { 6093 hwc->interrupts_seq = seq; 6094 hwc->interrupts = 1; 6095 } else { 6096 hwc->interrupts++; 6097 if (unlikely(throttle 6098 && hwc->interrupts >= max_samples_per_tick)) { 6099 __this_cpu_inc(perf_throttled_count); 6100 hwc->interrupts = MAX_INTERRUPTS; 6101 perf_log_throttle(event, 0); 6102 tick_nohz_full_kick(); 6103 ret = 1; 6104 } 6105 } 6106 6107 if (event->attr.freq) { 6108 u64 now = perf_clock(); 6109 s64 delta = now - hwc->freq_time_stamp; 6110 6111 hwc->freq_time_stamp = now; 6112 6113 if (delta > 0 && delta < 2*TICK_NSEC) 6114 perf_adjust_period(event, delta, hwc->last_period, true); 6115 } 6116 6117 /* 6118 * XXX event_limit might not quite work as expected on inherited 6119 * events 6120 */ 6121 6122 event->pending_kill = POLL_IN; 6123 if (events && atomic_dec_and_test(&event->event_limit)) { 6124 ret = 1; 6125 event->pending_kill = POLL_HUP; 6126 event->pending_disable = 1; 6127 irq_work_queue(&event->pending); 6128 } 6129 6130 if (event->overflow_handler) 6131 event->overflow_handler(event, data, regs); 6132 else 6133 perf_event_output(event, data, regs); 6134 6135 if (event->fasync && event->pending_kill) { 6136 event->pending_wakeup = 1; 6137 irq_work_queue(&event->pending); 6138 } 6139 6140 return ret; 6141 } 6142 6143 int perf_event_overflow(struct perf_event *event, 6144 struct perf_sample_data *data, 6145 struct pt_regs *regs) 6146 { 6147 return __perf_event_overflow(event, 1, data, regs); 6148 } 6149 6150 /* 6151 * Generic software event infrastructure 6152 */ 6153 6154 struct swevent_htable { 6155 struct swevent_hlist *swevent_hlist; 6156 struct mutex hlist_mutex; 6157 int hlist_refcount; 6158 6159 /* Recursion avoidance in each contexts */ 6160 int recursion[PERF_NR_CONTEXTS]; 6161 6162 /* Keeps track of cpu being initialized/exited */ 6163 bool online; 6164 }; 6165 6166 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6167 6168 /* 6169 * We directly increment event->count and keep a second value in 6170 * event->hw.period_left to count intervals. This period event 6171 * is kept in the range [-sample_period, 0] so that we can use the 6172 * sign as trigger. 
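	 *
	 * Worked example with sample_period == 4: period_left starts at -4,
	 * each event adds its count, and once the value reaches 0 or above
	 * an overflow is taken; perf_swevent_set_period() then rewinds
	 * period_left by a whole number of periods and reports how many
	 * periods elapsed.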
6173 */ 6174 6175 u64 perf_swevent_set_period(struct perf_event *event) 6176 { 6177 struct hw_perf_event *hwc = &event->hw; 6178 u64 period = hwc->last_period; 6179 u64 nr, offset; 6180 s64 old, val; 6181 6182 hwc->last_period = hwc->sample_period; 6183 6184 again: 6185 old = val = local64_read(&hwc->period_left); 6186 if (val < 0) 6187 return 0; 6188 6189 nr = div64_u64(period + val, period); 6190 offset = nr * period; 6191 val -= offset; 6192 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6193 goto again; 6194 6195 return nr; 6196 } 6197 6198 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6199 struct perf_sample_data *data, 6200 struct pt_regs *regs) 6201 { 6202 struct hw_perf_event *hwc = &event->hw; 6203 int throttle = 0; 6204 6205 if (!overflow) 6206 overflow = perf_swevent_set_period(event); 6207 6208 if (hwc->interrupts == MAX_INTERRUPTS) 6209 return; 6210 6211 for (; overflow; overflow--) { 6212 if (__perf_event_overflow(event, throttle, 6213 data, regs)) { 6214 /* 6215 * We inhibit the overflow from happening when 6216 * hwc->interrupts == MAX_INTERRUPTS. 6217 */ 6218 break; 6219 } 6220 throttle = 1; 6221 } 6222 } 6223 6224 static void perf_swevent_event(struct perf_event *event, u64 nr, 6225 struct perf_sample_data *data, 6226 struct pt_regs *regs) 6227 { 6228 struct hw_perf_event *hwc = &event->hw; 6229 6230 local64_add(nr, &event->count); 6231 6232 if (!regs) 6233 return; 6234 6235 if (!is_sampling_event(event)) 6236 return; 6237 6238 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6239 data->period = nr; 6240 return perf_swevent_overflow(event, 1, data, regs); 6241 } else 6242 data->period = event->hw.last_period; 6243 6244 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6245 return perf_swevent_overflow(event, 1, data, regs); 6246 6247 if (local64_add_negative(nr, &hwc->period_left)) 6248 return; 6249 6250 perf_swevent_overflow(event, 0, data, regs); 6251 } 6252 6253 static int perf_exclude_event(struct perf_event *event, 6254 struct pt_regs *regs) 6255 { 6256 if (event->hw.state & PERF_HES_STOPPED) 6257 return 1; 6258 6259 if (regs) { 6260 if (event->attr.exclude_user && user_mode(regs)) 6261 return 1; 6262 6263 if (event->attr.exclude_kernel && !user_mode(regs)) 6264 return 1; 6265 } 6266 6267 return 0; 6268 } 6269 6270 static int perf_swevent_match(struct perf_event *event, 6271 enum perf_type_id type, 6272 u32 event_id, 6273 struct perf_sample_data *data, 6274 struct pt_regs *regs) 6275 { 6276 if (event->attr.type != type) 6277 return 0; 6278 6279 if (event->attr.config != event_id) 6280 return 0; 6281 6282 if (perf_exclude_event(event, regs)) 6283 return 0; 6284 6285 return 1; 6286 } 6287 6288 static inline u64 swevent_hash(u64 type, u32 event_id) 6289 { 6290 u64 val = event_id | (type << 32); 6291 6292 return hash_64(val, SWEVENT_HLIST_BITS); 6293 } 6294 6295 static inline struct hlist_head * 6296 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6297 { 6298 u64 hash = swevent_hash(type, event_id); 6299 6300 return &hlist->heads[hash]; 6301 } 6302 6303 /* For the read side: events when they trigger */ 6304 static inline struct hlist_head * 6305 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6306 { 6307 struct swevent_hlist *hlist; 6308 6309 hlist = rcu_dereference(swhash->swevent_hlist); 6310 if (!hlist) 6311 return NULL; 6312 6313 return __find_swevent_head(hlist, type, event_id); 6314 } 6315 6316 /* For the event head insertion and removal 
in the hlist */ 6317 static inline struct hlist_head * 6318 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6319 { 6320 struct swevent_hlist *hlist; 6321 u32 event_id = event->attr.config; 6322 u64 type = event->attr.type; 6323 6324 /* 6325 * Event scheduling is always serialized against hlist allocation 6326 * and release. Which makes the protected version suitable here. 6327 * The context lock guarantees that. 6328 */ 6329 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6330 lockdep_is_held(&event->ctx->lock)); 6331 if (!hlist) 6332 return NULL; 6333 6334 return __find_swevent_head(hlist, type, event_id); 6335 } 6336 6337 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6338 u64 nr, 6339 struct perf_sample_data *data, 6340 struct pt_regs *regs) 6341 { 6342 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6343 struct perf_event *event; 6344 struct hlist_head *head; 6345 6346 rcu_read_lock(); 6347 head = find_swevent_head_rcu(swhash, type, event_id); 6348 if (!head) 6349 goto end; 6350 6351 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6352 if (perf_swevent_match(event, type, event_id, data, regs)) 6353 perf_swevent_event(event, nr, data, regs); 6354 } 6355 end: 6356 rcu_read_unlock(); 6357 } 6358 6359 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6360 6361 int perf_swevent_get_recursion_context(void) 6362 { 6363 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6364 6365 return get_recursion_context(swhash->recursion); 6366 } 6367 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6368 6369 inline void perf_swevent_put_recursion_context(int rctx) 6370 { 6371 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6372 6373 put_recursion_context(swhash->recursion, rctx); 6374 } 6375 6376 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6377 { 6378 struct perf_sample_data data; 6379 6380 if (WARN_ON_ONCE(!regs)) 6381 return; 6382 6383 perf_sample_data_init(&data, addr, 0); 6384 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6385 } 6386 6387 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6388 { 6389 int rctx; 6390 6391 preempt_disable_notrace(); 6392 rctx = perf_swevent_get_recursion_context(); 6393 if (unlikely(rctx < 0)) 6394 goto fail; 6395 6396 ___perf_sw_event(event_id, nr, regs, addr); 6397 6398 perf_swevent_put_recursion_context(rctx); 6399 fail: 6400 preempt_enable_notrace(); 6401 } 6402 6403 static void perf_swevent_read(struct perf_event *event) 6404 { 6405 } 6406 6407 static int perf_swevent_add(struct perf_event *event, int flags) 6408 { 6409 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6410 struct hw_perf_event *hwc = &event->hw; 6411 struct hlist_head *head; 6412 6413 if (is_sampling_event(event)) { 6414 hwc->last_period = hwc->sample_period; 6415 perf_swevent_set_period(event); 6416 } 6417 6418 hwc->state = !(flags & PERF_EF_START); 6419 6420 head = find_swevent_head(swhash, event); 6421 if (!head) { 6422 /* 6423 * We can race with cpu hotplug code. Do not 6424 * WARN if the cpu just got unplugged. 
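	 *
	 * (So the check below only complains when the hlist is missing while
	 *  this CPU is still marked online, which would indicate a real bug
	 *  rather than a hotplug race.)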
6425 */ 6426 WARN_ON_ONCE(swhash->online); 6427 return -EINVAL; 6428 } 6429 6430 hlist_add_head_rcu(&event->hlist_entry, head); 6431 perf_event_update_userpage(event); 6432 6433 return 0; 6434 } 6435 6436 static void perf_swevent_del(struct perf_event *event, int flags) 6437 { 6438 hlist_del_rcu(&event->hlist_entry); 6439 } 6440 6441 static void perf_swevent_start(struct perf_event *event, int flags) 6442 { 6443 event->hw.state = 0; 6444 } 6445 6446 static void perf_swevent_stop(struct perf_event *event, int flags) 6447 { 6448 event->hw.state = PERF_HES_STOPPED; 6449 } 6450 6451 /* Deref the hlist from the update side */ 6452 static inline struct swevent_hlist * 6453 swevent_hlist_deref(struct swevent_htable *swhash) 6454 { 6455 return rcu_dereference_protected(swhash->swevent_hlist, 6456 lockdep_is_held(&swhash->hlist_mutex)); 6457 } 6458 6459 static void swevent_hlist_release(struct swevent_htable *swhash) 6460 { 6461 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6462 6463 if (!hlist) 6464 return; 6465 6466 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6467 kfree_rcu(hlist, rcu_head); 6468 } 6469 6470 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6471 { 6472 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6473 6474 mutex_lock(&swhash->hlist_mutex); 6475 6476 if (!--swhash->hlist_refcount) 6477 swevent_hlist_release(swhash); 6478 6479 mutex_unlock(&swhash->hlist_mutex); 6480 } 6481 6482 static void swevent_hlist_put(struct perf_event *event) 6483 { 6484 int cpu; 6485 6486 for_each_possible_cpu(cpu) 6487 swevent_hlist_put_cpu(event, cpu); 6488 } 6489 6490 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6491 { 6492 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6493 int err = 0; 6494 6495 mutex_lock(&swhash->hlist_mutex); 6496 6497 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6498 struct swevent_hlist *hlist; 6499 6500 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6501 if (!hlist) { 6502 err = -ENOMEM; 6503 goto exit; 6504 } 6505 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6506 } 6507 swhash->hlist_refcount++; 6508 exit: 6509 mutex_unlock(&swhash->hlist_mutex); 6510 6511 return err; 6512 } 6513 6514 static int swevent_hlist_get(struct perf_event *event) 6515 { 6516 int err; 6517 int cpu, failed_cpu; 6518 6519 get_online_cpus(); 6520 for_each_possible_cpu(cpu) { 6521 err = swevent_hlist_get_cpu(event, cpu); 6522 if (err) { 6523 failed_cpu = cpu; 6524 goto fail; 6525 } 6526 } 6527 put_online_cpus(); 6528 6529 return 0; 6530 fail: 6531 for_each_possible_cpu(cpu) { 6532 if (cpu == failed_cpu) 6533 break; 6534 swevent_hlist_put_cpu(event, cpu); 6535 } 6536 6537 put_online_cpus(); 6538 return err; 6539 } 6540 6541 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6542 6543 static void sw_perf_event_destroy(struct perf_event *event) 6544 { 6545 u64 event_id = event->attr.config; 6546 6547 WARN_ON(event->parent); 6548 6549 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6550 swevent_hlist_put(event); 6551 } 6552 6553 static int perf_swevent_init(struct perf_event *event) 6554 { 6555 u64 event_id = event->attr.config; 6556 6557 if (event->attr.type != PERF_TYPE_SOFTWARE) 6558 return -ENOENT; 6559 6560 /* 6561 * no branch sampling for software events 6562 */ 6563 if (has_branch_stack(event)) 6564 return -EOPNOTSUPP; 6565 6566 switch (event_id) { 6567 case PERF_COUNT_SW_CPU_CLOCK: 6568 case PERF_COUNT_SW_TASK_CLOCK: 6569 return -ENOENT; 6570 6571 default: 6572 break; 6573 } 6574 6575 
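	/*
	 * The remaining generic ids index the perf_swevent_enabled[]
	 * static keys below, so reject anything out of range first.
	 */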
if (event_id >= PERF_COUNT_SW_MAX) 6576 return -ENOENT; 6577 6578 if (!event->parent) { 6579 int err; 6580 6581 err = swevent_hlist_get(event); 6582 if (err) 6583 return err; 6584 6585 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6586 event->destroy = sw_perf_event_destroy; 6587 } 6588 6589 return 0; 6590 } 6591 6592 static struct pmu perf_swevent = { 6593 .task_ctx_nr = perf_sw_context, 6594 6595 .capabilities = PERF_PMU_CAP_NO_NMI, 6596 6597 .event_init = perf_swevent_init, 6598 .add = perf_swevent_add, 6599 .del = perf_swevent_del, 6600 .start = perf_swevent_start, 6601 .stop = perf_swevent_stop, 6602 .read = perf_swevent_read, 6603 }; 6604 6605 #ifdef CONFIG_EVENT_TRACING 6606 6607 static int perf_tp_filter_match(struct perf_event *event, 6608 struct perf_sample_data *data) 6609 { 6610 void *record = data->raw->data; 6611 6612 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6613 return 1; 6614 return 0; 6615 } 6616 6617 static int perf_tp_event_match(struct perf_event *event, 6618 struct perf_sample_data *data, 6619 struct pt_regs *regs) 6620 { 6621 if (event->hw.state & PERF_HES_STOPPED) 6622 return 0; 6623 /* 6624 * All tracepoints are from kernel-space. 6625 */ 6626 if (event->attr.exclude_kernel) 6627 return 0; 6628 6629 if (!perf_tp_filter_match(event, data)) 6630 return 0; 6631 6632 return 1; 6633 } 6634 6635 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6636 struct pt_regs *regs, struct hlist_head *head, int rctx, 6637 struct task_struct *task) 6638 { 6639 struct perf_sample_data data; 6640 struct perf_event *event; 6641 6642 struct perf_raw_record raw = { 6643 .size = entry_size, 6644 .data = record, 6645 }; 6646 6647 perf_sample_data_init(&data, addr, 0); 6648 data.raw = &raw; 6649 6650 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6651 if (perf_tp_event_match(event, &data, regs)) 6652 perf_swevent_event(event, count, &data, regs); 6653 } 6654 6655 /* 6656 * If we got specified a target task, also iterate its context and 6657 * deliver this event there too. 
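	 *
	 * This covers, for example, tracepoints that fire in the context of
	 * one task but logically concern another (task != current): events
	 * attached to that task would otherwise be missed, since the hlist
	 * walked above only contains events active on this CPU.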
6658 */ 6659 if (task && task != current) { 6660 struct perf_event_context *ctx; 6661 struct trace_entry *entry = record; 6662 6663 rcu_read_lock(); 6664 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6665 if (!ctx) 6666 goto unlock; 6667 6668 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6669 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6670 continue; 6671 if (event->attr.config != entry->type) 6672 continue; 6673 if (perf_tp_event_match(event, &data, regs)) 6674 perf_swevent_event(event, count, &data, regs); 6675 } 6676 unlock: 6677 rcu_read_unlock(); 6678 } 6679 6680 perf_swevent_put_recursion_context(rctx); 6681 } 6682 EXPORT_SYMBOL_GPL(perf_tp_event); 6683 6684 static void tp_perf_event_destroy(struct perf_event *event) 6685 { 6686 perf_trace_destroy(event); 6687 } 6688 6689 static int perf_tp_event_init(struct perf_event *event) 6690 { 6691 int err; 6692 6693 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6694 return -ENOENT; 6695 6696 /* 6697 * no branch sampling for tracepoint events 6698 */ 6699 if (has_branch_stack(event)) 6700 return -EOPNOTSUPP; 6701 6702 err = perf_trace_init(event); 6703 if (err) 6704 return err; 6705 6706 event->destroy = tp_perf_event_destroy; 6707 6708 return 0; 6709 } 6710 6711 static struct pmu perf_tracepoint = { 6712 .task_ctx_nr = perf_sw_context, 6713 6714 .event_init = perf_tp_event_init, 6715 .add = perf_trace_add, 6716 .del = perf_trace_del, 6717 .start = perf_swevent_start, 6718 .stop = perf_swevent_stop, 6719 .read = perf_swevent_read, 6720 }; 6721 6722 static inline void perf_tp_register(void) 6723 { 6724 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 6725 } 6726 6727 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6728 { 6729 char *filter_str; 6730 int ret; 6731 6732 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6733 return -EINVAL; 6734 6735 filter_str = strndup_user(arg, PAGE_SIZE); 6736 if (IS_ERR(filter_str)) 6737 return PTR_ERR(filter_str); 6738 6739 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 6740 6741 kfree(filter_str); 6742 return ret; 6743 } 6744 6745 static void perf_event_free_filter(struct perf_event *event) 6746 { 6747 ftrace_profile_free_filter(event); 6748 } 6749 6750 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6751 { 6752 struct bpf_prog *prog; 6753 6754 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6755 return -EINVAL; 6756 6757 if (event->tp_event->prog) 6758 return -EEXIST; 6759 6760 if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) 6761 /* bpf programs can only be attached to kprobes */ 6762 return -EINVAL; 6763 6764 prog = bpf_prog_get(prog_fd); 6765 if (IS_ERR(prog)) 6766 return PTR_ERR(prog); 6767 6768 if (prog->type != BPF_PROG_TYPE_KPROBE) { 6769 /* valid fd, but invalid bpf program type */ 6770 bpf_prog_put(prog); 6771 return -EINVAL; 6772 } 6773 6774 event->tp_event->prog = prog; 6775 6776 return 0; 6777 } 6778 6779 static void perf_event_free_bpf_prog(struct perf_event *event) 6780 { 6781 struct bpf_prog *prog; 6782 6783 if (!event->tp_event) 6784 return; 6785 6786 prog = event->tp_event->prog; 6787 if (prog) { 6788 event->tp_event->prog = NULL; 6789 bpf_prog_put(prog); 6790 } 6791 } 6792 6793 #else 6794 6795 static inline void perf_tp_register(void) 6796 { 6797 } 6798 6799 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6800 { 6801 return -ENOENT; 6802 } 6803 6804 static void perf_event_free_filter(struct perf_event *event) 6805 { 6806 } 
6807 6808 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6809 { 6810 return -ENOENT; 6811 } 6812 6813 static void perf_event_free_bpf_prog(struct perf_event *event) 6814 { 6815 } 6816 #endif /* CONFIG_EVENT_TRACING */ 6817 6818 #ifdef CONFIG_HAVE_HW_BREAKPOINT 6819 void perf_bp_event(struct perf_event *bp, void *data) 6820 { 6821 struct perf_sample_data sample; 6822 struct pt_regs *regs = data; 6823 6824 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 6825 6826 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 6827 perf_swevent_event(bp, 1, &sample, regs); 6828 } 6829 #endif 6830 6831 /* 6832 * hrtimer based swevent callback 6833 */ 6834 6835 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 6836 { 6837 enum hrtimer_restart ret = HRTIMER_RESTART; 6838 struct perf_sample_data data; 6839 struct pt_regs *regs; 6840 struct perf_event *event; 6841 u64 period; 6842 6843 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 6844 6845 if (event->state != PERF_EVENT_STATE_ACTIVE) 6846 return HRTIMER_NORESTART; 6847 6848 event->pmu->read(event); 6849 6850 perf_sample_data_init(&data, 0, event->hw.last_period); 6851 regs = get_irq_regs(); 6852 6853 if (regs && !perf_exclude_event(event, regs)) { 6854 if (!(event->attr.exclude_idle && is_idle_task(current))) 6855 if (__perf_event_overflow(event, 1, &data, regs)) 6856 ret = HRTIMER_NORESTART; 6857 } 6858 6859 period = max_t(u64, 10000, event->hw.sample_period); 6860 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 6861 6862 return ret; 6863 } 6864 6865 static void perf_swevent_start_hrtimer(struct perf_event *event) 6866 { 6867 struct hw_perf_event *hwc = &event->hw; 6868 s64 period; 6869 6870 if (!is_sampling_event(event)) 6871 return; 6872 6873 period = local64_read(&hwc->period_left); 6874 if (period) { 6875 if (period < 0) 6876 period = 10000; 6877 6878 local64_set(&hwc->period_left, 0); 6879 } else { 6880 period = max_t(u64, 10000, hwc->sample_period); 6881 } 6882 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 6883 HRTIMER_MODE_REL_PINNED); 6884 } 6885 6886 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 6887 { 6888 struct hw_perf_event *hwc = &event->hw; 6889 6890 if (is_sampling_event(event)) { 6891 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 6892 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 6893 6894 hrtimer_cancel(&hwc->hrtimer); 6895 } 6896 } 6897 6898 static void perf_swevent_init_hrtimer(struct perf_event *event) 6899 { 6900 struct hw_perf_event *hwc = &event->hw; 6901 6902 if (!is_sampling_event(event)) 6903 return; 6904 6905 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6906 hwc->hrtimer.function = perf_swevent_hrtimer; 6907 6908 /* 6909 * Since hrtimers have a fixed rate, we can do a static freq->period 6910 * mapping and avoid the whole period adjust feedback stuff. 
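	 *
	 * For example, attr.sample_freq = 4000 (Hz) becomes a fixed
	 * sample_period of NSEC_PER_SEC / 4000 = 250000 ns below.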
6911 */ 6912 if (event->attr.freq) { 6913 long freq = event->attr.sample_freq; 6914 6915 event->attr.sample_period = NSEC_PER_SEC / freq; 6916 hwc->sample_period = event->attr.sample_period; 6917 local64_set(&hwc->period_left, hwc->sample_period); 6918 hwc->last_period = hwc->sample_period; 6919 event->attr.freq = 0; 6920 } 6921 } 6922 6923 /* 6924 * Software event: cpu wall time clock 6925 */ 6926 6927 static void cpu_clock_event_update(struct perf_event *event) 6928 { 6929 s64 prev; 6930 u64 now; 6931 6932 now = local_clock(); 6933 prev = local64_xchg(&event->hw.prev_count, now); 6934 local64_add(now - prev, &event->count); 6935 } 6936 6937 static void cpu_clock_event_start(struct perf_event *event, int flags) 6938 { 6939 local64_set(&event->hw.prev_count, local_clock()); 6940 perf_swevent_start_hrtimer(event); 6941 } 6942 6943 static void cpu_clock_event_stop(struct perf_event *event, int flags) 6944 { 6945 perf_swevent_cancel_hrtimer(event); 6946 cpu_clock_event_update(event); 6947 } 6948 6949 static int cpu_clock_event_add(struct perf_event *event, int flags) 6950 { 6951 if (flags & PERF_EF_START) 6952 cpu_clock_event_start(event, flags); 6953 perf_event_update_userpage(event); 6954 6955 return 0; 6956 } 6957 6958 static void cpu_clock_event_del(struct perf_event *event, int flags) 6959 { 6960 cpu_clock_event_stop(event, flags); 6961 } 6962 6963 static void cpu_clock_event_read(struct perf_event *event) 6964 { 6965 cpu_clock_event_update(event); 6966 } 6967 6968 static int cpu_clock_event_init(struct perf_event *event) 6969 { 6970 if (event->attr.type != PERF_TYPE_SOFTWARE) 6971 return -ENOENT; 6972 6973 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 6974 return -ENOENT; 6975 6976 /* 6977 * no branch sampling for software events 6978 */ 6979 if (has_branch_stack(event)) 6980 return -EOPNOTSUPP; 6981 6982 perf_swevent_init_hrtimer(event); 6983 6984 return 0; 6985 } 6986 6987 static struct pmu perf_cpu_clock = { 6988 .task_ctx_nr = perf_sw_context, 6989 6990 .capabilities = PERF_PMU_CAP_NO_NMI, 6991 6992 .event_init = cpu_clock_event_init, 6993 .add = cpu_clock_event_add, 6994 .del = cpu_clock_event_del, 6995 .start = cpu_clock_event_start, 6996 .stop = cpu_clock_event_stop, 6997 .read = cpu_clock_event_read, 6998 }; 6999 7000 /* 7001 * Software event: task time clock 7002 */ 7003 7004 static void task_clock_event_update(struct perf_event *event, u64 now) 7005 { 7006 u64 prev; 7007 s64 delta; 7008 7009 prev = local64_xchg(&event->hw.prev_count, now); 7010 delta = now - prev; 7011 local64_add(delta, &event->count); 7012 } 7013 7014 static void task_clock_event_start(struct perf_event *event, int flags) 7015 { 7016 local64_set(&event->hw.prev_count, event->ctx->time); 7017 perf_swevent_start_hrtimer(event); 7018 } 7019 7020 static void task_clock_event_stop(struct perf_event *event, int flags) 7021 { 7022 perf_swevent_cancel_hrtimer(event); 7023 task_clock_event_update(event, event->ctx->time); 7024 } 7025 7026 static int task_clock_event_add(struct perf_event *event, int flags) 7027 { 7028 if (flags & PERF_EF_START) 7029 task_clock_event_start(event, flags); 7030 perf_event_update_userpage(event); 7031 7032 return 0; 7033 } 7034 7035 static void task_clock_event_del(struct perf_event *event, int flags) 7036 { 7037 task_clock_event_stop(event, PERF_EF_UPDATE); 7038 } 7039 7040 static void task_clock_event_read(struct perf_event *event) 7041 { 7042 u64 now = perf_clock(); 7043 u64 delta = now - event->ctx->timestamp; 7044 u64 time = event->ctx->time + delta; 7045 7046 
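	/*
	 * ctx->time is only advanced when the context itself is scheduled
	 * or updated, so extrapolate from ctx->timestamp with the current
	 * perf_clock() before folding the delta in.
	 */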
task_clock_event_update(event, time); 7047 } 7048 7049 static int task_clock_event_init(struct perf_event *event) 7050 { 7051 if (event->attr.type != PERF_TYPE_SOFTWARE) 7052 return -ENOENT; 7053 7054 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7055 return -ENOENT; 7056 7057 /* 7058 * no branch sampling for software events 7059 */ 7060 if (has_branch_stack(event)) 7061 return -EOPNOTSUPP; 7062 7063 perf_swevent_init_hrtimer(event); 7064 7065 return 0; 7066 } 7067 7068 static struct pmu perf_task_clock = { 7069 .task_ctx_nr = perf_sw_context, 7070 7071 .capabilities = PERF_PMU_CAP_NO_NMI, 7072 7073 .event_init = task_clock_event_init, 7074 .add = task_clock_event_add, 7075 .del = task_clock_event_del, 7076 .start = task_clock_event_start, 7077 .stop = task_clock_event_stop, 7078 .read = task_clock_event_read, 7079 }; 7080 7081 static void perf_pmu_nop_void(struct pmu *pmu) 7082 { 7083 } 7084 7085 static int perf_pmu_nop_int(struct pmu *pmu) 7086 { 7087 return 0; 7088 } 7089 7090 static void perf_pmu_start_txn(struct pmu *pmu) 7091 { 7092 perf_pmu_disable(pmu); 7093 } 7094 7095 static int perf_pmu_commit_txn(struct pmu *pmu) 7096 { 7097 perf_pmu_enable(pmu); 7098 return 0; 7099 } 7100 7101 static void perf_pmu_cancel_txn(struct pmu *pmu) 7102 { 7103 perf_pmu_enable(pmu); 7104 } 7105 7106 static int perf_event_idx_default(struct perf_event *event) 7107 { 7108 return 0; 7109 } 7110 7111 /* 7112 * Ensures all contexts with the same task_ctx_nr have the same 7113 * pmu_cpu_context too. 7114 */ 7115 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7116 { 7117 struct pmu *pmu; 7118 7119 if (ctxn < 0) 7120 return NULL; 7121 7122 list_for_each_entry(pmu, &pmus, entry) { 7123 if (pmu->task_ctx_nr == ctxn) 7124 return pmu->pmu_cpu_context; 7125 } 7126 7127 return NULL; 7128 } 7129 7130 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7131 { 7132 int cpu; 7133 7134 for_each_possible_cpu(cpu) { 7135 struct perf_cpu_context *cpuctx; 7136 7137 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7138 7139 if (cpuctx->unique_pmu == old_pmu) 7140 cpuctx->unique_pmu = pmu; 7141 } 7142 } 7143 7144 static void free_pmu_context(struct pmu *pmu) 7145 { 7146 struct pmu *i; 7147 7148 mutex_lock(&pmus_lock); 7149 /* 7150 * Like a real lame refcount. 
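	 *
	 * The percpu cpu-context can be shared by every pmu with the same
	 * task_ctx_nr (see find_pmu_context()), so only free it when no
	 * other pmu on the list still references it.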
7151 */ 7152 list_for_each_entry(i, &pmus, entry) { 7153 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7154 update_pmu_context(i, pmu); 7155 goto out; 7156 } 7157 } 7158 7159 free_percpu(pmu->pmu_cpu_context); 7160 out: 7161 mutex_unlock(&pmus_lock); 7162 } 7163 static struct idr pmu_idr; 7164 7165 static ssize_t 7166 type_show(struct device *dev, struct device_attribute *attr, char *page) 7167 { 7168 struct pmu *pmu = dev_get_drvdata(dev); 7169 7170 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7171 } 7172 static DEVICE_ATTR_RO(type); 7173 7174 static ssize_t 7175 perf_event_mux_interval_ms_show(struct device *dev, 7176 struct device_attribute *attr, 7177 char *page) 7178 { 7179 struct pmu *pmu = dev_get_drvdata(dev); 7180 7181 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7182 } 7183 7184 static DEFINE_MUTEX(mux_interval_mutex); 7185 7186 static ssize_t 7187 perf_event_mux_interval_ms_store(struct device *dev, 7188 struct device_attribute *attr, 7189 const char *buf, size_t count) 7190 { 7191 struct pmu *pmu = dev_get_drvdata(dev); 7192 int timer, cpu, ret; 7193 7194 ret = kstrtoint(buf, 0, &timer); 7195 if (ret) 7196 return ret; 7197 7198 if (timer < 1) 7199 return -EINVAL; 7200 7201 /* same value, noting to do */ 7202 if (timer == pmu->hrtimer_interval_ms) 7203 return count; 7204 7205 mutex_lock(&mux_interval_mutex); 7206 pmu->hrtimer_interval_ms = timer; 7207 7208 /* update all cpuctx for this PMU */ 7209 get_online_cpus(); 7210 for_each_online_cpu(cpu) { 7211 struct perf_cpu_context *cpuctx; 7212 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7213 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7214 7215 cpu_function_call(cpu, 7216 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7217 } 7218 put_online_cpus(); 7219 mutex_unlock(&mux_interval_mutex); 7220 7221 return count; 7222 } 7223 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7224 7225 static struct attribute *pmu_dev_attrs[] = { 7226 &dev_attr_type.attr, 7227 &dev_attr_perf_event_mux_interval_ms.attr, 7228 NULL, 7229 }; 7230 ATTRIBUTE_GROUPS(pmu_dev); 7231 7232 static int pmu_bus_running; 7233 static struct bus_type pmu_bus = { 7234 .name = "event_source", 7235 .dev_groups = pmu_dev_groups, 7236 }; 7237 7238 static void pmu_dev_release(struct device *dev) 7239 { 7240 kfree(dev); 7241 } 7242 7243 static int pmu_dev_alloc(struct pmu *pmu) 7244 { 7245 int ret = -ENOMEM; 7246 7247 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7248 if (!pmu->dev) 7249 goto out; 7250 7251 pmu->dev->groups = pmu->attr_groups; 7252 device_initialize(pmu->dev); 7253 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7254 if (ret) 7255 goto free_dev; 7256 7257 dev_set_drvdata(pmu->dev, pmu); 7258 pmu->dev->bus = &pmu_bus; 7259 pmu->dev->release = pmu_dev_release; 7260 ret = device_add(pmu->dev); 7261 if (ret) 7262 goto free_dev; 7263 7264 out: 7265 return ret; 7266 7267 free_dev: 7268 put_device(pmu->dev); 7269 goto out; 7270 } 7271 7272 static struct lock_class_key cpuctx_mutex; 7273 static struct lock_class_key cpuctx_lock; 7274 7275 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7276 { 7277 int cpu, ret; 7278 7279 mutex_lock(&pmus_lock); 7280 ret = -ENOMEM; 7281 pmu->pmu_disable_count = alloc_percpu(int); 7282 if (!pmu->pmu_disable_count) 7283 goto unlock; 7284 7285 pmu->type = -1; 7286 if (!name) 7287 goto skip_type; 7288 pmu->name = name; 7289 7290 if (type < 0) { 7291 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7292 if (type < 0) { 7293 ret = 
type; 7294 goto free_pdc; 7295 } 7296 } 7297 pmu->type = type; 7298 7299 if (pmu_bus_running) { 7300 ret = pmu_dev_alloc(pmu); 7301 if (ret) 7302 goto free_idr; 7303 } 7304 7305 skip_type: 7306 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7307 if (pmu->pmu_cpu_context) 7308 goto got_cpu_context; 7309 7310 ret = -ENOMEM; 7311 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7312 if (!pmu->pmu_cpu_context) 7313 goto free_dev; 7314 7315 for_each_possible_cpu(cpu) { 7316 struct perf_cpu_context *cpuctx; 7317 7318 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7319 __perf_event_init_context(&cpuctx->ctx); 7320 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7321 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7322 cpuctx->ctx.pmu = pmu; 7323 7324 __perf_mux_hrtimer_init(cpuctx, cpu); 7325 7326 cpuctx->unique_pmu = pmu; 7327 } 7328 7329 got_cpu_context: 7330 if (!pmu->start_txn) { 7331 if (pmu->pmu_enable) { 7332 /* 7333 * If we have pmu_enable/pmu_disable calls, install 7334 * transaction stubs that use that to try and batch 7335 * hardware accesses. 7336 */ 7337 pmu->start_txn = perf_pmu_start_txn; 7338 pmu->commit_txn = perf_pmu_commit_txn; 7339 pmu->cancel_txn = perf_pmu_cancel_txn; 7340 } else { 7341 pmu->start_txn = perf_pmu_nop_void; 7342 pmu->commit_txn = perf_pmu_nop_int; 7343 pmu->cancel_txn = perf_pmu_nop_void; 7344 } 7345 } 7346 7347 if (!pmu->pmu_enable) { 7348 pmu->pmu_enable = perf_pmu_nop_void; 7349 pmu->pmu_disable = perf_pmu_nop_void; 7350 } 7351 7352 if (!pmu->event_idx) 7353 pmu->event_idx = perf_event_idx_default; 7354 7355 list_add_rcu(&pmu->entry, &pmus); 7356 atomic_set(&pmu->exclusive_cnt, 0); 7357 ret = 0; 7358 unlock: 7359 mutex_unlock(&pmus_lock); 7360 7361 return ret; 7362 7363 free_dev: 7364 device_del(pmu->dev); 7365 put_device(pmu->dev); 7366 7367 free_idr: 7368 if (pmu->type >= PERF_TYPE_MAX) 7369 idr_remove(&pmu_idr, pmu->type); 7370 7371 free_pdc: 7372 free_percpu(pmu->pmu_disable_count); 7373 goto unlock; 7374 } 7375 EXPORT_SYMBOL_GPL(perf_pmu_register); 7376 7377 void perf_pmu_unregister(struct pmu *pmu) 7378 { 7379 mutex_lock(&pmus_lock); 7380 list_del_rcu(&pmu->entry); 7381 mutex_unlock(&pmus_lock); 7382 7383 /* 7384 * We dereference the pmu list under both SRCU and regular RCU, so 7385 * synchronize against both of those. 7386 */ 7387 synchronize_srcu(&pmus_srcu); 7388 synchronize_rcu(); 7389 7390 free_percpu(pmu->pmu_disable_count); 7391 if (pmu->type >= PERF_TYPE_MAX) 7392 idr_remove(&pmu_idr, pmu->type); 7393 device_del(pmu->dev); 7394 put_device(pmu->dev); 7395 free_pmu_context(pmu); 7396 } 7397 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7398 7399 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7400 { 7401 struct perf_event_context *ctx = NULL; 7402 int ret; 7403 7404 if (!try_module_get(pmu->module)) 7405 return -ENODEV; 7406 7407 if (event->group_leader != event) { 7408 /* 7409 * This ctx->mutex can nest when we're called through 7410 * inheritance. See the perf_event_ctx_lock_nested() comment. 
7411 */ 7412 ctx = perf_event_ctx_lock_nested(event->group_leader, 7413 SINGLE_DEPTH_NESTING); 7414 BUG_ON(!ctx); 7415 } 7416 7417 event->pmu = pmu; 7418 ret = pmu->event_init(event); 7419 7420 if (ctx) 7421 perf_event_ctx_unlock(event->group_leader, ctx); 7422 7423 if (ret) 7424 module_put(pmu->module); 7425 7426 return ret; 7427 } 7428 7429 struct pmu *perf_init_event(struct perf_event *event) 7430 { 7431 struct pmu *pmu = NULL; 7432 int idx; 7433 int ret; 7434 7435 idx = srcu_read_lock(&pmus_srcu); 7436 7437 rcu_read_lock(); 7438 pmu = idr_find(&pmu_idr, event->attr.type); 7439 rcu_read_unlock(); 7440 if (pmu) { 7441 ret = perf_try_init_event(pmu, event); 7442 if (ret) 7443 pmu = ERR_PTR(ret); 7444 goto unlock; 7445 } 7446 7447 list_for_each_entry_rcu(pmu, &pmus, entry) { 7448 ret = perf_try_init_event(pmu, event); 7449 if (!ret) 7450 goto unlock; 7451 7452 if (ret != -ENOENT) { 7453 pmu = ERR_PTR(ret); 7454 goto unlock; 7455 } 7456 } 7457 pmu = ERR_PTR(-ENOENT); 7458 unlock: 7459 srcu_read_unlock(&pmus_srcu, idx); 7460 7461 return pmu; 7462 } 7463 7464 static void account_event_cpu(struct perf_event *event, int cpu) 7465 { 7466 if (event->parent) 7467 return; 7468 7469 if (is_cgroup_event(event)) 7470 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7471 } 7472 7473 static void account_event(struct perf_event *event) 7474 { 7475 if (event->parent) 7476 return; 7477 7478 if (event->attach_state & PERF_ATTACH_TASK) 7479 static_key_slow_inc(&perf_sched_events.key); 7480 if (event->attr.mmap || event->attr.mmap_data) 7481 atomic_inc(&nr_mmap_events); 7482 if (event->attr.comm) 7483 atomic_inc(&nr_comm_events); 7484 if (event->attr.task) 7485 atomic_inc(&nr_task_events); 7486 if (event->attr.freq) { 7487 if (atomic_inc_return(&nr_freq_events) == 1) 7488 tick_nohz_full_kick_all(); 7489 } 7490 if (has_branch_stack(event)) 7491 static_key_slow_inc(&perf_sched_events.key); 7492 if (is_cgroup_event(event)) 7493 static_key_slow_inc(&perf_sched_events.key); 7494 7495 account_event_cpu(event, event->cpu); 7496 } 7497 7498 /* 7499 * Allocate and initialize a event structure 7500 */ 7501 static struct perf_event * 7502 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7503 struct task_struct *task, 7504 struct perf_event *group_leader, 7505 struct perf_event *parent_event, 7506 perf_overflow_handler_t overflow_handler, 7507 void *context, int cgroup_fd) 7508 { 7509 struct pmu *pmu; 7510 struct perf_event *event; 7511 struct hw_perf_event *hwc; 7512 long err = -EINVAL; 7513 7514 if ((unsigned)cpu >= nr_cpu_ids) { 7515 if (!task || cpu != -1) 7516 return ERR_PTR(-EINVAL); 7517 } 7518 7519 event = kzalloc(sizeof(*event), GFP_KERNEL); 7520 if (!event) 7521 return ERR_PTR(-ENOMEM); 7522 7523 /* 7524 * Single events are their own group leaders, with an 7525 * empty sibling list: 7526 */ 7527 if (!group_leader) 7528 group_leader = event; 7529 7530 mutex_init(&event->child_mutex); 7531 INIT_LIST_HEAD(&event->child_list); 7532 7533 INIT_LIST_HEAD(&event->group_entry); 7534 INIT_LIST_HEAD(&event->event_entry); 7535 INIT_LIST_HEAD(&event->sibling_list); 7536 INIT_LIST_HEAD(&event->rb_entry); 7537 INIT_LIST_HEAD(&event->active_entry); 7538 INIT_HLIST_NODE(&event->hlist_entry); 7539 7540 7541 init_waitqueue_head(&event->waitq); 7542 init_irq_work(&event->pending, perf_pending_event); 7543 7544 mutex_init(&event->mmap_mutex); 7545 7546 atomic_long_set(&event->refcount, 1); 7547 event->cpu = cpu; 7548 event->attr = *attr; 7549 event->group_leader = group_leader; 7550 event->pmu = NULL; 7551 event->oncpu = -1; 
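	/* Not scheduled on any CPU yet; event_sched_in() will set ->oncpu. */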
7552 7553 event->parent = parent_event; 7554 7555 event->ns = get_pid_ns(task_active_pid_ns(current)); 7556 event->id = atomic64_inc_return(&perf_event_id); 7557 7558 event->state = PERF_EVENT_STATE_INACTIVE; 7559 7560 if (task) { 7561 event->attach_state = PERF_ATTACH_TASK; 7562 /* 7563 * XXX pmu::event_init needs to know what task to account to 7564 * and we cannot use the ctx information because we need the 7565 * pmu before we get a ctx. 7566 */ 7567 event->hw.target = task; 7568 } 7569 7570 event->clock = &local_clock; 7571 if (parent_event) 7572 event->clock = parent_event->clock; 7573 7574 if (!overflow_handler && parent_event) { 7575 overflow_handler = parent_event->overflow_handler; 7576 context = parent_event->overflow_handler_context; 7577 } 7578 7579 event->overflow_handler = overflow_handler; 7580 event->overflow_handler_context = context; 7581 7582 perf_event__state_init(event); 7583 7584 pmu = NULL; 7585 7586 hwc = &event->hw; 7587 hwc->sample_period = attr->sample_period; 7588 if (attr->freq && attr->sample_freq) 7589 hwc->sample_period = 1; 7590 hwc->last_period = hwc->sample_period; 7591 7592 local64_set(&hwc->period_left, hwc->sample_period); 7593 7594 /* 7595 * we currently do not support PERF_FORMAT_GROUP on inherited events 7596 */ 7597 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7598 goto err_ns; 7599 7600 if (!has_branch_stack(event)) 7601 event->attr.branch_sample_type = 0; 7602 7603 if (cgroup_fd != -1) { 7604 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7605 if (err) 7606 goto err_ns; 7607 } 7608 7609 pmu = perf_init_event(event); 7610 if (!pmu) 7611 goto err_ns; 7612 else if (IS_ERR(pmu)) { 7613 err = PTR_ERR(pmu); 7614 goto err_ns; 7615 } 7616 7617 err = exclusive_event_init(event); 7618 if (err) 7619 goto err_pmu; 7620 7621 if (!event->parent) { 7622 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7623 err = get_callchain_buffers(); 7624 if (err) 7625 goto err_per_task; 7626 } 7627 } 7628 7629 return event; 7630 7631 err_per_task: 7632 exclusive_event_destroy(event); 7633 7634 err_pmu: 7635 if (event->destroy) 7636 event->destroy(event); 7637 module_put(pmu->module); 7638 err_ns: 7639 if (is_cgroup_event(event)) 7640 perf_detach_cgroup(event); 7641 if (event->ns) 7642 put_pid_ns(event->ns); 7643 kfree(event); 7644 7645 return ERR_PTR(err); 7646 } 7647 7648 static int perf_copy_attr(struct perf_event_attr __user *uattr, 7649 struct perf_event_attr *attr) 7650 { 7651 u32 size; 7652 int ret; 7653 7654 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 7655 return -EFAULT; 7656 7657 /* 7658 * zero the full structure, so that a short copy will be nice. 7659 */ 7660 memset(attr, 0, sizeof(*attr)); 7661 7662 ret = get_user(size, &uattr->size); 7663 if (ret) 7664 return ret; 7665 7666 if (size > PAGE_SIZE) /* silly large */ 7667 goto err_size; 7668 7669 if (!size) /* abi compat */ 7670 size = PERF_ATTR_SIZE_VER0; 7671 7672 if (size < PERF_ATTR_SIZE_VER0) 7673 goto err_size; 7674 7675 /* 7676 * If we're handed a bigger struct than we know of, 7677 * ensure all the unknown bits are 0 - i.e. new 7678 * user-space does not rely on any kernel feature 7679 * extensions we dont know about yet. 
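	 *
	 * Concretely: every byte past sizeof(*attr) must read back as zero;
	 * a non-zero byte makes us fail with -E2BIG and report the size we
	 * do understand back through uattr->size.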
7680 */ 7681 if (size > sizeof(*attr)) { 7682 unsigned char __user *addr; 7683 unsigned char __user *end; 7684 unsigned char val; 7685 7686 addr = (void __user *)uattr + sizeof(*attr); 7687 end = (void __user *)uattr + size; 7688 7689 for (; addr < end; addr++) { 7690 ret = get_user(val, addr); 7691 if (ret) 7692 return ret; 7693 if (val) 7694 goto err_size; 7695 } 7696 size = sizeof(*attr); 7697 } 7698 7699 ret = copy_from_user(attr, uattr, size); 7700 if (ret) 7701 return -EFAULT; 7702 7703 if (attr->__reserved_1) 7704 return -EINVAL; 7705 7706 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 7707 return -EINVAL; 7708 7709 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 7710 return -EINVAL; 7711 7712 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 7713 u64 mask = attr->branch_sample_type; 7714 7715 /* only using defined bits */ 7716 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 7717 return -EINVAL; 7718 7719 /* at least one branch bit must be set */ 7720 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 7721 return -EINVAL; 7722 7723 /* propagate priv level, when not set for branch */ 7724 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 7725 7726 /* exclude_kernel checked on syscall entry */ 7727 if (!attr->exclude_kernel) 7728 mask |= PERF_SAMPLE_BRANCH_KERNEL; 7729 7730 if (!attr->exclude_user) 7731 mask |= PERF_SAMPLE_BRANCH_USER; 7732 7733 if (!attr->exclude_hv) 7734 mask |= PERF_SAMPLE_BRANCH_HV; 7735 /* 7736 * adjust user setting (for HW filter setup) 7737 */ 7738 attr->branch_sample_type = mask; 7739 } 7740 /* privileged levels capture (kernel, hv): check permissions */ 7741 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 7742 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7743 return -EACCES; 7744 } 7745 7746 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 7747 ret = perf_reg_validate(attr->sample_regs_user); 7748 if (ret) 7749 return ret; 7750 } 7751 7752 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 7753 if (!arch_perf_have_user_stack_dump()) 7754 return -ENOSYS; 7755 7756 /* 7757 * We have __u32 type for the size, but so far 7758 * we can only use __u16 as maximum due to the 7759 * __u16 sample size limit. 7760 */ 7761 if (attr->sample_stack_user >= USHRT_MAX) 7762 ret = -EINVAL; 7763 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 7764 ret = -EINVAL; 7765 } 7766 7767 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 7768 ret = perf_reg_validate(attr->sample_regs_intr); 7769 out: 7770 return ret; 7771 7772 err_size: 7773 put_user(sizeof(*attr), &uattr->size); 7774 ret = -E2BIG; 7775 goto out; 7776 } 7777 7778 static int 7779 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 7780 { 7781 struct ring_buffer *rb = NULL; 7782 int ret = -EINVAL; 7783 7784 if (!output_event) 7785 goto set; 7786 7787 /* don't allow circular references */ 7788 if (event == output_event) 7789 goto out; 7790 7791 /* 7792 * Don't allow cross-cpu buffers 7793 */ 7794 if (output_event->cpu != event->cpu) 7795 goto out; 7796 7797 /* 7798 * If its not a per-cpu rb, it must be the same task. 7799 */ 7800 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 7801 goto out; 7802 7803 /* 7804 * Mixing clocks in the same buffer is trouble you don't need. 
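	 *
	 * event->clock is selected per event via perf_event_set_clock();
	 * redirecting output between events with different clocks would
	 * interleave timestamps from different time bases in one buffer.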
7805 */ 7806 if (output_event->clock != event->clock) 7807 goto out; 7808 7809 /* 7810 * If both events generate aux data, they must be on the same PMU 7811 */ 7812 if (has_aux(event) && has_aux(output_event) && 7813 event->pmu != output_event->pmu) 7814 goto out; 7815 7816 set: 7817 mutex_lock(&event->mmap_mutex); 7818 /* Can't redirect output if we've got an active mmap() */ 7819 if (atomic_read(&event->mmap_count)) 7820 goto unlock; 7821 7822 if (output_event) { 7823 /* get the rb we want to redirect to */ 7824 rb = ring_buffer_get(output_event); 7825 if (!rb) 7826 goto unlock; 7827 } 7828 7829 ring_buffer_attach(event, rb); 7830 7831 ret = 0; 7832 unlock: 7833 mutex_unlock(&event->mmap_mutex); 7834 7835 out: 7836 return ret; 7837 } 7838 7839 static void mutex_lock_double(struct mutex *a, struct mutex *b) 7840 { 7841 if (b < a) 7842 swap(a, b); 7843 7844 mutex_lock(a); 7845 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 7846 } 7847 7848 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 7849 { 7850 bool nmi_safe = false; 7851 7852 switch (clk_id) { 7853 case CLOCK_MONOTONIC: 7854 event->clock = &ktime_get_mono_fast_ns; 7855 nmi_safe = true; 7856 break; 7857 7858 case CLOCK_MONOTONIC_RAW: 7859 event->clock = &ktime_get_raw_fast_ns; 7860 nmi_safe = true; 7861 break; 7862 7863 case CLOCK_REALTIME: 7864 event->clock = &ktime_get_real_ns; 7865 break; 7866 7867 case CLOCK_BOOTTIME: 7868 event->clock = &ktime_get_boot_ns; 7869 break; 7870 7871 case CLOCK_TAI: 7872 event->clock = &ktime_get_tai_ns; 7873 break; 7874 7875 default: 7876 return -EINVAL; 7877 } 7878 7879 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 7880 return -EINVAL; 7881 7882 return 0; 7883 } 7884 7885 /** 7886 * sys_perf_event_open - open a performance event, associate it to a task/cpu 7887 * 7888 * @attr_uptr: event_id type attributes for monitoring/sampling 7889 * @pid: target pid 7890 * @cpu: target cpu 7891 * @group_fd: group leader event fd 7892 */ 7893 SYSCALL_DEFINE5(perf_event_open, 7894 struct perf_event_attr __user *, attr_uptr, 7895 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 7896 { 7897 struct perf_event *group_leader = NULL, *output_event = NULL; 7898 struct perf_event *event, *sibling; 7899 struct perf_event_attr attr; 7900 struct perf_event_context *ctx, *uninitialized_var(gctx); 7901 struct file *event_file = NULL; 7902 struct fd group = {NULL, 0}; 7903 struct task_struct *task = NULL; 7904 struct pmu *pmu; 7905 int event_fd; 7906 int move_group = 0; 7907 int err; 7908 int f_flags = O_RDWR; 7909 int cgroup_fd = -1; 7910 7911 /* for future expandability... */ 7912 if (flags & ~PERF_FLAG_ALL) 7913 return -EINVAL; 7914 7915 err = perf_copy_attr(attr_uptr, &attr); 7916 if (err) 7917 return err; 7918 7919 if (!attr.exclude_kernel) { 7920 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7921 return -EACCES; 7922 } 7923 7924 if (attr.freq) { 7925 if (attr.sample_freq > sysctl_perf_event_sample_rate) 7926 return -EINVAL; 7927 } else { 7928 if (attr.sample_period & (1ULL << 63)) 7929 return -EINVAL; 7930 } 7931 7932 /* 7933 * In cgroup mode, the pid argument is used to pass the fd 7934 * opened to the cgroup directory in cgroupfs. The cpu argument 7935 * designates the cpu on which to monitor threads from that 7936 * cgroup. 
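	 *
	 * An illustrative (not normative) invocation, assuming a typical
	 * cgroupfs mount point:
	 *
	 *	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	 *	syscall(__NR_perf_event_open, &attr, cgrp_fd, 1, -1,
	 *		PERF_FLAG_PID_CGROUP);
	 *
	 * monitors tasks of cgroup "mygroup" while they run on CPU 1.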
7937 */ 7938 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 7939 return -EINVAL; 7940 7941 if (flags & PERF_FLAG_FD_CLOEXEC) 7942 f_flags |= O_CLOEXEC; 7943 7944 event_fd = get_unused_fd_flags(f_flags); 7945 if (event_fd < 0) 7946 return event_fd; 7947 7948 if (group_fd != -1) { 7949 err = perf_fget_light(group_fd, &group); 7950 if (err) 7951 goto err_fd; 7952 group_leader = group.file->private_data; 7953 if (flags & PERF_FLAG_FD_OUTPUT) 7954 output_event = group_leader; 7955 if (flags & PERF_FLAG_FD_NO_GROUP) 7956 group_leader = NULL; 7957 } 7958 7959 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 7960 task = find_lively_task_by_vpid(pid); 7961 if (IS_ERR(task)) { 7962 err = PTR_ERR(task); 7963 goto err_group_fd; 7964 } 7965 } 7966 7967 if (task && group_leader && 7968 group_leader->attr.inherit != attr.inherit) { 7969 err = -EINVAL; 7970 goto err_task; 7971 } 7972 7973 get_online_cpus(); 7974 7975 if (flags & PERF_FLAG_PID_CGROUP) 7976 cgroup_fd = pid; 7977 7978 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 7979 NULL, NULL, cgroup_fd); 7980 if (IS_ERR(event)) { 7981 err = PTR_ERR(event); 7982 goto err_cpus; 7983 } 7984 7985 if (is_sampling_event(event)) { 7986 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 7987 err = -ENOTSUPP; 7988 goto err_alloc; 7989 } 7990 } 7991 7992 account_event(event); 7993 7994 /* 7995 * Special case software events and allow them to be part of 7996 * any hardware group. 7997 */ 7998 pmu = event->pmu; 7999 8000 if (attr.use_clockid) { 8001 err = perf_event_set_clock(event, attr.clockid); 8002 if (err) 8003 goto err_alloc; 8004 } 8005 8006 if (group_leader && 8007 (is_software_event(event) != is_software_event(group_leader))) { 8008 if (is_software_event(event)) { 8009 /* 8010 * If event and group_leader are not both a software 8011 * event, and event is, then group leader is not. 8012 * 8013 * Allow the addition of software events to !software 8014 * groups, this is safe because software events never 8015 * fail to schedule. 8016 */ 8017 pmu = group_leader->pmu; 8018 } else if (is_software_event(group_leader) && 8019 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8020 /* 8021 * In case the group is a pure software group, and we 8022 * try to add a hardware event, move the whole group to 8023 * the hardware context. 8024 */ 8025 move_group = 1; 8026 } 8027 } 8028 8029 /* 8030 * Get the target context (task or percpu): 8031 */ 8032 ctx = find_get_context(pmu, task, event); 8033 if (IS_ERR(ctx)) { 8034 err = PTR_ERR(ctx); 8035 goto err_alloc; 8036 } 8037 8038 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8039 err = -EBUSY; 8040 goto err_context; 8041 } 8042 8043 if (task) { 8044 put_task_struct(task); 8045 task = NULL; 8046 } 8047 8048 /* 8049 * Look up the group leader (we will attach this event to it): 8050 */ 8051 if (group_leader) { 8052 err = -EINVAL; 8053 8054 /* 8055 * Do not allow a recursive hierarchy (this new sibling 8056 * becoming part of another group-sibling): 8057 */ 8058 if (group_leader->group_leader != group_leader) 8059 goto err_context; 8060 8061 /* All events in a group should have the same clock */ 8062 if (group_leader->clock != event->clock) 8063 goto err_context; 8064 8065 /* 8066 * Do not allow to attach to a group in a different 8067 * task or CPU context: 8068 */ 8069 if (move_group) { 8070 /* 8071 * Make sure we're both on the same task, or both 8072 * per-cpu events. 
8073 */ 8074 if (group_leader->ctx->task != ctx->task) 8075 goto err_context; 8076 8077 /* 8078 * Make sure we're both events for the same CPU; 8079 * grouping events for different CPUs is broken; since 8080 * you can never concurrently schedule them anyhow. 8081 */ 8082 if (group_leader->cpu != event->cpu) 8083 goto err_context; 8084 } else { 8085 if (group_leader->ctx != ctx) 8086 goto err_context; 8087 } 8088 8089 /* 8090 * Only a group leader can be exclusive or pinned 8091 */ 8092 if (attr.exclusive || attr.pinned) 8093 goto err_context; 8094 } 8095 8096 if (output_event) { 8097 err = perf_event_set_output(event, output_event); 8098 if (err) 8099 goto err_context; 8100 } 8101 8102 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 8103 f_flags); 8104 if (IS_ERR(event_file)) { 8105 err = PTR_ERR(event_file); 8106 goto err_context; 8107 } 8108 8109 if (move_group) { 8110 gctx = group_leader->ctx; 8111 8112 /* 8113 * See perf_event_ctx_lock() for comments on the details 8114 * of swizzling perf_event::ctx. 8115 */ 8116 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8117 8118 perf_remove_from_context(group_leader, false); 8119 8120 list_for_each_entry(sibling, &group_leader->sibling_list, 8121 group_entry) { 8122 perf_remove_from_context(sibling, false); 8123 put_ctx(gctx); 8124 } 8125 } else { 8126 mutex_lock(&ctx->mutex); 8127 } 8128 8129 WARN_ON_ONCE(ctx->parent_ctx); 8130 8131 if (move_group) { 8132 /* 8133 * Wait for everybody to stop referencing the events through 8134 * the old lists, before installing it on new lists. 8135 */ 8136 synchronize_rcu(); 8137 8138 /* 8139 * Install the group siblings before the group leader. 8140 * 8141 * Because a group leader will try and install the entire group 8142 * (through the sibling list, which is still in-tact), we can 8143 * end up with siblings installed in the wrong context. 8144 * 8145 * By installing siblings first we NO-OP because they're not 8146 * reachable through the group lists. 8147 */ 8148 list_for_each_entry(sibling, &group_leader->sibling_list, 8149 group_entry) { 8150 perf_event__state_init(sibling); 8151 perf_install_in_context(ctx, sibling, sibling->cpu); 8152 get_ctx(ctx); 8153 } 8154 8155 /* 8156 * Removing from the context ends up with disabled 8157 * event. What we want here is event in the initial 8158 * startup state, ready to be add into new context. 8159 */ 8160 perf_event__state_init(group_leader); 8161 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8162 get_ctx(ctx); 8163 } 8164 8165 if (!exclusive_event_installable(event, ctx)) { 8166 err = -EBUSY; 8167 mutex_unlock(&ctx->mutex); 8168 fput(event_file); 8169 goto err_context; 8170 } 8171 8172 perf_install_in_context(ctx, event, event->cpu); 8173 perf_unpin_context(ctx); 8174 8175 if (move_group) { 8176 mutex_unlock(&gctx->mutex); 8177 put_ctx(gctx); 8178 } 8179 mutex_unlock(&ctx->mutex); 8180 8181 put_online_cpus(); 8182 8183 event->owner = current; 8184 8185 mutex_lock(¤t->perf_event_mutex); 8186 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 8187 mutex_unlock(¤t->perf_event_mutex); 8188 8189 /* 8190 * Precalculate sample_data sizes 8191 */ 8192 perf_event__header_size(event); 8193 perf_event__id_header_size(event); 8194 8195 /* 8196 * Drop the reference on the group_event after placing the 8197 * new event on the sibling_list. This ensures destruction 8198 * of the group leader will find the pointer to itself in 8199 * perf_group_detach(). 
8200 */ 8201 fdput(group); 8202 fd_install(event_fd, event_file); 8203 return event_fd; 8204 8205 err_context: 8206 perf_unpin_context(ctx); 8207 put_ctx(ctx); 8208 err_alloc: 8209 free_event(event); 8210 err_cpus: 8211 put_online_cpus(); 8212 err_task: 8213 if (task) 8214 put_task_struct(task); 8215 err_group_fd: 8216 fdput(group); 8217 err_fd: 8218 put_unused_fd(event_fd); 8219 return err; 8220 } 8221 8222 /** 8223 * perf_event_create_kernel_counter 8224 * 8225 * @attr: attributes of the counter to create 8226 * @cpu: cpu in which the counter is bound 8227 * @task: task to profile (NULL for percpu) 8228 */ 8229 struct perf_event * 8230 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8231 struct task_struct *task, 8232 perf_overflow_handler_t overflow_handler, 8233 void *context) 8234 { 8235 struct perf_event_context *ctx; 8236 struct perf_event *event; 8237 int err; 8238 8239 /* 8240 * Get the target context (task or percpu): 8241 */ 8242 8243 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8244 overflow_handler, context, -1); 8245 if (IS_ERR(event)) { 8246 err = PTR_ERR(event); 8247 goto err; 8248 } 8249 8250 /* Mark owner so we could distinguish it from user events. */ 8251 event->owner = EVENT_OWNER_KERNEL; 8252 8253 account_event(event); 8254 8255 ctx = find_get_context(event->pmu, task, event); 8256 if (IS_ERR(ctx)) { 8257 err = PTR_ERR(ctx); 8258 goto err_free; 8259 } 8260 8261 WARN_ON_ONCE(ctx->parent_ctx); 8262 mutex_lock(&ctx->mutex); 8263 if (!exclusive_event_installable(event, ctx)) { 8264 mutex_unlock(&ctx->mutex); 8265 perf_unpin_context(ctx); 8266 put_ctx(ctx); 8267 err = -EBUSY; 8268 goto err_free; 8269 } 8270 8271 perf_install_in_context(ctx, event, cpu); 8272 perf_unpin_context(ctx); 8273 mutex_unlock(&ctx->mutex); 8274 8275 return event; 8276 8277 err_free: 8278 free_event(event); 8279 err: 8280 return ERR_PTR(err); 8281 } 8282 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8283 8284 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8285 { 8286 struct perf_event_context *src_ctx; 8287 struct perf_event_context *dst_ctx; 8288 struct perf_event *event, *tmp; 8289 LIST_HEAD(events); 8290 8291 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8292 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8293 8294 /* 8295 * See perf_event_ctx_lock() for comments on the details 8296 * of swizzling perf_event::ctx. 8297 */ 8298 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); 8299 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 8300 event_entry) { 8301 perf_remove_from_context(event, false); 8302 unaccount_event_cpu(event, src_cpu); 8303 put_ctx(src_ctx); 8304 list_add(&event->migrate_entry, &events); 8305 } 8306 8307 /* 8308 * Wait for the events to quiesce before re-instating them. 8309 */ 8310 synchronize_rcu(); 8311 8312 /* 8313 * Re-instate events in 2 passes. 8314 * 8315 * Skip over group leaders and only install siblings on this first 8316 * pass, siblings will not get enabled without a leader, however a 8317 * leader will enable its siblings, even if those are still on the old 8318 * context. 
8319 */ 8320 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8321 if (event->group_leader == event) 8322 continue; 8323 8324 list_del(&event->migrate_entry); 8325 if (event->state >= PERF_EVENT_STATE_OFF) 8326 event->state = PERF_EVENT_STATE_INACTIVE; 8327 account_event_cpu(event, dst_cpu); 8328 perf_install_in_context(dst_ctx, event, dst_cpu); 8329 get_ctx(dst_ctx); 8330 } 8331 8332 /* 8333 * Once all the siblings are setup properly, install the group leaders 8334 * to make it go. 8335 */ 8336 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8337 list_del(&event->migrate_entry); 8338 if (event->state >= PERF_EVENT_STATE_OFF) 8339 event->state = PERF_EVENT_STATE_INACTIVE; 8340 account_event_cpu(event, dst_cpu); 8341 perf_install_in_context(dst_ctx, event, dst_cpu); 8342 get_ctx(dst_ctx); 8343 } 8344 mutex_unlock(&dst_ctx->mutex); 8345 mutex_unlock(&src_ctx->mutex); 8346 } 8347 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 8348 8349 static void sync_child_event(struct perf_event *child_event, 8350 struct task_struct *child) 8351 { 8352 struct perf_event *parent_event = child_event->parent; 8353 u64 child_val; 8354 8355 if (child_event->attr.inherit_stat) 8356 perf_event_read_event(child_event, child); 8357 8358 child_val = perf_event_count(child_event); 8359 8360 /* 8361 * Add back the child's count to the parent's count: 8362 */ 8363 atomic64_add(child_val, &parent_event->child_count); 8364 atomic64_add(child_event->total_time_enabled, 8365 &parent_event->child_total_time_enabled); 8366 atomic64_add(child_event->total_time_running, 8367 &parent_event->child_total_time_running); 8368 8369 /* 8370 * Remove this event from the parent's list 8371 */ 8372 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 8373 mutex_lock(&parent_event->child_mutex); 8374 list_del_init(&child_event->child_list); 8375 mutex_unlock(&parent_event->child_mutex); 8376 8377 /* 8378 * Make sure user/parent get notified, that we just 8379 * lost one event. 8380 */ 8381 perf_event_wakeup(parent_event); 8382 8383 /* 8384 * Release the parent event, if this was the last 8385 * reference to it. 8386 */ 8387 put_event(parent_event); 8388 } 8389 8390 static void 8391 __perf_event_exit_task(struct perf_event *child_event, 8392 struct perf_event_context *child_ctx, 8393 struct task_struct *child) 8394 { 8395 /* 8396 * Do not destroy the 'original' grouping; because of the context 8397 * switch optimization the original events could've ended up in a 8398 * random child task. 8399 * 8400 * If we were to destroy the original group, all group related 8401 * operations would cease to function properly after this random 8402 * child dies. 8403 * 8404 * Do destroy all inherited groups, we don't care about those 8405 * and being thorough is better. 8406 */ 8407 perf_remove_from_context(child_event, !!child_event->parent); 8408 8409 /* 8410 * It can happen that the parent exits first, and has events 8411 * that are still around due to the child reference. These 8412 * events need to be zapped. 
8413 */ 8414 if (child_event->parent) { 8415 sync_child_event(child_event, child); 8416 free_event(child_event); 8417 } else { 8418 child_event->state = PERF_EVENT_STATE_EXIT; 8419 perf_event_wakeup(child_event); 8420 } 8421 } 8422 8423 static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 8424 { 8425 struct perf_event *child_event, *next; 8426 struct perf_event_context *child_ctx, *clone_ctx = NULL; 8427 unsigned long flags; 8428 8429 if (likely(!child->perf_event_ctxp[ctxn])) { 8430 perf_event_task(child, NULL, 0); 8431 return; 8432 } 8433 8434 local_irq_save(flags); 8435 /* 8436 * We can't reschedule here because interrupts are disabled, 8437 * and either child is current or it is a task that can't be 8438 * scheduled, so we are now safe from rescheduling changing 8439 * our context. 8440 */ 8441 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); 8442 8443 /* 8444 * Take the context lock here so that if find_get_context is 8445 * reading child->perf_event_ctxp, we wait until it has 8446 * incremented the context's refcount before we do put_ctx below. 8447 */ 8448 raw_spin_lock(&child_ctx->lock); 8449 task_ctx_sched_out(child_ctx); 8450 child->perf_event_ctxp[ctxn] = NULL; 8451 8452 /* 8453 * If this context is a clone; unclone it so it can't get 8454 * swapped to another process while we're removing all 8455 * the events from it. 8456 */ 8457 clone_ctx = unclone_ctx(child_ctx); 8458 update_context_time(child_ctx); 8459 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 8460 8461 if (clone_ctx) 8462 put_ctx(clone_ctx); 8463 8464 /* 8465 * Report the task dead after unscheduling the events so that we 8466 * won't get any samples after PERF_RECORD_EXIT. We can however still 8467 * get a few PERF_RECORD_READ events. 8468 */ 8469 perf_event_task(child, child_ctx, 0); 8470 8471 /* 8472 * We can recurse on the same lock type through: 8473 * 8474 * __perf_event_exit_task() 8475 * sync_child_event() 8476 * put_event() 8477 * mutex_lock(&ctx->mutex) 8478 * 8479 * But since its the parent context it won't be the same instance. 8480 */ 8481 mutex_lock(&child_ctx->mutex); 8482 8483 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) 8484 __perf_event_exit_task(child_event, child_ctx, child); 8485 8486 mutex_unlock(&child_ctx->mutex); 8487 8488 put_ctx(child_ctx); 8489 } 8490 8491 /* 8492 * When a child task exits, feed back event values to parent events. 8493 */ 8494 void perf_event_exit_task(struct task_struct *child) 8495 { 8496 struct perf_event *event, *tmp; 8497 int ctxn; 8498 8499 mutex_lock(&child->perf_event_mutex); 8500 list_for_each_entry_safe(event, tmp, &child->perf_event_list, 8501 owner_entry) { 8502 list_del_init(&event->owner_entry); 8503 8504 /* 8505 * Ensure the list deletion is visible before we clear 8506 * the owner, closes a race against perf_release() where 8507 * we need to serialize on the owner->perf_event_mutex. 
8508 */ 8509 smp_wmb(); 8510 event->owner = NULL; 8511 } 8512 mutex_unlock(&child->perf_event_mutex); 8513 8514 for_each_task_context_nr(ctxn) 8515 perf_event_exit_task_context(child, ctxn); 8516 } 8517 8518 static void perf_free_event(struct perf_event *event, 8519 struct perf_event_context *ctx) 8520 { 8521 struct perf_event *parent = event->parent; 8522 8523 if (WARN_ON_ONCE(!parent)) 8524 return; 8525 8526 mutex_lock(&parent->child_mutex); 8527 list_del_init(&event->child_list); 8528 mutex_unlock(&parent->child_mutex); 8529 8530 put_event(parent); 8531 8532 raw_spin_lock_irq(&ctx->lock); 8533 perf_group_detach(event); 8534 list_del_event(event, ctx); 8535 raw_spin_unlock_irq(&ctx->lock); 8536 free_event(event); 8537 } 8538 8539 /* 8540 * Free an unexposed, unused context as created by inheritance by 8541 * perf_event_init_task below, used by fork() in case of fail. 8542 * 8543 * Not all locks are strictly required, but take them anyway to be nice and 8544 * help out with the lockdep assertions. 8545 */ 8546 void perf_event_free_task(struct task_struct *task) 8547 { 8548 struct perf_event_context *ctx; 8549 struct perf_event *event, *tmp; 8550 int ctxn; 8551 8552 for_each_task_context_nr(ctxn) { 8553 ctx = task->perf_event_ctxp[ctxn]; 8554 if (!ctx) 8555 continue; 8556 8557 mutex_lock(&ctx->mutex); 8558 again: 8559 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, 8560 group_entry) 8561 perf_free_event(event, ctx); 8562 8563 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, 8564 group_entry) 8565 perf_free_event(event, ctx); 8566 8567 if (!list_empty(&ctx->pinned_groups) || 8568 !list_empty(&ctx->flexible_groups)) 8569 goto again; 8570 8571 mutex_unlock(&ctx->mutex); 8572 8573 put_ctx(ctx); 8574 } 8575 } 8576 8577 void perf_event_delayed_put(struct task_struct *task) 8578 { 8579 int ctxn; 8580 8581 for_each_task_context_nr(ctxn) 8582 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); 8583 } 8584 8585 /* 8586 * inherit a event from parent task to child task: 8587 */ 8588 static struct perf_event * 8589 inherit_event(struct perf_event *parent_event, 8590 struct task_struct *parent, 8591 struct perf_event_context *parent_ctx, 8592 struct task_struct *child, 8593 struct perf_event *group_leader, 8594 struct perf_event_context *child_ctx) 8595 { 8596 enum perf_event_active_state parent_state = parent_event->state; 8597 struct perf_event *child_event; 8598 unsigned long flags; 8599 8600 /* 8601 * Instead of creating recursive hierarchies of events, 8602 * we link inherited events back to the original parent, 8603 * which has a filp for sure, which we use as the reference 8604 * count: 8605 */ 8606 if (parent_event->parent) 8607 parent_event = parent_event->parent; 8608 8609 child_event = perf_event_alloc(&parent_event->attr, 8610 parent_event->cpu, 8611 child, 8612 group_leader, parent_event, 8613 NULL, NULL, -1); 8614 if (IS_ERR(child_event)) 8615 return child_event; 8616 8617 if (is_orphaned_event(parent_event) || 8618 !atomic_long_inc_not_zero(&parent_event->refcount)) { 8619 free_event(child_event); 8620 return NULL; 8621 } 8622 8623 get_ctx(child_ctx); 8624 8625 /* 8626 * Make the child state follow the state of the parent event, 8627 * not its attr.disabled bit. We hold the parent's mutex, 8628 * so we won't race with perf_event_{en, dis}able_family. 
void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which is guaranteed to have a filp and which we use as
	 * the reference count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}
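/*
 * Inherit a whole event group: create a child copy of the group leader
 * first, then one child event for each sibling, grouped under the new
 * leader in the child context.
 */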
static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
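/*
 * Inherit one group from the parent context into the child for the given
 * context number, allocating the child's context on first use. Clears
 * *inherited_all when the event is not marked for inheritance (or when
 * inheritance fails), so the child context won't later be treated as a
 * clone of the parent.
 */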
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}
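/*
 * CPU hotplug / kexec teardown: detach every event from each PMU's per-CPU
 * context (via an IPI to the CPU going away) and release the software event
 * hash table, so nothing references the CPU once it is gone.
 */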
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}
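/*
 * Tasks being moved into this cgroup need their cgroup events switched:
 * __perf_cgroup_move() runs on the task's CPU and schedules cgroup events
 * out and back in, so counting reflects the task's new cgroup.
 */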
static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case, since the task hasn't run yet; this avoids
	 * trying to poke a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */