/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include "internal.h"

#include <asm/irq_regs.h>

static struct workqueue_struct *perf_wq;

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
{
	return event->owner == EVENT_OWNER_KERNEL;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

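/*
 * Worked example (illustrative only): with the defaults above,
 * perf_sample_period_ns = NSEC_PER_SEC / 100000 = 10000ns and
 * sysctl_perf_cpu_time_max_percent = 25, so update_perf_cpu_limits()
 * computes perf_sample_allowed_ns = 10000 * 25 / 100 = 2500ns per sample.
 */
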
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_cgroup_mark_enabled() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

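/*
 * Note: the stubs in this #else branch keep callers ifdef-free; with
 * CONFIG_CGROUP_PERF disabled, cgroup filtering and cgroup time
 * accounting simply compile down to no-ops.
 */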
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      __perf_event_exit_task()
 *        sync_child_event()
 *          put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event_context::lock
 *	    perf_event::child_mutex;
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

		WARN_ON_ONCE(sibling->ctx != event->ctx);
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

/*
 * User event without the task.
 */
static bool is_orphaned_event(struct perf_event *event)
{
	return event && !is_kernel_event(event) && !event->owner;
}

/*
 * Event has a parent but the parent's task finished and it's
 * alive only because of children holding a reference.
 */
static bool is_orphaned_child(struct perf_event *event)
{
	return is_orphaned_event(event->parent);
}

static void orphans_remove_work(struct work_struct *work);

static void schedule_orphans_remove(struct perf_event_context *ctx)
{
	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
		return;

	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
		get_ctx(ctx);
		ctx->orphans_remove_sched = true;
	}
}

static int __init perf_workqueue_init(void)
{
	perf_wq = create_singlethread_workqueue("perf");
	WARN(!perf_wq, "failed to create perf workqueue\n");
	return perf_wq ? 0 : -1;
}

core_initcall(perf_workqueue_init);

static inline int pmu_filter_match(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	return pmu->filter_match ? pmu->filter_match(event) : 1;
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event) && pmu_filter_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

struct remove_event {
	struct perf_event *event;
	bool detach_group;
};

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct remove_event *re = info;
	struct perf_event *event = re->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	if (re->detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call. The removal can
		 * fail if the CPU is currently offline, but in that case we
		 * already called __perf_remove_from_context from
		 * perf_event_exit_cpu.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event, us
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the correct time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	lockdep_assert_held(&ctx->lock);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	perf_set_shadow_time(event, ctx, tstamp);

	perf_log_itrace_start(event);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	if (!ctx->nr_active++)
		perf_event_ctx_activate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

out:
	perf_pmu_enable(event->pmu);

	return ret;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_mux_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group.  This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_mux_hrtimer_restart(cpuctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
2060 */ 2061 if (ctx->task && task_ctx != ctx) { 2062 if (task_ctx) 2063 raw_spin_unlock(&task_ctx->lock); 2064 raw_spin_lock(&ctx->lock); 2065 task_ctx = ctx; 2066 } 2067 2068 if (task_ctx) { 2069 cpuctx->task_ctx = task_ctx; 2070 task = task_ctx->task; 2071 } 2072 2073 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2074 2075 update_context_time(ctx); 2076 /* 2077 * update cgrp time only if current cgrp 2078 * matches event->cgrp. Must be done before 2079 * calling add_event_to_ctx() 2080 */ 2081 update_cgrp_time_from_event(event); 2082 2083 add_event_to_ctx(event, ctx); 2084 2085 /* 2086 * Schedule everything back in 2087 */ 2088 perf_event_sched_in(cpuctx, task_ctx, task); 2089 2090 perf_pmu_enable(cpuctx->ctx.pmu); 2091 perf_ctx_unlock(cpuctx, task_ctx); 2092 2093 return 0; 2094 } 2095 2096 /* 2097 * Attach a performance event to a context 2098 * 2099 * First we add the event to the list with the hardware enable bit 2100 * in event->hw_config cleared. 2101 * 2102 * If the event is attached to a task which is on a CPU we use a smp 2103 * call to enable it in the task context. The task might have been 2104 * scheduled away, but we check this in the smp call again. 2105 */ 2106 static void 2107 perf_install_in_context(struct perf_event_context *ctx, 2108 struct perf_event *event, 2109 int cpu) 2110 { 2111 struct task_struct *task = ctx->task; 2112 2113 lockdep_assert_held(&ctx->mutex); 2114 2115 event->ctx = ctx; 2116 if (event->cpu != -1) 2117 event->cpu = cpu; 2118 2119 if (!task) { 2120 /* 2121 * Per cpu events are installed via an smp call and 2122 * the install is always successful. 2123 */ 2124 cpu_function_call(cpu, __perf_install_in_context, event); 2125 return; 2126 } 2127 2128 retry: 2129 if (!task_function_call(task, __perf_install_in_context, event)) 2130 return; 2131 2132 raw_spin_lock_irq(&ctx->lock); 2133 /* 2134 * If we failed to find a running task, but find the context active now 2135 * that we've acquired the ctx->lock, retry. 2136 */ 2137 if (ctx->is_active) { 2138 raw_spin_unlock_irq(&ctx->lock); 2139 /* 2140 * Reload the task pointer, it might have been changed by 2141 * a concurrent perf_event_context_sched_out(). 2142 */ 2143 task = ctx->task; 2144 goto retry; 2145 } 2146 2147 /* 2148 * Since the task isn't running, its safe to add the event, us holding 2149 * the ctx->lock ensures the task won't get scheduled in. 2150 */ 2151 add_event_to_ctx(event, ctx); 2152 raw_spin_unlock_irq(&ctx->lock); 2153 } 2154 2155 /* 2156 * Put a event into inactive state and update time fields. 2157 * Enabling the leader of a group effectively enables all 2158 * the group members that aren't explicitly disabled, so we 2159 * have to update their ->tstamp_enabled also. 2160 * Note: this works for group members as well as group leaders 2161 * since the non-leader members' sibling_lists will be empty. 
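 *
 * A rough illustration with hypothetical numbers: if perf_event_time()
 * is currently 10ms and the event has so far accumulated
 * total_time_enabled = 3ms, then tstamp_enabled is set to
 * 10ms - 3ms = 7ms, so a later update_event_times() computes the
 * enabled time as "now - 7ms" and the total keeps growing from the 3ms
 * already accounted.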
2162 */ 2163 static void __perf_event_mark_enabled(struct perf_event *event) 2164 { 2165 struct perf_event *sub; 2166 u64 tstamp = perf_event_time(event); 2167 2168 event->state = PERF_EVENT_STATE_INACTIVE; 2169 event->tstamp_enabled = tstamp - event->total_time_enabled; 2170 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2171 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2172 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2173 } 2174 } 2175 2176 /* 2177 * Cross CPU call to enable a performance event 2178 */ 2179 static int __perf_event_enable(void *info) 2180 { 2181 struct perf_event *event = info; 2182 struct perf_event_context *ctx = event->ctx; 2183 struct perf_event *leader = event->group_leader; 2184 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2185 int err; 2186 2187 /* 2188 * There's a time window between 'ctx->is_active' check 2189 * in perf_event_enable function and this place having: 2190 * - IRQs on 2191 * - ctx->lock unlocked 2192 * 2193 * where the task could be killed and 'ctx' deactivated 2194 * by perf_event_exit_task. 2195 */ 2196 if (!ctx->is_active) 2197 return -EINVAL; 2198 2199 raw_spin_lock(&ctx->lock); 2200 update_context_time(ctx); 2201 2202 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2203 goto unlock; 2204 2205 /* 2206 * set current task's cgroup time reference point 2207 */ 2208 perf_cgroup_set_timestamp(current, ctx); 2209 2210 __perf_event_mark_enabled(event); 2211 2212 if (!event_filter_match(event)) { 2213 if (is_cgroup_event(event)) 2214 perf_cgroup_defer_enabled(event); 2215 goto unlock; 2216 } 2217 2218 /* 2219 * If the event is in a group and isn't the group leader, 2220 * then don't put it on unless the group is on. 2221 */ 2222 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2223 goto unlock; 2224 2225 if (!group_can_go_on(event, cpuctx, 1)) { 2226 err = -EEXIST; 2227 } else { 2228 if (event == leader) 2229 err = group_sched_in(event, cpuctx, ctx); 2230 else 2231 err = event_sched_in(event, cpuctx, ctx); 2232 } 2233 2234 if (err) { 2235 /* 2236 * If this event can't go on and it's part of a 2237 * group, then the whole group has to come off. 2238 */ 2239 if (leader != event) { 2240 group_sched_out(leader, cpuctx, ctx); 2241 perf_mux_hrtimer_restart(cpuctx); 2242 } 2243 if (leader->attr.pinned) { 2244 update_group_times(leader); 2245 leader->state = PERF_EVENT_STATE_ERROR; 2246 } 2247 } 2248 2249 unlock: 2250 raw_spin_unlock(&ctx->lock); 2251 2252 return 0; 2253 } 2254 2255 /* 2256 * Enable a event. 2257 * 2258 * If event->ctx is a cloned context, callers must make sure that 2259 * every task struct that event->ctx->task could possibly point to 2260 * remains valid. This condition is satisfied when called through 2261 * perf_event_for_each_child or perf_event_for_each as described 2262 * for perf_event_disable. 2263 */ 2264 static void _perf_event_enable(struct perf_event *event) 2265 { 2266 struct perf_event_context *ctx = event->ctx; 2267 struct task_struct *task = ctx->task; 2268 2269 if (!task) { 2270 /* 2271 * Enable the event on the cpu that it's on 2272 */ 2273 cpu_function_call(event->cpu, __perf_event_enable, event); 2274 return; 2275 } 2276 2277 raw_spin_lock_irq(&ctx->lock); 2278 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2279 goto out; 2280 2281 /* 2282 * If the event is in error state, clear that first. 
2283 * That way, if we see the event in error state below, we 2284 * know that it has gone back into error state, as distinct 2285 * from the task having been scheduled away before the 2286 * cross-call arrived. 2287 */ 2288 if (event->state == PERF_EVENT_STATE_ERROR) 2289 event->state = PERF_EVENT_STATE_OFF; 2290 2291 retry: 2292 if (!ctx->is_active) { 2293 __perf_event_mark_enabled(event); 2294 goto out; 2295 } 2296 2297 raw_spin_unlock_irq(&ctx->lock); 2298 2299 if (!task_function_call(task, __perf_event_enable, event)) 2300 return; 2301 2302 raw_spin_lock_irq(&ctx->lock); 2303 2304 /* 2305 * If the context is active and the event is still off, 2306 * we need to retry the cross-call. 2307 */ 2308 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2309 /* 2310 * task could have been flipped by a concurrent 2311 * perf_event_context_sched_out() 2312 */ 2313 task = ctx->task; 2314 goto retry; 2315 } 2316 2317 out: 2318 raw_spin_unlock_irq(&ctx->lock); 2319 } 2320 2321 /* 2322 * See perf_event_disable(); 2323 */ 2324 void perf_event_enable(struct perf_event *event) 2325 { 2326 struct perf_event_context *ctx; 2327 2328 ctx = perf_event_ctx_lock(event); 2329 _perf_event_enable(event); 2330 perf_event_ctx_unlock(event, ctx); 2331 } 2332 EXPORT_SYMBOL_GPL(perf_event_enable); 2333 2334 static int _perf_event_refresh(struct perf_event *event, int refresh) 2335 { 2336 /* 2337 * not supported on inherited events 2338 */ 2339 if (event->attr.inherit || !is_sampling_event(event)) 2340 return -EINVAL; 2341 2342 atomic_add(refresh, &event->event_limit); 2343 _perf_event_enable(event); 2344 2345 return 0; 2346 } 2347 2348 /* 2349 * See perf_event_disable() 2350 */ 2351 int perf_event_refresh(struct perf_event *event, int refresh) 2352 { 2353 struct perf_event_context *ctx; 2354 int ret; 2355 2356 ctx = perf_event_ctx_lock(event); 2357 ret = _perf_event_refresh(event, refresh); 2358 perf_event_ctx_unlock(event, ctx); 2359 2360 return ret; 2361 } 2362 EXPORT_SYMBOL_GPL(perf_event_refresh); 2363 2364 static void ctx_sched_out(struct perf_event_context *ctx, 2365 struct perf_cpu_context *cpuctx, 2366 enum event_type_t event_type) 2367 { 2368 struct perf_event *event; 2369 int is_active = ctx->is_active; 2370 2371 ctx->is_active &= ~event_type; 2372 if (likely(!ctx->nr_events)) 2373 return; 2374 2375 update_context_time(ctx); 2376 update_cgrp_time_from_cpuctx(cpuctx); 2377 if (!ctx->nr_active) 2378 return; 2379 2380 perf_pmu_disable(ctx->pmu); 2381 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2382 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2383 group_sched_out(event, cpuctx, ctx); 2384 } 2385 2386 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2387 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2388 group_sched_out(event, cpuctx, ctx); 2389 } 2390 perf_pmu_enable(ctx->pmu); 2391 } 2392 2393 /* 2394 * Test whether two contexts are equivalent, i.e. whether they have both been 2395 * cloned from the same version of the same context. 2396 * 2397 * Equivalence is measured using a generation number in the context that is 2398 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2399 * and list_del_event(). 
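 *
 * Roughly: a cloned context records its parent's generation at clone
 * time in parent_gen, and it is only treated as equivalent to that
 * parent (or to a sibling clone) while those generation numbers still
 * match, i.e. while no events have been added or removed since the
 * clone; any modification bumps the generation and defeats the context
 * switch optimization below.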
2400 */ 2401 static int context_equiv(struct perf_event_context *ctx1, 2402 struct perf_event_context *ctx2) 2403 { 2404 lockdep_assert_held(&ctx1->lock); 2405 lockdep_assert_held(&ctx2->lock); 2406 2407 /* Pinning disables the swap optimization */ 2408 if (ctx1->pin_count || ctx2->pin_count) 2409 return 0; 2410 2411 /* If ctx1 is the parent of ctx2 */ 2412 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2413 return 1; 2414 2415 /* If ctx2 is the parent of ctx1 */ 2416 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2417 return 1; 2418 2419 /* 2420 * If ctx1 and ctx2 have the same parent; we flatten the parent 2421 * hierarchy, see perf_event_init_context(). 2422 */ 2423 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2424 ctx1->parent_gen == ctx2->parent_gen) 2425 return 1; 2426 2427 /* Unmatched */ 2428 return 0; 2429 } 2430 2431 static void __perf_event_sync_stat(struct perf_event *event, 2432 struct perf_event *next_event) 2433 { 2434 u64 value; 2435 2436 if (!event->attr.inherit_stat) 2437 return; 2438 2439 /* 2440 * Update the event value, we cannot use perf_event_read() 2441 * because we're in the middle of a context switch and have IRQs 2442 * disabled, which upsets smp_call_function_single(), however 2443 * we know the event must be on the current CPU, therefore we 2444 * don't need to use it. 2445 */ 2446 switch (event->state) { 2447 case PERF_EVENT_STATE_ACTIVE: 2448 event->pmu->read(event); 2449 /* fall-through */ 2450 2451 case PERF_EVENT_STATE_INACTIVE: 2452 update_event_times(event); 2453 break; 2454 2455 default: 2456 break; 2457 } 2458 2459 /* 2460 * In order to keep per-task stats reliable we need to flip the event 2461 * values when we flip the contexts. 2462 */ 2463 value = local64_read(&next_event->count); 2464 value = local64_xchg(&event->count, value); 2465 local64_set(&next_event->count, value); 2466 2467 swap(event->total_time_enabled, next_event->total_time_enabled); 2468 swap(event->total_time_running, next_event->total_time_running); 2469 2470 /* 2471 * Since we swizzled the values, update the user visible data too. 
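 * (perf_event_update_userpage() refreshes the mmap()ed
 * perf_event_mmap_page, so a task reading the count through the user
 * page sees the swapped values as well.)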
2472 */ 2473 perf_event_update_userpage(event); 2474 perf_event_update_userpage(next_event); 2475 } 2476 2477 static void perf_event_sync_stat(struct perf_event_context *ctx, 2478 struct perf_event_context *next_ctx) 2479 { 2480 struct perf_event *event, *next_event; 2481 2482 if (!ctx->nr_stat) 2483 return; 2484 2485 update_context_time(ctx); 2486 2487 event = list_first_entry(&ctx->event_list, 2488 struct perf_event, event_entry); 2489 2490 next_event = list_first_entry(&next_ctx->event_list, 2491 struct perf_event, event_entry); 2492 2493 while (&event->event_entry != &ctx->event_list && 2494 &next_event->event_entry != &next_ctx->event_list) { 2495 2496 __perf_event_sync_stat(event, next_event); 2497 2498 event = list_next_entry(event, event_entry); 2499 next_event = list_next_entry(next_event, event_entry); 2500 } 2501 } 2502 2503 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2504 struct task_struct *next) 2505 { 2506 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2507 struct perf_event_context *next_ctx; 2508 struct perf_event_context *parent, *next_parent; 2509 struct perf_cpu_context *cpuctx; 2510 int do_switch = 1; 2511 2512 if (likely(!ctx)) 2513 return; 2514 2515 cpuctx = __get_cpu_context(ctx); 2516 if (!cpuctx->task_ctx) 2517 return; 2518 2519 rcu_read_lock(); 2520 next_ctx = next->perf_event_ctxp[ctxn]; 2521 if (!next_ctx) 2522 goto unlock; 2523 2524 parent = rcu_dereference(ctx->parent_ctx); 2525 next_parent = rcu_dereference(next_ctx->parent_ctx); 2526 2527 /* If neither context have a parent context; they cannot be clones. */ 2528 if (!parent && !next_parent) 2529 goto unlock; 2530 2531 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2532 /* 2533 * Looks like the two contexts are clones, so we might be 2534 * able to optimize the context switch. We lock both 2535 * contexts and check that they are clones under the 2536 * lock (including re-checking that neither has been 2537 * uncloned in the meantime). It doesn't matter which 2538 * order we take the locks because no other cpu could 2539 * be trying to lock both of these tasks. 2540 */ 2541 raw_spin_lock(&ctx->lock); 2542 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2543 if (context_equiv(ctx, next_ctx)) { 2544 /* 2545 * XXX do we need a memory barrier of sorts 2546 * wrt to rcu_dereference() of perf_event_ctxp 2547 */ 2548 task->perf_event_ctxp[ctxn] = next_ctx; 2549 next->perf_event_ctxp[ctxn] = ctx; 2550 ctx->task = next; 2551 next_ctx->task = task; 2552 2553 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2554 2555 do_switch = 0; 2556 2557 perf_event_sync_stat(ctx, next_ctx); 2558 } 2559 raw_spin_unlock(&next_ctx->lock); 2560 raw_spin_unlock(&ctx->lock); 2561 } 2562 unlock: 2563 rcu_read_unlock(); 2564 2565 if (do_switch) { 2566 raw_spin_lock(&ctx->lock); 2567 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2568 cpuctx->task_ctx = NULL; 2569 raw_spin_unlock(&ctx->lock); 2570 } 2571 } 2572 2573 void perf_sched_cb_dec(struct pmu *pmu) 2574 { 2575 this_cpu_dec(perf_sched_cb_usages); 2576 } 2577 2578 void perf_sched_cb_inc(struct pmu *pmu) 2579 { 2580 this_cpu_inc(perf_sched_cb_usages); 2581 } 2582 2583 /* 2584 * This function provides the context switch callback to the lower code 2585 * layer. It is invoked ONLY when the context switch callback is enabled. 
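 *
 * A minimal sketch of the expected usage, assuming a PMU driver with
 * per-task hardware state (e.g. a branch-stack buffer): the driver
 * calls perf_sched_cb_inc(pmu) when such an event is scheduled in and
 * perf_sched_cb_dec(pmu) when it is removed, and implements
 * pmu->sched_task(ctx, sched_in) to save/restore that state; the loop
 * below then invokes the callback on every context switch for as long
 * as the per-cpu perf_sched_cb_usages count is non-zero.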
2586 */ 2587 static void perf_pmu_sched_task(struct task_struct *prev, 2588 struct task_struct *next, 2589 bool sched_in) 2590 { 2591 struct perf_cpu_context *cpuctx; 2592 struct pmu *pmu; 2593 unsigned long flags; 2594 2595 if (prev == next) 2596 return; 2597 2598 local_irq_save(flags); 2599 2600 rcu_read_lock(); 2601 2602 list_for_each_entry_rcu(pmu, &pmus, entry) { 2603 if (pmu->sched_task) { 2604 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2605 2606 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2607 2608 perf_pmu_disable(pmu); 2609 2610 pmu->sched_task(cpuctx->task_ctx, sched_in); 2611 2612 perf_pmu_enable(pmu); 2613 2614 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2615 } 2616 } 2617 2618 rcu_read_unlock(); 2619 2620 local_irq_restore(flags); 2621 } 2622 2623 static void perf_event_switch(struct task_struct *task, 2624 struct task_struct *next_prev, bool sched_in); 2625 2626 #define for_each_task_context_nr(ctxn) \ 2627 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2628 2629 /* 2630 * Called from scheduler to remove the events of the current task, 2631 * with interrupts disabled. 2632 * 2633 * We stop each event and update the event value in event->count. 2634 * 2635 * This does not protect us against NMI, but disable() 2636 * sets the disabled bit in the control field of event _before_ 2637 * accessing the event control register. If a NMI hits, then it will 2638 * not restart the event. 2639 */ 2640 void __perf_event_task_sched_out(struct task_struct *task, 2641 struct task_struct *next) 2642 { 2643 int ctxn; 2644 2645 if (__this_cpu_read(perf_sched_cb_usages)) 2646 perf_pmu_sched_task(task, next, false); 2647 2648 if (atomic_read(&nr_switch_events)) 2649 perf_event_switch(task, next, false); 2650 2651 for_each_task_context_nr(ctxn) 2652 perf_event_context_sched_out(task, ctxn, next); 2653 2654 /* 2655 * if cgroup events exist on this CPU, then we need 2656 * to check if we have to switch out PMU state. 2657 * cgroup event are system-wide mode only 2658 */ 2659 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2660 perf_cgroup_sched_out(task, next); 2661 } 2662 2663 static void task_ctx_sched_out(struct perf_event_context *ctx) 2664 { 2665 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2666 2667 if (!cpuctx->task_ctx) 2668 return; 2669 2670 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2671 return; 2672 2673 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2674 cpuctx->task_ctx = NULL; 2675 } 2676 2677 /* 2678 * Called with IRQs disabled 2679 */ 2680 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2681 enum event_type_t event_type) 2682 { 2683 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2684 } 2685 2686 static void 2687 ctx_pinned_sched_in(struct perf_event_context *ctx, 2688 struct perf_cpu_context *cpuctx) 2689 { 2690 struct perf_event *event; 2691 2692 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2693 if (event->state <= PERF_EVENT_STATE_OFF) 2694 continue; 2695 if (!event_filter_match(event)) 2696 continue; 2697 2698 /* may need to reset tstamp_enabled */ 2699 if (is_cgroup_event(event)) 2700 perf_cgroup_mark_enabled(event, ctx); 2701 2702 if (group_can_go_on(event, cpuctx, 1)) 2703 group_sched_in(event, cpuctx, ctx); 2704 2705 /* 2706 * If this pinned group hasn't been scheduled, 2707 * put it in error state. 
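 * Userspace then sees the group as failed: a read() on an event in
 * ERROR state returns end-of-file (see perf_read_hw()), and it takes an
 * explicit PERF_EVENT_IOC_ENABLE to move it out of that state again
 * (see _perf_event_enable()).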
2708 */ 2709 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2710 update_group_times(event); 2711 event->state = PERF_EVENT_STATE_ERROR; 2712 } 2713 } 2714 } 2715 2716 static void 2717 ctx_flexible_sched_in(struct perf_event_context *ctx, 2718 struct perf_cpu_context *cpuctx) 2719 { 2720 struct perf_event *event; 2721 int can_add_hw = 1; 2722 2723 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2724 /* Ignore events in OFF or ERROR state */ 2725 if (event->state <= PERF_EVENT_STATE_OFF) 2726 continue; 2727 /* 2728 * Listen to the 'cpu' scheduling filter constraint 2729 * of events: 2730 */ 2731 if (!event_filter_match(event)) 2732 continue; 2733 2734 /* may need to reset tstamp_enabled */ 2735 if (is_cgroup_event(event)) 2736 perf_cgroup_mark_enabled(event, ctx); 2737 2738 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2739 if (group_sched_in(event, cpuctx, ctx)) 2740 can_add_hw = 0; 2741 } 2742 } 2743 } 2744 2745 static void 2746 ctx_sched_in(struct perf_event_context *ctx, 2747 struct perf_cpu_context *cpuctx, 2748 enum event_type_t event_type, 2749 struct task_struct *task) 2750 { 2751 u64 now; 2752 int is_active = ctx->is_active; 2753 2754 ctx->is_active |= event_type; 2755 if (likely(!ctx->nr_events)) 2756 return; 2757 2758 now = perf_clock(); 2759 ctx->timestamp = now; 2760 perf_cgroup_set_timestamp(task, ctx); 2761 /* 2762 * First go through the list and put on any pinned groups 2763 * in order to give them the best chance of going on. 2764 */ 2765 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2766 ctx_pinned_sched_in(ctx, cpuctx); 2767 2768 /* Then walk through the lower prio flexible groups */ 2769 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2770 ctx_flexible_sched_in(ctx, cpuctx); 2771 } 2772 2773 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2774 enum event_type_t event_type, 2775 struct task_struct *task) 2776 { 2777 struct perf_event_context *ctx = &cpuctx->ctx; 2778 2779 ctx_sched_in(ctx, cpuctx, event_type, task); 2780 } 2781 2782 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2783 struct task_struct *task) 2784 { 2785 struct perf_cpu_context *cpuctx; 2786 2787 cpuctx = __get_cpu_context(ctx); 2788 if (cpuctx->task_ctx == ctx) 2789 return; 2790 2791 perf_ctx_lock(cpuctx, ctx); 2792 perf_pmu_disable(ctx->pmu); 2793 /* 2794 * We want to keep the following priority order: 2795 * cpu pinned (that don't need to move), task pinned, 2796 * cpu flexible, task flexible. 2797 */ 2798 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2799 2800 if (ctx->nr_events) 2801 cpuctx->task_ctx = ctx; 2802 2803 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2804 2805 perf_pmu_enable(ctx->pmu); 2806 perf_ctx_unlock(cpuctx, ctx); 2807 } 2808 2809 /* 2810 * Called from scheduler to add the events of the current task 2811 * with interrupts disabled. 2812 * 2813 * We restore the event value and then enable it. 2814 * 2815 * This does not protect us against NMI, but enable() 2816 * sets the enabled bit in the control field of event _before_ 2817 * accessing the event control register. If a NMI hits, then it will 2818 * keep the event running. 
2819 */ 2820 void __perf_event_task_sched_in(struct task_struct *prev, 2821 struct task_struct *task) 2822 { 2823 struct perf_event_context *ctx; 2824 int ctxn; 2825 2826 for_each_task_context_nr(ctxn) { 2827 ctx = task->perf_event_ctxp[ctxn]; 2828 if (likely(!ctx)) 2829 continue; 2830 2831 perf_event_context_sched_in(ctx, task); 2832 } 2833 /* 2834 * if cgroup events exist on this CPU, then we need 2835 * to check if we have to switch in PMU state. 2836 * cgroup event are system-wide mode only 2837 */ 2838 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2839 perf_cgroup_sched_in(prev, task); 2840 2841 if (atomic_read(&nr_switch_events)) 2842 perf_event_switch(task, prev, true); 2843 2844 if (__this_cpu_read(perf_sched_cb_usages)) 2845 perf_pmu_sched_task(prev, task, true); 2846 } 2847 2848 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2849 { 2850 u64 frequency = event->attr.sample_freq; 2851 u64 sec = NSEC_PER_SEC; 2852 u64 divisor, dividend; 2853 2854 int count_fls, nsec_fls, frequency_fls, sec_fls; 2855 2856 count_fls = fls64(count); 2857 nsec_fls = fls64(nsec); 2858 frequency_fls = fls64(frequency); 2859 sec_fls = 30; 2860 2861 /* 2862 * We got @count in @nsec, with a target of sample_freq HZ 2863 * the target period becomes: 2864 * 2865 * @count * 10^9 2866 * period = ------------------- 2867 * @nsec * sample_freq 2868 * 2869 */ 2870 2871 /* 2872 * Reduce accuracy by one bit such that @a and @b converge 2873 * to a similar magnitude. 2874 */ 2875 #define REDUCE_FLS(a, b) \ 2876 do { \ 2877 if (a##_fls > b##_fls) { \ 2878 a >>= 1; \ 2879 a##_fls--; \ 2880 } else { \ 2881 b >>= 1; \ 2882 b##_fls--; \ 2883 } \ 2884 } while (0) 2885 2886 /* 2887 * Reduce accuracy until either term fits in a u64, then proceed with 2888 * the other, so that finally we can do a u64/u64 division. 2889 */ 2890 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2891 REDUCE_FLS(nsec, frequency); 2892 REDUCE_FLS(sec, count); 2893 } 2894 2895 if (count_fls + sec_fls > 64) { 2896 divisor = nsec * frequency; 2897 2898 while (count_fls + sec_fls > 64) { 2899 REDUCE_FLS(count, sec); 2900 divisor >>= 1; 2901 } 2902 2903 dividend = count * sec; 2904 } else { 2905 dividend = count * sec; 2906 2907 while (nsec_fls + frequency_fls > 64) { 2908 REDUCE_FLS(nsec, frequency); 2909 dividend >>= 1; 2910 } 2911 2912 divisor = nsec * frequency; 2913 } 2914 2915 if (!divisor) 2916 return dividend; 2917 2918 return div64_u64(dividend, divisor); 2919 } 2920 2921 static DEFINE_PER_CPU(int, perf_throttled_count); 2922 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2923 2924 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2925 { 2926 struct hw_perf_event *hwc = &event->hw; 2927 s64 period, sample_period; 2928 s64 delta; 2929 2930 period = perf_calculate_period(event, nsec, count); 2931 2932 delta = (s64)(period - hwc->sample_period); 2933 delta = (delta + 7) / 8; /* low pass filter */ 2934 2935 sample_period = hwc->sample_period + delta; 2936 2937 if (!sample_period) 2938 sample_period = 1; 2939 2940 hwc->sample_period = sample_period; 2941 2942 if (local64_read(&hwc->period_left) > 8*sample_period) { 2943 if (disable) 2944 event->pmu->stop(event, PERF_EF_UPDATE); 2945 2946 local64_set(&hwc->period_left, 0); 2947 2948 if (disable) 2949 event->pmu->start(event, PERF_EF_RELOAD); 2950 } 2951 } 2952 2953 /* 2954 * combine freq adjustment with unthrottling to avoid two passes over the 2955 * events. 
At the same time, make sure that having freq events does not change 2956 * the rate of unthrottling as that would introduce bias. 2957 */ 2958 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2959 int needs_unthr) 2960 { 2961 struct perf_event *event; 2962 struct hw_perf_event *hwc; 2963 u64 now, period = TICK_NSEC; 2964 s64 delta; 2965 2966 /* 2967 * only need to iterate over all events if: 2968 * - the context has events in frequency mode (needs freq adjust) 2969 * - there are events to unthrottle on this cpu 2970 */ 2971 if (!(ctx->nr_freq || needs_unthr)) 2972 return; 2973 2974 raw_spin_lock(&ctx->lock); 2975 perf_pmu_disable(ctx->pmu); 2976 2977 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2978 if (event->state != PERF_EVENT_STATE_ACTIVE) 2979 continue; 2980 2981 if (!event_filter_match(event)) 2982 continue; 2983 2984 perf_pmu_disable(event->pmu); 2985 2986 hwc = &event->hw; 2987 2988 if (hwc->interrupts == MAX_INTERRUPTS) { 2989 hwc->interrupts = 0; 2990 perf_log_throttle(event, 1); 2991 event->pmu->start(event, 0); 2992 } 2993 2994 if (!event->attr.freq || !event->attr.sample_freq) 2995 goto next; 2996 2997 /* 2998 * stop the event and update event->count 2999 */ 3000 event->pmu->stop(event, PERF_EF_UPDATE); 3001 3002 now = local64_read(&event->count); 3003 delta = now - hwc->freq_count_stamp; 3004 hwc->freq_count_stamp = now; 3005 3006 /* 3007 * restart the event; 3008 * reload only if the value has changed. 3009 * We have stopped the event, so tell that 3010 * to perf_adjust_period() to avoid stopping it 3011 * twice. 3012 */ 3013 if (delta > 0) 3014 perf_adjust_period(event, period, delta, false); 3015 3016 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3017 next: 3018 perf_pmu_enable(event->pmu); 3019 } 3020 3021 perf_pmu_enable(ctx->pmu); 3022 raw_spin_unlock(&ctx->lock); 3023 } 3024 3025 /* 3026 * Round-robin a context's events: 3027 */ 3028 static void rotate_ctx(struct perf_event_context *ctx) 3029 { 3030 /* 3031 * Rotate the first entry last of non-pinned groups. Rotation might be 3032 * disabled by the inheritance code.
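 *
 * E.g. (hypothetical ordering) a flexible_groups list of [A, B, C]
 * becomes [B, C, A] after list_rotate_left(), so B gets first pick of
 * the hardware counters on the next schedule-in; over successive
 * rotations every flexible group gets a turn even when they do not all
 * fit on the PMU at once.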
3033 */ 3034 if (!ctx->rotate_disable) 3035 list_rotate_left(&ctx->flexible_groups); 3036 } 3037 3038 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3039 { 3040 struct perf_event_context *ctx = NULL; 3041 int rotate = 0; 3042 3043 if (cpuctx->ctx.nr_events) { 3044 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3045 rotate = 1; 3046 } 3047 3048 ctx = cpuctx->task_ctx; 3049 if (ctx && ctx->nr_events) { 3050 if (ctx->nr_events != ctx->nr_active) 3051 rotate = 1; 3052 } 3053 3054 if (!rotate) 3055 goto done; 3056 3057 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3058 perf_pmu_disable(cpuctx->ctx.pmu); 3059 3060 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3061 if (ctx) 3062 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3063 3064 rotate_ctx(&cpuctx->ctx); 3065 if (ctx) 3066 rotate_ctx(ctx); 3067 3068 perf_event_sched_in(cpuctx, ctx, current); 3069 3070 perf_pmu_enable(cpuctx->ctx.pmu); 3071 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3072 done: 3073 3074 return rotate; 3075 } 3076 3077 #ifdef CONFIG_NO_HZ_FULL 3078 bool perf_event_can_stop_tick(void) 3079 { 3080 if (atomic_read(&nr_freq_events) || 3081 __this_cpu_read(perf_throttled_count)) 3082 return false; 3083 else 3084 return true; 3085 } 3086 #endif 3087 3088 void perf_event_task_tick(void) 3089 { 3090 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3091 struct perf_event_context *ctx, *tmp; 3092 int throttled; 3093 3094 WARN_ON(!irqs_disabled()); 3095 3096 __this_cpu_inc(perf_throttled_seq); 3097 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3098 3099 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3100 perf_adjust_freq_unthr_context(ctx, throttled); 3101 } 3102 3103 static int event_enable_on_exec(struct perf_event *event, 3104 struct perf_event_context *ctx) 3105 { 3106 if (!event->attr.enable_on_exec) 3107 return 0; 3108 3109 event->attr.enable_on_exec = 0; 3110 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3111 return 0; 3112 3113 __perf_event_mark_enabled(event); 3114 3115 return 1; 3116 } 3117 3118 /* 3119 * Enable all of a task's events that have been marked enable-on-exec. 3120 * This expects task == current. 3121 */ 3122 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3123 { 3124 struct perf_event_context *clone_ctx = NULL; 3125 struct perf_event *event; 3126 unsigned long flags; 3127 int enabled = 0; 3128 int ret; 3129 3130 local_irq_save(flags); 3131 if (!ctx || !ctx->nr_events) 3132 goto out; 3133 3134 /* 3135 * We must ctxsw out cgroup events to avoid conflict 3136 * when invoking perf_task_event_sched_in() later on 3137 * in this function. Otherwise we end up trying to 3138 * ctxswin cgroup events which are already scheduled 3139 * in. 3140 */ 3141 perf_cgroup_sched_out(current, NULL); 3142 3143 raw_spin_lock(&ctx->lock); 3144 task_ctx_sched_out(ctx); 3145 3146 list_for_each_entry(event, &ctx->event_list, event_entry) { 3147 ret = event_enable_on_exec(event, ctx); 3148 if (ret) 3149 enabled = 1; 3150 } 3151 3152 /* 3153 * Unclone this context if we enabled any event. 
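 * Enabling changed the context, so it should no longer be treated as an
 * unmodified clone of its parent; cf. unclone_ctx() and context_equiv().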
3154 */ 3155 if (enabled) 3156 clone_ctx = unclone_ctx(ctx); 3157 3158 raw_spin_unlock(&ctx->lock); 3159 3160 /* 3161 * Also calls ctxswin for cgroup events, if any: 3162 */ 3163 perf_event_context_sched_in(ctx, ctx->task); 3164 out: 3165 local_irq_restore(flags); 3166 3167 if (clone_ctx) 3168 put_ctx(clone_ctx); 3169 } 3170 3171 void perf_event_exec(void) 3172 { 3173 struct perf_event_context *ctx; 3174 int ctxn; 3175 3176 rcu_read_lock(); 3177 for_each_task_context_nr(ctxn) { 3178 ctx = current->perf_event_ctxp[ctxn]; 3179 if (!ctx) 3180 continue; 3181 3182 perf_event_enable_on_exec(ctx); 3183 } 3184 rcu_read_unlock(); 3185 } 3186 3187 /* 3188 * Cross CPU call to read the hardware event 3189 */ 3190 static void __perf_event_read(void *info) 3191 { 3192 struct perf_event *event = info; 3193 struct perf_event_context *ctx = event->ctx; 3194 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3195 3196 /* 3197 * If this is a task context, we need to check whether it is 3198 * the current task context of this cpu. If not it has been 3199 * scheduled out before the smp call arrived. In that case 3200 * event->count would have been updated to a recent sample 3201 * when the event was scheduled out. 3202 */ 3203 if (ctx->task && cpuctx->task_ctx != ctx) 3204 return; 3205 3206 raw_spin_lock(&ctx->lock); 3207 if (ctx->is_active) { 3208 update_context_time(ctx); 3209 update_cgrp_time_from_event(event); 3210 } 3211 update_event_times(event); 3212 if (event->state == PERF_EVENT_STATE_ACTIVE) 3213 event->pmu->read(event); 3214 raw_spin_unlock(&ctx->lock); 3215 } 3216 3217 static inline u64 perf_event_count(struct perf_event *event) 3218 { 3219 if (event->pmu->count) 3220 return event->pmu->count(event); 3221 3222 return __perf_event_count(event); 3223 } 3224 3225 static u64 perf_event_read(struct perf_event *event) 3226 { 3227 /* 3228 * If event is enabled and currently active on a CPU, update the 3229 * value in the event structure: 3230 */ 3231 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3232 smp_call_function_single(event->oncpu, 3233 __perf_event_read, event, 1); 3234 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3235 struct perf_event_context *ctx = event->ctx; 3236 unsigned long flags; 3237 3238 raw_spin_lock_irqsave(&ctx->lock, flags); 3239 /* 3240 * may read while context is not active 3241 * (e.g., thread is blocked), in that case 3242 * we cannot update context time 3243 */ 3244 if (ctx->is_active) { 3245 update_context_time(ctx); 3246 update_cgrp_time_from_event(event); 3247 } 3248 update_event_times(event); 3249 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3250 } 3251 3252 return perf_event_count(event); 3253 } 3254 3255 /* 3256 * Initialize the perf_event context in a task_struct: 3257 */ 3258 static void __perf_event_init_context(struct perf_event_context *ctx) 3259 { 3260 raw_spin_lock_init(&ctx->lock); 3261 mutex_init(&ctx->mutex); 3262 INIT_LIST_HEAD(&ctx->active_ctx_list); 3263 INIT_LIST_HEAD(&ctx->pinned_groups); 3264 INIT_LIST_HEAD(&ctx->flexible_groups); 3265 INIT_LIST_HEAD(&ctx->event_list); 3266 atomic_set(&ctx->refcount, 1); 3267 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3268 } 3269 3270 static struct perf_event_context * 3271 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3272 { 3273 struct perf_event_context *ctx; 3274 3275 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3276 if (!ctx) 3277 return NULL; 3278 3279 __perf_event_init_context(ctx); 3280 if (task) { 3281 ctx->task = task; 3282 
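/* The context pins the task; the reference is dropped again when the context itself is freed. */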
get_task_struct(task); 3283 } 3284 ctx->pmu = pmu; 3285 3286 return ctx; 3287 } 3288 3289 static struct task_struct * 3290 find_lively_task_by_vpid(pid_t vpid) 3291 { 3292 struct task_struct *task; 3293 int err; 3294 3295 rcu_read_lock(); 3296 if (!vpid) 3297 task = current; 3298 else 3299 task = find_task_by_vpid(vpid); 3300 if (task) 3301 get_task_struct(task); 3302 rcu_read_unlock(); 3303 3304 if (!task) 3305 return ERR_PTR(-ESRCH); 3306 3307 /* Reuse ptrace permission checks for now. */ 3308 err = -EACCES; 3309 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3310 goto errout; 3311 3312 return task; 3313 errout: 3314 put_task_struct(task); 3315 return ERR_PTR(err); 3316 3317 } 3318 3319 /* 3320 * Returns a matching context with refcount and pincount. 3321 */ 3322 static struct perf_event_context * 3323 find_get_context(struct pmu *pmu, struct task_struct *task, 3324 struct perf_event *event) 3325 { 3326 struct perf_event_context *ctx, *clone_ctx = NULL; 3327 struct perf_cpu_context *cpuctx; 3328 void *task_ctx_data = NULL; 3329 unsigned long flags; 3330 int ctxn, err; 3331 int cpu = event->cpu; 3332 3333 if (!task) { 3334 /* Must be root to operate on a CPU event: */ 3335 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3336 return ERR_PTR(-EACCES); 3337 3338 /* 3339 * We could be clever and allow to attach a event to an 3340 * offline CPU and activate it when the CPU comes up, but 3341 * that's for later. 3342 */ 3343 if (!cpu_online(cpu)) 3344 return ERR_PTR(-ENODEV); 3345 3346 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3347 ctx = &cpuctx->ctx; 3348 get_ctx(ctx); 3349 ++ctx->pin_count; 3350 3351 return ctx; 3352 } 3353 3354 err = -EINVAL; 3355 ctxn = pmu->task_ctx_nr; 3356 if (ctxn < 0) 3357 goto errout; 3358 3359 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3360 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3361 if (!task_ctx_data) { 3362 err = -ENOMEM; 3363 goto errout; 3364 } 3365 } 3366 3367 retry: 3368 ctx = perf_lock_task_context(task, ctxn, &flags); 3369 if (ctx) { 3370 clone_ctx = unclone_ctx(ctx); 3371 ++ctx->pin_count; 3372 3373 if (task_ctx_data && !ctx->task_ctx_data) { 3374 ctx->task_ctx_data = task_ctx_data; 3375 task_ctx_data = NULL; 3376 } 3377 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3378 3379 if (clone_ctx) 3380 put_ctx(clone_ctx); 3381 } else { 3382 ctx = alloc_perf_context(pmu, task); 3383 err = -ENOMEM; 3384 if (!ctx) 3385 goto errout; 3386 3387 if (task_ctx_data) { 3388 ctx->task_ctx_data = task_ctx_data; 3389 task_ctx_data = NULL; 3390 } 3391 3392 err = 0; 3393 mutex_lock(&task->perf_event_mutex); 3394 /* 3395 * If it has already passed perf_event_exit_task(). 3396 * we must see PF_EXITING, it takes this mutex too. 
3397 */ 3398 if (task->flags & PF_EXITING) 3399 err = -ESRCH; 3400 else if (task->perf_event_ctxp[ctxn]) 3401 err = -EAGAIN; 3402 else { 3403 get_ctx(ctx); 3404 ++ctx->pin_count; 3405 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3406 } 3407 mutex_unlock(&task->perf_event_mutex); 3408 3409 if (unlikely(err)) { 3410 put_ctx(ctx); 3411 3412 if (err == -EAGAIN) 3413 goto retry; 3414 goto errout; 3415 } 3416 } 3417 3418 kfree(task_ctx_data); 3419 return ctx; 3420 3421 errout: 3422 kfree(task_ctx_data); 3423 return ERR_PTR(err); 3424 } 3425 3426 static void perf_event_free_filter(struct perf_event *event); 3427 static void perf_event_free_bpf_prog(struct perf_event *event); 3428 3429 static void free_event_rcu(struct rcu_head *head) 3430 { 3431 struct perf_event *event; 3432 3433 event = container_of(head, struct perf_event, rcu_head); 3434 if (event->ns) 3435 put_pid_ns(event->ns); 3436 perf_event_free_filter(event); 3437 kfree(event); 3438 } 3439 3440 static void ring_buffer_attach(struct perf_event *event, 3441 struct ring_buffer *rb); 3442 3443 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3444 { 3445 if (event->parent) 3446 return; 3447 3448 if (is_cgroup_event(event)) 3449 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3450 } 3451 3452 static void unaccount_event(struct perf_event *event) 3453 { 3454 if (event->parent) 3455 return; 3456 3457 if (event->attach_state & PERF_ATTACH_TASK) 3458 static_key_slow_dec_deferred(&perf_sched_events); 3459 if (event->attr.mmap || event->attr.mmap_data) 3460 atomic_dec(&nr_mmap_events); 3461 if (event->attr.comm) 3462 atomic_dec(&nr_comm_events); 3463 if (event->attr.task) 3464 atomic_dec(&nr_task_events); 3465 if (event->attr.freq) 3466 atomic_dec(&nr_freq_events); 3467 if (event->attr.context_switch) { 3468 static_key_slow_dec_deferred(&perf_sched_events); 3469 atomic_dec(&nr_switch_events); 3470 } 3471 if (is_cgroup_event(event)) 3472 static_key_slow_dec_deferred(&perf_sched_events); 3473 if (has_branch_stack(event)) 3474 static_key_slow_dec_deferred(&perf_sched_events); 3475 3476 unaccount_event_cpu(event, event->cpu); 3477 } 3478 3479 /* 3480 * The following implement mutual exclusion of events on "exclusive" pmus 3481 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3482 * at a time, so we disallow creating events that might conflict, namely: 3483 * 3484 * 1) cpu-wide events in the presence of per-task events, 3485 * 2) per-task events in the presence of cpu-wide events, 3486 * 3) two matching events on the same context. 3487 * 3488 * The former two cases are handled in the allocation path (perf_event_alloc(), 3489 * __free_event()), the latter -- before the first perf_install_in_context(). 3490 */ 3491 static int exclusive_event_init(struct perf_event *event) 3492 { 3493 struct pmu *pmu = event->pmu; 3494 3495 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3496 return 0; 3497 3498 /* 3499 * Prevent co-existence of per-task and cpu-wide events on the 3500 * same exclusive pmu. 3501 * 3502 * Negative pmu::exclusive_cnt means there are cpu-wide 3503 * events on this "exclusive" pmu, positive means there are 3504 * per-task events. 3505 * 3506 * Since this is called in perf_event_alloc() path, event::ctx 3507 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3508 * to mean "per-task event", because unlike other attach states it 3509 * never gets cleared. 
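 *
 * E.g. with two cpu-wide events already on such a pmu, exclusive_cnt is
 * -2; creating a per-task event then fails atomic_inc_unless_negative()
 * below and is rejected with -EBUSY, and vice versa for the opposite
 * mix.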
3510 */ 3511 if (event->attach_state & PERF_ATTACH_TASK) { 3512 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3513 return -EBUSY; 3514 } else { 3515 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3516 return -EBUSY; 3517 } 3518 3519 return 0; 3520 } 3521 3522 static void exclusive_event_destroy(struct perf_event *event) 3523 { 3524 struct pmu *pmu = event->pmu; 3525 3526 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3527 return; 3528 3529 /* see comment in exclusive_event_init() */ 3530 if (event->attach_state & PERF_ATTACH_TASK) 3531 atomic_dec(&pmu->exclusive_cnt); 3532 else 3533 atomic_inc(&pmu->exclusive_cnt); 3534 } 3535 3536 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3537 { 3538 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3539 (e1->cpu == e2->cpu || 3540 e1->cpu == -1 || 3541 e2->cpu == -1)) 3542 return true; 3543 return false; 3544 } 3545 3546 /* Called under the same ctx::mutex as perf_install_in_context() */ 3547 static bool exclusive_event_installable(struct perf_event *event, 3548 struct perf_event_context *ctx) 3549 { 3550 struct perf_event *iter_event; 3551 struct pmu *pmu = event->pmu; 3552 3553 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3554 return true; 3555 3556 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3557 if (exclusive_event_match(iter_event, event)) 3558 return false; 3559 } 3560 3561 return true; 3562 } 3563 3564 static void __free_event(struct perf_event *event) 3565 { 3566 if (!event->parent) { 3567 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3568 put_callchain_buffers(); 3569 } 3570 3571 perf_event_free_bpf_prog(event); 3572 3573 if (event->destroy) 3574 event->destroy(event); 3575 3576 if (event->ctx) 3577 put_ctx(event->ctx); 3578 3579 if (event->pmu) { 3580 exclusive_event_destroy(event); 3581 module_put(event->pmu->module); 3582 } 3583 3584 call_rcu(&event->rcu_head, free_event_rcu); 3585 } 3586 3587 static void _free_event(struct perf_event *event) 3588 { 3589 irq_work_sync(&event->pending); 3590 3591 unaccount_event(event); 3592 3593 if (event->rb) { 3594 /* 3595 * Can happen when we close an event with re-directed output. 3596 * 3597 * Since we have a 0 refcount, perf_mmap_close() will skip 3598 * over us; possibly making our ring_buffer_put() the last. 3599 */ 3600 mutex_lock(&event->mmap_mutex); 3601 ring_buffer_attach(event, NULL); 3602 mutex_unlock(&event->mmap_mutex); 3603 } 3604 3605 if (is_cgroup_event(event)) 3606 perf_detach_cgroup(event); 3607 3608 __free_event(event); 3609 } 3610 3611 /* 3612 * Used to free events which have a known refcount of 1, such as in error paths 3613 * where the event isn't exposed yet and inherited events. 3614 */ 3615 static void free_event(struct perf_event *event) 3616 { 3617 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3618 "unexpected event refcount: %ld; ptr=%p\n", 3619 atomic_long_read(&event->refcount), event)) { 3620 /* leak to avoid use-after-free */ 3621 return; 3622 } 3623 3624 _free_event(event); 3625 } 3626 3627 /* 3628 * Remove user event from the owner task. 3629 */ 3630 static void perf_remove_from_owner(struct perf_event *event) 3631 { 3632 struct task_struct *owner; 3633 3634 rcu_read_lock(); 3635 owner = ACCESS_ONCE(event->owner); 3636 /* 3637 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3638 * !owner it means the list deletion is complete and we can indeed 3639 * free this event, otherwise we need to serialize on 3640 * owner->perf_event_mutex. 
3641 */ 3642 smp_read_barrier_depends(); 3643 if (owner) { 3644 /* 3645 * Since delayed_put_task_struct() also drops the last 3646 * task reference we can safely take a new reference 3647 * while holding the rcu_read_lock(). 3648 */ 3649 get_task_struct(owner); 3650 } 3651 rcu_read_unlock(); 3652 3653 if (owner) { 3654 /* 3655 * If we're here through perf_event_exit_task() we're already 3656 * holding ctx->mutex which would be an inversion wrt. the 3657 * normal lock order. 3658 * 3659 * However we can safely take this lock because it's the child 3660 * ctx->mutex. 3661 */ 3662 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 3663 3664 /* 3665 * We have to re-check the event->owner field; if it is cleared 3666 * we raced with perf_event_exit_task(). Acquiring the mutex 3667 * ensured they're done, so we can proceed with freeing the 3668 * event. 3669 */ 3670 if (event->owner) 3671 list_del_init(&event->owner_entry); 3672 mutex_unlock(&owner->perf_event_mutex); 3673 put_task_struct(owner); 3674 } 3675 } 3676 3677 static void put_event(struct perf_event *event) 3678 { 3679 struct perf_event_context *ctx; 3680 3681 if (!atomic_long_dec_and_test(&event->refcount)) 3682 return; 3683 3684 if (!is_kernel_event(event)) 3685 perf_remove_from_owner(event); 3686 3687 /* 3688 * There are two ways this annotation is useful: 3689 * 3690 * 1) there is a lock recursion from perf_event_exit_task; 3691 * see the comment there. 3692 * 3693 * 2) there is a lock-inversion with mmap_sem through 3694 * perf_event_read_group(), which takes faults while 3695 * holding ctx->mutex; however, this is called after 3696 * the last filedesc died, so there is no possibility 3697 * of triggering the AB-BA case. 3698 */ 3699 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3700 WARN_ON_ONCE(ctx->parent_ctx); 3701 perf_remove_from_context(event, true); 3702 perf_event_ctx_unlock(event, ctx); 3703 3704 _free_event(event); 3705 } 3706 3707 int perf_event_release_kernel(struct perf_event *event) 3708 { 3709 put_event(event); 3710 return 0; 3711 } 3712 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3713 3714 /* 3715 * Called when the last reference to the file is gone. 3716 */ 3717 static int perf_release(struct inode *inode, struct file *file) 3718 { 3719 put_event(file->private_data); 3720 return 0; 3721 } 3722 3723 /* 3724 * Remove all orphaned events from the context.
3725 */ 3726 static void orphans_remove_work(struct work_struct *work) 3727 { 3728 struct perf_event_context *ctx; 3729 struct perf_event *event, *tmp; 3730 3731 ctx = container_of(work, struct perf_event_context, 3732 orphans_remove.work); 3733 3734 mutex_lock(&ctx->mutex); 3735 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3736 struct perf_event *parent_event = event->parent; 3737 3738 if (!is_orphaned_child(event)) 3739 continue; 3740 3741 perf_remove_from_context(event, true); 3742 3743 mutex_lock(&parent_event->child_mutex); 3744 list_del_init(&event->child_list); 3745 mutex_unlock(&parent_event->child_mutex); 3746 3747 free_event(event); 3748 put_event(parent_event); 3749 } 3750 3751 raw_spin_lock_irq(&ctx->lock); 3752 ctx->orphans_remove_sched = false; 3753 raw_spin_unlock_irq(&ctx->lock); 3754 mutex_unlock(&ctx->mutex); 3755 3756 put_ctx(ctx); 3757 } 3758 3759 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3760 { 3761 struct perf_event *child; 3762 u64 total = 0; 3763 3764 *enabled = 0; 3765 *running = 0; 3766 3767 mutex_lock(&event->child_mutex); 3768 total += perf_event_read(event); 3769 *enabled += event->total_time_enabled + 3770 atomic64_read(&event->child_total_time_enabled); 3771 *running += event->total_time_running + 3772 atomic64_read(&event->child_total_time_running); 3773 3774 list_for_each_entry(child, &event->child_list, child_list) { 3775 total += perf_event_read(child); 3776 *enabled += child->total_time_enabled; 3777 *running += child->total_time_running; 3778 } 3779 mutex_unlock(&event->child_mutex); 3780 3781 return total; 3782 } 3783 EXPORT_SYMBOL_GPL(perf_event_read_value); 3784 3785 static int perf_event_read_group(struct perf_event *event, 3786 u64 read_format, char __user *buf) 3787 { 3788 struct perf_event *leader = event->group_leader, *sub; 3789 struct perf_event_context *ctx = leader->ctx; 3790 int n = 0, size = 0, ret; 3791 u64 count, enabled, running; 3792 u64 values[5]; 3793 3794 lockdep_assert_held(&ctx->mutex); 3795 3796 count = perf_event_read_value(leader, &enabled, &running); 3797 3798 values[n++] = 1 + leader->nr_siblings; 3799 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3800 values[n++] = enabled; 3801 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3802 values[n++] = running; 3803 values[n++] = count; 3804 if (read_format & PERF_FORMAT_ID) 3805 values[n++] = primary_event_id(leader); 3806 3807 size = n * sizeof(u64); 3808 3809 if (copy_to_user(buf, values, size)) 3810 return -EFAULT; 3811 3812 ret = size; 3813 3814 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3815 n = 0; 3816 3817 values[n++] = perf_event_read_value(sub, &enabled, &running); 3818 if (read_format & PERF_FORMAT_ID) 3819 values[n++] = primary_event_id(sub); 3820 3821 size = n * sizeof(u64); 3822 3823 if (copy_to_user(buf + ret, values, size)) { 3824 return -EFAULT; 3825 } 3826 3827 ret += size; 3828 } 3829 3830 return ret; 3831 } 3832 3833 static int perf_event_read_one(struct perf_event *event, 3834 u64 read_format, char __user *buf) 3835 { 3836 u64 enabled, running; 3837 u64 values[4]; 3838 int n = 0; 3839 3840 values[n++] = perf_event_read_value(event, &enabled, &running); 3841 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3842 values[n++] = enabled; 3843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3844 values[n++] = running; 3845 if (read_format & PERF_FORMAT_ID) 3846 values[n++] = primary_event_id(event); 3847 3848 if (copy_to_user(buf, values, n * sizeof(u64))) 3849 return -EFAULT; 
3850 3851 return n * sizeof(u64); 3852 } 3853 3854 static bool is_event_hup(struct perf_event *event) 3855 { 3856 bool no_children; 3857 3858 if (event->state != PERF_EVENT_STATE_EXIT) 3859 return false; 3860 3861 mutex_lock(&event->child_mutex); 3862 no_children = list_empty(&event->child_list); 3863 mutex_unlock(&event->child_mutex); 3864 return no_children; 3865 } 3866 3867 /* 3868 * Read the performance event - simple non blocking version for now 3869 */ 3870 static ssize_t 3871 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3872 { 3873 u64 read_format = event->attr.read_format; 3874 int ret; 3875 3876 /* 3877 * Return end-of-file for a read on a event that is in 3878 * error state (i.e. because it was pinned but it couldn't be 3879 * scheduled on to the CPU at some point). 3880 */ 3881 if (event->state == PERF_EVENT_STATE_ERROR) 3882 return 0; 3883 3884 if (count < event->read_size) 3885 return -ENOSPC; 3886 3887 WARN_ON_ONCE(event->ctx->parent_ctx); 3888 if (read_format & PERF_FORMAT_GROUP) 3889 ret = perf_event_read_group(event, read_format, buf); 3890 else 3891 ret = perf_event_read_one(event, read_format, buf); 3892 3893 return ret; 3894 } 3895 3896 static ssize_t 3897 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3898 { 3899 struct perf_event *event = file->private_data; 3900 struct perf_event_context *ctx; 3901 int ret; 3902 3903 ctx = perf_event_ctx_lock(event); 3904 ret = perf_read_hw(event, buf, count); 3905 perf_event_ctx_unlock(event, ctx); 3906 3907 return ret; 3908 } 3909 3910 static unsigned int perf_poll(struct file *file, poll_table *wait) 3911 { 3912 struct perf_event *event = file->private_data; 3913 struct ring_buffer *rb; 3914 unsigned int events = POLLHUP; 3915 3916 poll_wait(file, &event->waitq, wait); 3917 3918 if (is_event_hup(event)) 3919 return events; 3920 3921 /* 3922 * Pin the event->rb by taking event->mmap_mutex; otherwise 3923 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3924 */ 3925 mutex_lock(&event->mmap_mutex); 3926 rb = event->rb; 3927 if (rb) 3928 events = atomic_xchg(&rb->poll, 0); 3929 mutex_unlock(&event->mmap_mutex); 3930 return events; 3931 } 3932 3933 static void _perf_event_reset(struct perf_event *event) 3934 { 3935 (void)perf_event_read(event); 3936 local64_set(&event->count, 0); 3937 perf_event_update_userpage(event); 3938 } 3939 3940 /* 3941 * Holding the top-level event's child_mutex means that any 3942 * descendant process that has inherited this event will block 3943 * in sync_child_event if it goes to exit, thus satisfying the 3944 * task existence requirements of perf_event_enable/disable. 
3945 */ 3946 static void perf_event_for_each_child(struct perf_event *event, 3947 void (*func)(struct perf_event *)) 3948 { 3949 struct perf_event *child; 3950 3951 WARN_ON_ONCE(event->ctx->parent_ctx); 3952 3953 mutex_lock(&event->child_mutex); 3954 func(event); 3955 list_for_each_entry(child, &event->child_list, child_list) 3956 func(child); 3957 mutex_unlock(&event->child_mutex); 3958 } 3959 3960 static void perf_event_for_each(struct perf_event *event, 3961 void (*func)(struct perf_event *)) 3962 { 3963 struct perf_event_context *ctx = event->ctx; 3964 struct perf_event *sibling; 3965 3966 lockdep_assert_held(&ctx->mutex); 3967 3968 event = event->group_leader; 3969 3970 perf_event_for_each_child(event, func); 3971 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3972 perf_event_for_each_child(sibling, func); 3973 } 3974 3975 static int perf_event_period(struct perf_event *event, u64 __user *arg) 3976 { 3977 struct perf_event_context *ctx = event->ctx; 3978 int ret = 0, active; 3979 u64 value; 3980 3981 if (!is_sampling_event(event)) 3982 return -EINVAL; 3983 3984 if (copy_from_user(&value, arg, sizeof(value))) 3985 return -EFAULT; 3986 3987 if (!value) 3988 return -EINVAL; 3989 3990 raw_spin_lock_irq(&ctx->lock); 3991 if (event->attr.freq) { 3992 if (value > sysctl_perf_event_sample_rate) { 3993 ret = -EINVAL; 3994 goto unlock; 3995 } 3996 3997 event->attr.sample_freq = value; 3998 } else { 3999 event->attr.sample_period = value; 4000 event->hw.sample_period = value; 4001 } 4002 4003 active = (event->state == PERF_EVENT_STATE_ACTIVE); 4004 if (active) { 4005 perf_pmu_disable(ctx->pmu); 4006 event->pmu->stop(event, PERF_EF_UPDATE); 4007 } 4008 4009 local64_set(&event->hw.period_left, 0); 4010 4011 if (active) { 4012 event->pmu->start(event, PERF_EF_RELOAD); 4013 perf_pmu_enable(ctx->pmu); 4014 } 4015 4016 unlock: 4017 raw_spin_unlock_irq(&ctx->lock); 4018 4019 return ret; 4020 } 4021 4022 static const struct file_operations perf_fops; 4023 4024 static inline int perf_fget_light(int fd, struct fd *p) 4025 { 4026 struct fd f = fdget(fd); 4027 if (!f.file) 4028 return -EBADF; 4029 4030 if (f.file->f_op != &perf_fops) { 4031 fdput(f); 4032 return -EBADF; 4033 } 4034 *p = f; 4035 return 0; 4036 } 4037 4038 static int perf_event_set_output(struct perf_event *event, 4039 struct perf_event *output_event); 4040 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4041 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4042 4043 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4044 { 4045 void (*func)(struct perf_event *); 4046 u32 flags = arg; 4047 4048 switch (cmd) { 4049 case PERF_EVENT_IOC_ENABLE: 4050 func = _perf_event_enable; 4051 break; 4052 case PERF_EVENT_IOC_DISABLE: 4053 func = _perf_event_disable; 4054 break; 4055 case PERF_EVENT_IOC_RESET: 4056 func = _perf_event_reset; 4057 break; 4058 4059 case PERF_EVENT_IOC_REFRESH: 4060 return _perf_event_refresh(event, arg); 4061 4062 case PERF_EVENT_IOC_PERIOD: 4063 return perf_event_period(event, (u64 __user *)arg); 4064 4065 case PERF_EVENT_IOC_ID: 4066 { 4067 u64 id = primary_event_id(event); 4068 4069 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 4070 return -EFAULT; 4071 return 0; 4072 } 4073 4074 case PERF_EVENT_IOC_SET_OUTPUT: 4075 { 4076 int ret; 4077 if (arg != -1) { 4078 struct perf_event *output_event; 4079 struct fd output; 4080 ret = perf_fget_light(arg, &output); 4081 if (ret) 4082 return ret; 4083 output_event = 
output.file->private_data; 4084 ret = perf_event_set_output(event, output_event); 4085 fdput(output); 4086 } else { 4087 ret = perf_event_set_output(event, NULL); 4088 } 4089 return ret; 4090 } 4091 4092 case PERF_EVENT_IOC_SET_FILTER: 4093 return perf_event_set_filter(event, (void __user *)arg); 4094 4095 case PERF_EVENT_IOC_SET_BPF: 4096 return perf_event_set_bpf_prog(event, arg); 4097 4098 default: 4099 return -ENOTTY; 4100 } 4101 4102 if (flags & PERF_IOC_FLAG_GROUP) 4103 perf_event_for_each(event, func); 4104 else 4105 perf_event_for_each_child(event, func); 4106 4107 return 0; 4108 } 4109 4110 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4111 { 4112 struct perf_event *event = file->private_data; 4113 struct perf_event_context *ctx; 4114 long ret; 4115 4116 ctx = perf_event_ctx_lock(event); 4117 ret = _perf_ioctl(event, cmd, arg); 4118 perf_event_ctx_unlock(event, ctx); 4119 4120 return ret; 4121 } 4122 4123 #ifdef CONFIG_COMPAT 4124 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4125 unsigned long arg) 4126 { 4127 switch (_IOC_NR(cmd)) { 4128 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 4129 case _IOC_NR(PERF_EVENT_IOC_ID): 4130 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */ 4131 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 4132 cmd &= ~IOCSIZE_MASK; 4133 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 4134 } 4135 break; 4136 } 4137 return perf_ioctl(file, cmd, arg); 4138 } 4139 #else 4140 # define perf_compat_ioctl NULL 4141 #endif 4142 4143 int perf_event_task_enable(void) 4144 { 4145 struct perf_event_context *ctx; 4146 struct perf_event *event; 4147 4148 mutex_lock(&current->perf_event_mutex); 4149 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 4150 ctx = perf_event_ctx_lock(event); 4151 perf_event_for_each_child(event, _perf_event_enable); 4152 perf_event_ctx_unlock(event, ctx); 4153 } 4154 mutex_unlock(&current->perf_event_mutex); 4155 4156 return 0; 4157 } 4158 4159 int perf_event_task_disable(void) 4160 { 4161 struct perf_event_context *ctx; 4162 struct perf_event *event; 4163 4164 mutex_lock(&current->perf_event_mutex); 4165 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 4166 ctx = perf_event_ctx_lock(event); 4167 perf_event_for_each_child(event, _perf_event_disable); 4168 perf_event_ctx_unlock(event, ctx); 4169 } 4170 mutex_unlock(&current->perf_event_mutex); 4171 4172 return 0; 4173 } 4174 4175 static int perf_event_index(struct perf_event *event) 4176 { 4177 if (event->hw.state & PERF_HES_STOPPED) 4178 return 0; 4179 4180 if (event->state != PERF_EVENT_STATE_ACTIVE) 4181 return 0; 4182 4183 return event->pmu->event_idx(event); 4184 } 4185 4186 static void calc_timer_values(struct perf_event *event, 4187 u64 *now, 4188 u64 *enabled, 4189 u64 *running) 4190 { 4191 u64 ctx_time; 4192 4193 *now = perf_clock(); 4194 ctx_time = event->shadow_ctx_time + *now; 4195 *enabled = ctx_time - event->tstamp_enabled; 4196 *running = ctx_time - event->tstamp_running; 4197 } 4198 4199 static void perf_event_init_userpage(struct perf_event *event) 4200 { 4201 struct perf_event_mmap_page *userpg; 4202 struct ring_buffer *rb; 4203 4204 rcu_read_lock(); 4205 rb = rcu_dereference(event->rb); 4206 if (!rb) 4207 goto unlock; 4208 4209 userpg = rb->user_page; 4210 4211 /* Allow new userspace to detect that bit 0 is deprecated */ 4212 userpg->cap_bit0_is_deprecated = 1; 4213 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 4214 userpg->data_offset = PAGE_SIZE; 4215 userpg->data_size = perf_data_size(rb); 4216 4217
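	/*
	 * The remaining user page fields (index, offset, time_enabled,
	 * time_running) are filled in and kept current by
	 * perf_event_update_userpage() below.
	 */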
unlock: 4218 rcu_read_unlock(); 4219 } 4220 4221 void __weak arch_perf_update_userpage( 4222 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) 4223 { 4224 } 4225 4226 /* 4227 * Callers need to ensure there can be no nesting of this function, otherwise 4228 * the seqlock logic goes bad. We cannot serialize this because the arch 4229 * code calls this from NMI context. 4230 */ 4231 void perf_event_update_userpage(struct perf_event *event) 4232 { 4233 struct perf_event_mmap_page *userpg; 4234 struct ring_buffer *rb; 4235 u64 enabled, running, now; 4236 4237 rcu_read_lock(); 4238 rb = rcu_dereference(event->rb); 4239 if (!rb) 4240 goto unlock; 4241 4242 /* 4243 * compute total_time_enabled, total_time_running 4244 * based on snapshot values taken when the event 4245 * was last scheduled in. 4246 * 4247 * we cannot simply call update_context_time() 4248 * because of locking issues, as we can be called in 4249 * NMI context 4250 */ 4251 calc_timer_values(event, &now, &enabled, &running); 4252 4253 userpg = rb->user_page; 4254 /* 4255 * Disable preemption so as to not let the corresponding user-space 4256 * spin too long if we get preempted. 4257 */ 4258 preempt_disable(); 4259 ++userpg->lock; 4260 barrier(); 4261 userpg->index = perf_event_index(event); 4262 userpg->offset = perf_event_count(event); 4263 if (userpg->index) 4264 userpg->offset -= local64_read(&event->hw.prev_count); 4265 4266 userpg->time_enabled = enabled + 4267 atomic64_read(&event->child_total_time_enabled); 4268 4269 userpg->time_running = running + 4270 atomic64_read(&event->child_total_time_running); 4271 4272 arch_perf_update_userpage(event, userpg, now); 4273 4274 barrier(); 4275 ++userpg->lock; 4276 preempt_enable(); 4277 unlock: 4278 rcu_read_unlock(); 4279 } 4280 4281 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4282 { 4283 struct perf_event *event = vma->vm_file->private_data; 4284 struct ring_buffer *rb; 4285 int ret = VM_FAULT_SIGBUS; 4286 4287 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4288 if (vmf->pgoff == 0) 4289 ret = 0; 4290 return ret; 4291 } 4292 4293 rcu_read_lock(); 4294 rb = rcu_dereference(event->rb); 4295 if (!rb) 4296 goto unlock; 4297 4298 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4299 goto unlock; 4300 4301 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4302 if (!vmf->page) 4303 goto unlock; 4304 4305 get_page(vmf->page); 4306 vmf->page->mapping = vma->vm_file->f_mapping; 4307 vmf->page->index = vmf->pgoff; 4308 4309 ret = 0; 4310 unlock: 4311 rcu_read_unlock(); 4312 4313 return ret; 4314 } 4315 4316 static void ring_buffer_attach(struct perf_event *event, 4317 struct ring_buffer *rb) 4318 { 4319 struct ring_buffer *old_rb = NULL; 4320 unsigned long flags; 4321 4322 if (event->rb) { 4323 /* 4324 * Should be impossible, we set this when removing 4325 * event->rb_entry and wait/clear when adding event->rb_entry.
4326 */ 4327 WARN_ON_ONCE(event->rcu_pending); 4328 4329 old_rb = event->rb; 4330 spin_lock_irqsave(&old_rb->event_lock, flags); 4331 list_del_rcu(&event->rb_entry); 4332 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4333 4334 event->rcu_batches = get_state_synchronize_rcu(); 4335 event->rcu_pending = 1; 4336 } 4337 4338 if (rb) { 4339 if (event->rcu_pending) { 4340 cond_synchronize_rcu(event->rcu_batches); 4341 event->rcu_pending = 0; 4342 } 4343 4344 spin_lock_irqsave(&rb->event_lock, flags); 4345 list_add_rcu(&event->rb_entry, &rb->event_list); 4346 spin_unlock_irqrestore(&rb->event_lock, flags); 4347 } 4348 4349 rcu_assign_pointer(event->rb, rb); 4350 4351 if (old_rb) { 4352 ring_buffer_put(old_rb); 4353 /* 4354 * Since we detached before setting the new rb, so that we 4355 * could attach the new rb, we could have missed a wakeup. 4356 * Provide it now. 4357 */ 4358 wake_up_all(&event->waitq); 4359 } 4360 } 4361 4362 static void ring_buffer_wakeup(struct perf_event *event) 4363 { 4364 struct ring_buffer *rb; 4365 4366 rcu_read_lock(); 4367 rb = rcu_dereference(event->rb); 4368 if (rb) { 4369 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4370 wake_up_all(&event->waitq); 4371 } 4372 rcu_read_unlock(); 4373 } 4374 4375 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4376 { 4377 struct ring_buffer *rb; 4378 4379 rcu_read_lock(); 4380 rb = rcu_dereference(event->rb); 4381 if (rb) { 4382 if (!atomic_inc_not_zero(&rb->refcount)) 4383 rb = NULL; 4384 } 4385 rcu_read_unlock(); 4386 4387 return rb; 4388 } 4389 4390 void ring_buffer_put(struct ring_buffer *rb) 4391 { 4392 if (!atomic_dec_and_test(&rb->refcount)) 4393 return; 4394 4395 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4396 4397 call_rcu(&rb->rcu_head, rb_free_rcu); 4398 } 4399 4400 static void perf_mmap_open(struct vm_area_struct *vma) 4401 { 4402 struct perf_event *event = vma->vm_file->private_data; 4403 4404 atomic_inc(&event->mmap_count); 4405 atomic_inc(&event->rb->mmap_count); 4406 4407 if (vma->vm_pgoff) 4408 atomic_inc(&event->rb->aux_mmap_count); 4409 4410 if (event->pmu->event_mapped) 4411 event->pmu->event_mapped(event); 4412 } 4413 4414 /* 4415 * A buffer can be mmap()ed multiple times; either directly through the same 4416 * event, or through other events by use of perf_event_set_output(). 4417 * 4418 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4419 * the buffer here, where we still have a VM context. This means we need 4420 * to detach all events redirecting to us. 4421 */ 4422 static void perf_mmap_close(struct vm_area_struct *vma) 4423 { 4424 struct perf_event *event = vma->vm_file->private_data; 4425 4426 struct ring_buffer *rb = ring_buffer_get(event); 4427 struct user_struct *mmap_user = rb->mmap_user; 4428 int mmap_locked = rb->mmap_locked; 4429 unsigned long size = perf_data_size(rb); 4430 4431 if (event->pmu->event_unmapped) 4432 event->pmu->event_unmapped(event); 4433 4434 /* 4435 * rb->aux_mmap_count will always drop before rb->mmap_count and 4436 * event->mmap_count, so it is ok to use event->mmap_mutex to 4437 * serialize with perf_mmap here. 
4438 */ 4439 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4440 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4441 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4442 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4443 4444 rb_free_aux(rb); 4445 mutex_unlock(&event->mmap_mutex); 4446 } 4447 4448 atomic_dec(&rb->mmap_count); 4449 4450 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4451 goto out_put; 4452 4453 ring_buffer_attach(event, NULL); 4454 mutex_unlock(&event->mmap_mutex); 4455 4456 /* If there's still other mmap()s of this buffer, we're done. */ 4457 if (atomic_read(&rb->mmap_count)) 4458 goto out_put; 4459 4460 /* 4461 * No other mmap()s, detach from all other events that might redirect 4462 * into the now unreachable buffer. Somewhat complicated by the 4463 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4464 */ 4465 again: 4466 rcu_read_lock(); 4467 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4468 if (!atomic_long_inc_not_zero(&event->refcount)) { 4469 /* 4470 * This event is en-route to free_event() which will 4471 * detach it and remove it from the list. 4472 */ 4473 continue; 4474 } 4475 rcu_read_unlock(); 4476 4477 mutex_lock(&event->mmap_mutex); 4478 /* 4479 * Check we didn't race with perf_event_set_output() which can 4480 * swizzle the rb from under us while we were waiting to 4481 * acquire mmap_mutex. 4482 * 4483 * If we find a different rb; ignore this event, a next 4484 * iteration will no longer find it on the list. We have to 4485 * still restart the iteration to make sure we're not now 4486 * iterating the wrong list. 4487 */ 4488 if (event->rb == rb) 4489 ring_buffer_attach(event, NULL); 4490 4491 mutex_unlock(&event->mmap_mutex); 4492 put_event(event); 4493 4494 /* 4495 * Restart the iteration; either we're on the wrong list or 4496 * destroyed its integrity by doing a deletion. 4497 */ 4498 goto again; 4499 } 4500 rcu_read_unlock(); 4501 4502 /* 4503 * It could be there's still a few 0-ref events on the list; they'll 4504 * get cleaned up by free_event() -- they'll also still have their 4505 * ref on the rb and will free it whenever they are done with it. 4506 * 4507 * Aside from that, this buffer is 'fully' detached and unmapped, 4508 * undo the VM accounting. 4509 */ 4510 4511 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4512 vma->vm_mm->pinned_vm -= mmap_locked; 4513 free_uid(mmap_user); 4514 4515 out_put: 4516 ring_buffer_put(rb); /* could be last */ 4517 } 4518 4519 static const struct vm_operations_struct perf_mmap_vmops = { 4520 .open = perf_mmap_open, 4521 .close = perf_mmap_close, /* non mergable */ 4522 .fault = perf_mmap_fault, 4523 .page_mkwrite = perf_mmap_fault, 4524 }; 4525 4526 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4527 { 4528 struct perf_event *event = file->private_data; 4529 unsigned long user_locked, user_lock_limit; 4530 struct user_struct *user = current_user(); 4531 unsigned long locked, lock_limit; 4532 struct ring_buffer *rb = NULL; 4533 unsigned long vma_size; 4534 unsigned long nr_pages; 4535 long user_extra = 0, extra = 0; 4536 int ret = 0, flags = 0; 4537 4538 /* 4539 * Don't allow mmap() of inherited per-task counters. This would 4540 * create a performance issue due to all children writing to the 4541 * same rb. 
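 *
 * For reference, a typical mapping from user space looks like the
 * following sketch (assuming fd came from perf_event_open() and n is a
 * power of two):
 *
 *	base = mmap(NULL, (n + 1) * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *
 * Page 0 is the control page (struct perf_event_mmap_page), followed by
 * n data pages. An AUX area, if used, is mapped with a second mmap()
 * whose offset and length must match user_page->aux_offset/aux_size,
 * which is what the checks below enforce.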
4542 */ 4543 if (event->cpu == -1 && event->attr.inherit) 4544 return -EINVAL; 4545 4546 if (!(vma->vm_flags & VM_SHARED)) 4547 return -EINVAL; 4548 4549 vma_size = vma->vm_end - vma->vm_start; 4550 4551 if (vma->vm_pgoff == 0) { 4552 nr_pages = (vma_size / PAGE_SIZE) - 1; 4553 } else { 4554 /* 4555 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4556 * mapped, all subsequent mappings should have the same size 4557 * and offset. Must be above the normal perf buffer. 4558 */ 4559 u64 aux_offset, aux_size; 4560 4561 if (!event->rb) 4562 return -EINVAL; 4563 4564 nr_pages = vma_size / PAGE_SIZE; 4565 4566 mutex_lock(&event->mmap_mutex); 4567 ret = -EINVAL; 4568 4569 rb = event->rb; 4570 if (!rb) 4571 goto aux_unlock; 4572 4573 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4574 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4575 4576 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4577 goto aux_unlock; 4578 4579 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4580 goto aux_unlock; 4581 4582 /* already mapped with a different offset */ 4583 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4584 goto aux_unlock; 4585 4586 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4587 goto aux_unlock; 4588 4589 /* already mapped with a different size */ 4590 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4591 goto aux_unlock; 4592 4593 if (!is_power_of_2(nr_pages)) 4594 goto aux_unlock; 4595 4596 if (!atomic_inc_not_zero(&rb->mmap_count)) 4597 goto aux_unlock; 4598 4599 if (rb_has_aux(rb)) { 4600 atomic_inc(&rb->aux_mmap_count); 4601 ret = 0; 4602 goto unlock; 4603 } 4604 4605 atomic_set(&rb->aux_mmap_count, 1); 4606 user_extra = nr_pages; 4607 4608 goto accounting; 4609 } 4610 4611 /* 4612 * If we have rb pages ensure they're a power-of-two number, so we 4613 * can do bitmasks instead of modulo. 4614 */ 4615 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4616 return -EINVAL; 4617 4618 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4619 return -EINVAL; 4620 4621 WARN_ON_ONCE(event->ctx->parent_ctx); 4622 again: 4623 mutex_lock(&event->mmap_mutex); 4624 if (event->rb) { 4625 if (event->rb->nr_pages != nr_pages) { 4626 ret = -EINVAL; 4627 goto unlock; 4628 } 4629 4630 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4631 /* 4632 * Raced against perf_mmap_close() through 4633 * perf_event_set_output(). Try again, hope for better 4634 * luck. 4635 */ 4636 mutex_unlock(&event->mmap_mutex); 4637 goto again; 4638 } 4639 4640 goto unlock; 4641 } 4642 4643 user_extra = nr_pages + 1; 4644 4645 accounting: 4646 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4647 4648 /* 4649 * Increase the limit linearly with more CPUs: 4650 */ 4651 user_lock_limit *= num_online_cpus(); 4652 4653 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4654 4655 if (user_locked > user_lock_limit) 4656 extra = user_locked - user_lock_limit; 4657 4658 lock_limit = rlimit(RLIMIT_MEMLOCK); 4659 lock_limit >>= PAGE_SHIFT; 4660 locked = vma->vm_mm->pinned_vm + extra; 4661 4662 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4663 !capable(CAP_IPC_LOCK)) { 4664 ret = -EPERM; 4665 goto unlock; 4666 } 4667 4668 WARN_ON(!rb && event->rb); 4669 4670 if (vma->vm_flags & VM_WRITE) 4671 flags |= RING_BUFFER_WRITABLE; 4672 4673 if (!rb) { 4674 rb = rb_alloc(nr_pages, 4675 event->attr.watermark ? 
event->attr.wakeup_watermark : 0, 4676 event->cpu, flags); 4677 4678 if (!rb) { 4679 ret = -ENOMEM; 4680 goto unlock; 4681 } 4682 4683 atomic_set(&rb->mmap_count, 1); 4684 rb->mmap_user = get_current_user(); 4685 rb->mmap_locked = extra; 4686 4687 ring_buffer_attach(event, rb); 4688 4689 perf_event_init_userpage(event); 4690 perf_event_update_userpage(event); 4691 } else { 4692 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4693 event->attr.aux_watermark, flags); 4694 if (!ret) 4695 rb->aux_mmap_locked = extra; 4696 } 4697 4698 unlock: 4699 if (!ret) { 4700 atomic_long_add(user_extra, &user->locked_vm); 4701 vma->vm_mm->pinned_vm += extra; 4702 4703 atomic_inc(&event->mmap_count); 4704 } else if (rb) { 4705 atomic_dec(&rb->mmap_count); 4706 } 4707 aux_unlock: 4708 mutex_unlock(&event->mmap_mutex); 4709 4710 /* 4711 * Since pinned accounting is per vm we cannot allow fork() to copy our 4712 * vma. 4713 */ 4714 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4715 vma->vm_ops = &perf_mmap_vmops; 4716 4717 if (event->pmu->event_mapped) 4718 event->pmu->event_mapped(event); 4719 4720 return ret; 4721 } 4722 4723 static int perf_fasync(int fd, struct file *filp, int on) 4724 { 4725 struct inode *inode = file_inode(filp); 4726 struct perf_event *event = filp->private_data; 4727 int retval; 4728 4729 mutex_lock(&inode->i_mutex); 4730 retval = fasync_helper(fd, filp, on, &event->fasync); 4731 mutex_unlock(&inode->i_mutex); 4732 4733 if (retval < 0) 4734 return retval; 4735 4736 return 0; 4737 } 4738 4739 static const struct file_operations perf_fops = { 4740 .llseek = no_llseek, 4741 .release = perf_release, 4742 .read = perf_read, 4743 .poll = perf_poll, 4744 .unlocked_ioctl = perf_ioctl, 4745 .compat_ioctl = perf_compat_ioctl, 4746 .mmap = perf_mmap, 4747 .fasync = perf_fasync, 4748 }; 4749 4750 /* 4751 * Perf event wakeup 4752 * 4753 * If there's data, ensure we set the poll() state and publish everything 4754 * to user-space before waking everybody up. 4755 */ 4756 4757 void perf_event_wakeup(struct perf_event *event) 4758 { 4759 ring_buffer_wakeup(event); 4760 4761 if (event->pending_kill) { 4762 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4763 event->pending_kill = 0; 4764 } 4765 } 4766 4767 static void perf_pending_event(struct irq_work *entry) 4768 { 4769 struct perf_event *event = container_of(entry, 4770 struct perf_event, pending); 4771 int rctx; 4772 4773 rctx = perf_swevent_get_recursion_context(); 4774 /* 4775 * If we 'fail' here, that's OK, it means recursion is already disabled 4776 * and we won't recurse 'further'. 4777 */ 4778 4779 if (event->pending_disable) { 4780 event->pending_disable = 0; 4781 __perf_event_disable(event); 4782 } 4783 4784 if (event->pending_wakeup) { 4785 event->pending_wakeup = 0; 4786 perf_event_wakeup(event); 4787 } 4788 4789 if (rctx >= 0) 4790 perf_swevent_put_recursion_context(rctx); 4791 } 4792 4793 /* 4794 * We assume there is only KVM supporting the callbacks. 4795 * Later on, we might change it to a list if there is 4796 * another virtualization implementation supporting the callbacks. 
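 *
 * These callbacks are consulted from the sample/interrupt paths (e.g. to
 * tag samples with the PERF_RECORD_MISC_GUEST_* misc flags and to fetch
 * the guest instruction pointer), so an implementation has to be safe to
 * call from NMI context.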
4797 */ 4798 struct perf_guest_info_callbacks *perf_guest_cbs; 4799 4800 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4801 { 4802 perf_guest_cbs = cbs; 4803 return 0; 4804 } 4805 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 4806 4807 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4808 { 4809 perf_guest_cbs = NULL; 4810 return 0; 4811 } 4812 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 4813 4814 static void 4815 perf_output_sample_regs(struct perf_output_handle *handle, 4816 struct pt_regs *regs, u64 mask) 4817 { 4818 int bit; 4819 4820 for_each_set_bit(bit, (const unsigned long *) &mask, 4821 sizeof(mask) * BITS_PER_BYTE) { 4822 u64 val; 4823 4824 val = perf_reg_value(regs, bit); 4825 perf_output_put(handle, val); 4826 } 4827 } 4828 4829 static void perf_sample_regs_user(struct perf_regs *regs_user, 4830 struct pt_regs *regs, 4831 struct pt_regs *regs_user_copy) 4832 { 4833 if (user_mode(regs)) { 4834 regs_user->abi = perf_reg_abi(current); 4835 regs_user->regs = regs; 4836 } else if (current->mm) { 4837 perf_get_regs_user(regs_user, regs, regs_user_copy); 4838 } else { 4839 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 4840 regs_user->regs = NULL; 4841 } 4842 } 4843 4844 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 4845 struct pt_regs *regs) 4846 { 4847 regs_intr->regs = regs; 4848 regs_intr->abi = perf_reg_abi(current); 4849 } 4850 4851 4852 /* 4853 * Get remaining task size from user stack pointer. 4854 * 4855 * It'd be better to take the stack vma map and limit this more 4856 * precisely, but there's no way to get it safely under interrupt, 4857 * so we use TASK_SIZE as the limit. 4858 */ 4859 static u64 perf_ustack_task_size(struct pt_regs *regs) 4860 { 4861 unsigned long addr = perf_user_stack_pointer(regs); 4862 4863 if (!addr || addr >= TASK_SIZE) 4864 return 0; 4865 4866 return TASK_SIZE - addr; 4867 } 4868 4869 static u16 4870 perf_sample_ustack_size(u16 stack_size, u16 header_size, 4871 struct pt_regs *regs) 4872 { 4873 u64 task_size; 4874 4875 /* No regs, no stack pointer, no dump. */ 4876 if (!regs) 4877 return 0; 4878 4879 /* 4880 * Check whether the requested stack size fits within: 4881 * - TASK_SIZE 4882 * If it doesn't, we limit the size to TASK_SIZE. 4883 * 4884 * - the remaining sample size 4885 * If it doesn't, we shrink the stack size to 4886 * fit the remaining sample size. 4887 */ 4888 4889 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 4890 stack_size = min(stack_size, (u16) task_size); 4891 4892 /* Current header size plus static size and dynamic size. */ 4893 header_size += 2 * sizeof(u64); 4894 4895 /* Do we fit in with the current stack dump size? */ 4896 if ((u16) (header_size + stack_size) < header_size) { 4897 /* 4898 * If we overflow the maximum size for the sample, 4899 * we shrink the stack dump size to fit.
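 *
 * A worked example: with header_size already at 65000 bytes, a requested
 * 16 KiB dump would wrap the u16 record size, so it is trimmed to
 * USHRT_MAX - 65000 - 8 = 527 bytes and then rounded up to a multiple
 * of sizeof(u64).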
4900 */ 4901 stack_size = USHRT_MAX - header_size - sizeof(u64); 4902 stack_size = round_up(stack_size, sizeof(u64)); 4903 } 4904 4905 return stack_size; 4906 } 4907 4908 static void 4909 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 4910 struct pt_regs *regs) 4911 { 4912 /* Case of a kernel thread, nothing to dump */ 4913 if (!regs) { 4914 u64 size = 0; 4915 perf_output_put(handle, size); 4916 } else { 4917 unsigned long sp; 4918 unsigned int rem; 4919 u64 dyn_size; 4920 4921 /* 4922 * We dump: 4923 * static size 4924 * - the size requested by user or the best one we can fit 4925 * in to the sample max size 4926 * data 4927 * - user stack dump data 4928 * dynamic size 4929 * - the actual dumped size 4930 */ 4931 4932 /* Static size. */ 4933 perf_output_put(handle, dump_size); 4934 4935 /* Data. */ 4936 sp = perf_user_stack_pointer(regs); 4937 rem = __output_copy_user(handle, (void *) sp, dump_size); 4938 dyn_size = dump_size - rem; 4939 4940 perf_output_skip(handle, rem); 4941 4942 /* Dynamic size. */ 4943 perf_output_put(handle, dyn_size); 4944 } 4945 } 4946 4947 static void __perf_event_header__init_id(struct perf_event_header *header, 4948 struct perf_sample_data *data, 4949 struct perf_event *event) 4950 { 4951 u64 sample_type = event->attr.sample_type; 4952 4953 data->type = sample_type; 4954 header->size += event->id_header_size; 4955 4956 if (sample_type & PERF_SAMPLE_TID) { 4957 /* namespace issues */ 4958 data->tid_entry.pid = perf_event_pid(event, current); 4959 data->tid_entry.tid = perf_event_tid(event, current); 4960 } 4961 4962 if (sample_type & PERF_SAMPLE_TIME) 4963 data->time = perf_event_clock(event); 4964 4965 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 4966 data->id = primary_event_id(event); 4967 4968 if (sample_type & PERF_SAMPLE_STREAM_ID) 4969 data->stream_id = event->id; 4970 4971 if (sample_type & PERF_SAMPLE_CPU) { 4972 data->cpu_entry.cpu = raw_smp_processor_id(); 4973 data->cpu_entry.reserved = 0; 4974 } 4975 } 4976 4977 void perf_event_header__init_id(struct perf_event_header *header, 4978 struct perf_sample_data *data, 4979 struct perf_event *event) 4980 { 4981 if (event->attr.sample_id_all) 4982 __perf_event_header__init_id(header, data, event); 4983 } 4984 4985 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 4986 struct perf_sample_data *data) 4987 { 4988 u64 sample_type = data->type; 4989 4990 if (sample_type & PERF_SAMPLE_TID) 4991 perf_output_put(handle, data->tid_entry); 4992 4993 if (sample_type & PERF_SAMPLE_TIME) 4994 perf_output_put(handle, data->time); 4995 4996 if (sample_type & PERF_SAMPLE_ID) 4997 perf_output_put(handle, data->id); 4998 4999 if (sample_type & PERF_SAMPLE_STREAM_ID) 5000 perf_output_put(handle, data->stream_id); 5001 5002 if (sample_type & PERF_SAMPLE_CPU) 5003 perf_output_put(handle, data->cpu_entry); 5004 5005 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5006 perf_output_put(handle, data->id); 5007 } 5008 5009 void perf_event__output_id_sample(struct perf_event *event, 5010 struct perf_output_handle *handle, 5011 struct perf_sample_data *sample) 5012 { 5013 if (event->attr.sample_id_all) 5014 __perf_event__output_id_sample(handle, sample); 5015 } 5016 5017 static void perf_output_read_one(struct perf_output_handle *handle, 5018 struct perf_event *event, 5019 u64 enabled, u64 running) 5020 { 5021 u64 read_format = event->attr.read_format; 5022 u64 values[4]; 5023 int n = 0; 5024 5025 values[n++] = perf_event_count(event); 5026 if (read_format & 
PERF_FORMAT_TOTAL_TIME_ENABLED) { 5027 values[n++] = enabled + 5028 atomic64_read(&event->child_total_time_enabled); 5029 } 5030 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5031 values[n++] = running + 5032 atomic64_read(&event->child_total_time_running); 5033 } 5034 if (read_format & PERF_FORMAT_ID) 5035 values[n++] = primary_event_id(event); 5036 5037 __output_copy(handle, values, n * sizeof(u64)); 5038 } 5039 5040 /* 5041 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5042 */ 5043 static void perf_output_read_group(struct perf_output_handle *handle, 5044 struct perf_event *event, 5045 u64 enabled, u64 running) 5046 { 5047 struct perf_event *leader = event->group_leader, *sub; 5048 u64 read_format = event->attr.read_format; 5049 u64 values[5]; 5050 int n = 0; 5051 5052 values[n++] = 1 + leader->nr_siblings; 5053 5054 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5055 values[n++] = enabled; 5056 5057 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5058 values[n++] = running; 5059 5060 if (leader != event) 5061 leader->pmu->read(leader); 5062 5063 values[n++] = perf_event_count(leader); 5064 if (read_format & PERF_FORMAT_ID) 5065 values[n++] = primary_event_id(leader); 5066 5067 __output_copy(handle, values, n * sizeof(u64)); 5068 5069 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5070 n = 0; 5071 5072 if ((sub != event) && 5073 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5074 sub->pmu->read(sub); 5075 5076 values[n++] = perf_event_count(sub); 5077 if (read_format & PERF_FORMAT_ID) 5078 values[n++] = primary_event_id(sub); 5079 5080 __output_copy(handle, values, n * sizeof(u64)); 5081 } 5082 } 5083 5084 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5085 PERF_FORMAT_TOTAL_TIME_RUNNING) 5086 5087 static void perf_output_read(struct perf_output_handle *handle, 5088 struct perf_event *event) 5089 { 5090 u64 enabled = 0, running = 0, now; 5091 u64 read_format = event->attr.read_format; 5092 5093 /* 5094 * compute total_time_enabled, total_time_running 5095 * based on snapshot values taken when the event 5096 * was last scheduled in. 
5097 * 5098 * we cannot simply call update_context_time() 5099 * because of locking issues, as we are called in 5100 * NMI context 5101 */ 5102 if (read_format & PERF_FORMAT_TOTAL_TIMES) 5103 calc_timer_values(event, &now, &enabled, &running); 5104 5105 if (event->attr.read_format & PERF_FORMAT_GROUP) 5106 perf_output_read_group(handle, event, enabled, running); 5107 else 5108 perf_output_read_one(handle, event, enabled, running); 5109 } 5110 5111 void perf_output_sample(struct perf_output_handle *handle, 5112 struct perf_event_header *header, 5113 struct perf_sample_data *data, 5114 struct perf_event *event) 5115 { 5116 u64 sample_type = data->type; 5117 5118 perf_output_put(handle, *header); 5119 5120 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5121 perf_output_put(handle, data->id); 5122 5123 if (sample_type & PERF_SAMPLE_IP) 5124 perf_output_put(handle, data->ip); 5125 5126 if (sample_type & PERF_SAMPLE_TID) 5127 perf_output_put(handle, data->tid_entry); 5128 5129 if (sample_type & PERF_SAMPLE_TIME) 5130 perf_output_put(handle, data->time); 5131 5132 if (sample_type & PERF_SAMPLE_ADDR) 5133 perf_output_put(handle, data->addr); 5134 5135 if (sample_type & PERF_SAMPLE_ID) 5136 perf_output_put(handle, data->id); 5137 5138 if (sample_type & PERF_SAMPLE_STREAM_ID) 5139 perf_output_put(handle, data->stream_id); 5140 5141 if (sample_type & PERF_SAMPLE_CPU) 5142 perf_output_put(handle, data->cpu_entry); 5143 5144 if (sample_type & PERF_SAMPLE_PERIOD) 5145 perf_output_put(handle, data->period); 5146 5147 if (sample_type & PERF_SAMPLE_READ) 5148 perf_output_read(handle, event); 5149 5150 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5151 if (data->callchain) { 5152 int size = 1; 5153 5154 if (data->callchain) 5155 size += data->callchain->nr; 5156 5157 size *= sizeof(u64); 5158 5159 __output_copy(handle, data->callchain, size); 5160 } else { 5161 u64 nr = 0; 5162 perf_output_put(handle, nr); 5163 } 5164 } 5165 5166 if (sample_type & PERF_SAMPLE_RAW) { 5167 if (data->raw) { 5168 perf_output_put(handle, data->raw->size); 5169 __output_copy(handle, data->raw->data, 5170 data->raw->size); 5171 } else { 5172 struct { 5173 u32 size; 5174 u32 data; 5175 } raw = { 5176 .size = sizeof(u32), 5177 .data = 0, 5178 }; 5179 perf_output_put(handle, raw); 5180 } 5181 } 5182 5183 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5184 if (data->br_stack) { 5185 size_t size; 5186 5187 size = data->br_stack->nr 5188 * sizeof(struct perf_branch_entry); 5189 5190 perf_output_put(handle, data->br_stack->nr); 5191 perf_output_copy(handle, data->br_stack->entries, size); 5192 } else { 5193 /* 5194 * we always store at least the value of nr 5195 */ 5196 u64 nr = 0; 5197 perf_output_put(handle, nr); 5198 } 5199 } 5200 5201 if (sample_type & PERF_SAMPLE_REGS_USER) { 5202 u64 abi = data->regs_user.abi; 5203 5204 /* 5205 * If there are no regs to dump, notice it through 5206 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
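 *
 * Otherwise the ABI word is followed by one u64 per bit set in
 * attr.sample_regs_user, in ascending bit order, as written by
 * perf_output_sample_regs() above.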
5207 */ 5208 perf_output_put(handle, abi); 5209 5210 if (abi) { 5211 u64 mask = event->attr.sample_regs_user; 5212 perf_output_sample_regs(handle, 5213 data->regs_user.regs, 5214 mask); 5215 } 5216 } 5217 5218 if (sample_type & PERF_SAMPLE_STACK_USER) { 5219 perf_output_sample_ustack(handle, 5220 data->stack_user_size, 5221 data->regs_user.regs); 5222 } 5223 5224 if (sample_type & PERF_SAMPLE_WEIGHT) 5225 perf_output_put(handle, data->weight); 5226 5227 if (sample_type & PERF_SAMPLE_DATA_SRC) 5228 perf_output_put(handle, data->data_src.val); 5229 5230 if (sample_type & PERF_SAMPLE_TRANSACTION) 5231 perf_output_put(handle, data->txn); 5232 5233 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5234 u64 abi = data->regs_intr.abi; 5235 /* 5236 * If there are no regs to dump, notice it through 5237 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5238 */ 5239 perf_output_put(handle, abi); 5240 5241 if (abi) { 5242 u64 mask = event->attr.sample_regs_intr; 5243 5244 perf_output_sample_regs(handle, 5245 data->regs_intr.regs, 5246 mask); 5247 } 5248 } 5249 5250 if (!event->attr.watermark) { 5251 int wakeup_events = event->attr.wakeup_events; 5252 5253 if (wakeup_events) { 5254 struct ring_buffer *rb = handle->rb; 5255 int events = local_inc_return(&rb->events); 5256 5257 if (events >= wakeup_events) { 5258 local_sub(wakeup_events, &rb->events); 5259 local_inc(&rb->wakeup); 5260 } 5261 } 5262 } 5263 } 5264 5265 void perf_prepare_sample(struct perf_event_header *header, 5266 struct perf_sample_data *data, 5267 struct perf_event *event, 5268 struct pt_regs *regs) 5269 { 5270 u64 sample_type = event->attr.sample_type; 5271 5272 header->type = PERF_RECORD_SAMPLE; 5273 header->size = sizeof(*header) + event->header_size; 5274 5275 header->misc = 0; 5276 header->misc |= perf_misc_flags(regs); 5277 5278 __perf_event_header__init_id(header, data, event); 5279 5280 if (sample_type & PERF_SAMPLE_IP) 5281 data->ip = perf_instruction_pointer(regs); 5282 5283 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5284 int size = 1; 5285 5286 data->callchain = perf_callchain(event, regs); 5287 5288 if (data->callchain) 5289 size += data->callchain->nr; 5290 5291 header->size += size * sizeof(u64); 5292 } 5293 5294 if (sample_type & PERF_SAMPLE_RAW) { 5295 int size = sizeof(u32); 5296 5297 if (data->raw) 5298 size += data->raw->size; 5299 else 5300 size += sizeof(u32); 5301 5302 WARN_ON_ONCE(size & (sizeof(u64)-1)); 5303 header->size += size; 5304 } 5305 5306 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5307 int size = sizeof(u64); /* nr */ 5308 if (data->br_stack) { 5309 size += data->br_stack->nr 5310 * sizeof(struct perf_branch_entry); 5311 } 5312 header->size += size; 5313 } 5314 5315 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 5316 perf_sample_regs_user(&data->regs_user, regs, 5317 &data->regs_user_copy); 5318 5319 if (sample_type & PERF_SAMPLE_REGS_USER) { 5320 /* regs dump ABI info */ 5321 int size = sizeof(u64); 5322 5323 if (data->regs_user.regs) { 5324 u64 mask = event->attr.sample_regs_user; 5325 size += hweight64(mask) * sizeof(u64); 5326 } 5327 5328 header->size += size; 5329 } 5330 5331 if (sample_type & PERF_SAMPLE_STACK_USER) { 5332 /* 5333 * Either the PERF_SAMPLE_STACK_USER bit needs to always be 5334 * processed as the last one, or an additional check must be 5335 * added when a new sample type is introduced, because we could 5336 * otherwise eat up the rest of the sample size.
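 *
 * The resulting record layout for this bit is: a u64 with the (possibly
 * clamped) dump size computed below, that many bytes of stack data, and
 * a trailing u64 dyn_size holding the bytes actually copied; see
 * perf_output_sample_ustack() above.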
5337 */ 5338 u16 stack_size = event->attr.sample_stack_user; 5339 u16 size = sizeof(u64); 5340 5341 stack_size = perf_sample_ustack_size(stack_size, header->size, 5342 data->regs_user.regs); 5343 5344 /* 5345 * If there is something to dump, add space for the dump 5346 * itself and for the field that tells the dynamic size, 5347 * which is how many have been actually dumped. 5348 */ 5349 if (stack_size) 5350 size += sizeof(u64) + stack_size; 5351 5352 data->stack_user_size = stack_size; 5353 header->size += size; 5354 } 5355 5356 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5357 /* regs dump ABI info */ 5358 int size = sizeof(u64); 5359 5360 perf_sample_regs_intr(&data->regs_intr, regs); 5361 5362 if (data->regs_intr.regs) { 5363 u64 mask = event->attr.sample_regs_intr; 5364 5365 size += hweight64(mask) * sizeof(u64); 5366 } 5367 5368 header->size += size; 5369 } 5370 } 5371 5372 void perf_event_output(struct perf_event *event, 5373 struct perf_sample_data *data, 5374 struct pt_regs *regs) 5375 { 5376 struct perf_output_handle handle; 5377 struct perf_event_header header; 5378 5379 /* protect the callchain buffers */ 5380 rcu_read_lock(); 5381 5382 perf_prepare_sample(&header, data, event, regs); 5383 5384 if (perf_output_begin(&handle, event, header.size)) 5385 goto exit; 5386 5387 perf_output_sample(&handle, &header, data, event); 5388 5389 perf_output_end(&handle); 5390 5391 exit: 5392 rcu_read_unlock(); 5393 } 5394 5395 /* 5396 * read event_id 5397 */ 5398 5399 struct perf_read_event { 5400 struct perf_event_header header; 5401 5402 u32 pid; 5403 u32 tid; 5404 }; 5405 5406 static void 5407 perf_event_read_event(struct perf_event *event, 5408 struct task_struct *task) 5409 { 5410 struct perf_output_handle handle; 5411 struct perf_sample_data sample; 5412 struct perf_read_event read_event = { 5413 .header = { 5414 .type = PERF_RECORD_READ, 5415 .misc = 0, 5416 .size = sizeof(read_event) + event->read_size, 5417 }, 5418 .pid = perf_event_pid(event, task), 5419 .tid = perf_event_tid(event, task), 5420 }; 5421 int ret; 5422 5423 perf_event_header__init_id(&read_event.header, &sample, event); 5424 ret = perf_output_begin(&handle, event, read_event.header.size); 5425 if (ret) 5426 return; 5427 5428 perf_output_put(&handle, read_event); 5429 perf_output_read(&handle, event); 5430 perf_event__output_id_sample(event, &handle, &sample); 5431 5432 perf_output_end(&handle); 5433 } 5434 5435 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5436 5437 static void 5438 perf_event_aux_ctx(struct perf_event_context *ctx, 5439 perf_event_aux_output_cb output, 5440 void *data) 5441 { 5442 struct perf_event *event; 5443 5444 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5445 if (event->state < PERF_EVENT_STATE_INACTIVE) 5446 continue; 5447 if (!event_filter_match(event)) 5448 continue; 5449 output(event, data); 5450 } 5451 } 5452 5453 static void 5454 perf_event_aux(perf_event_aux_output_cb output, void *data, 5455 struct perf_event_context *task_ctx) 5456 { 5457 struct perf_cpu_context *cpuctx; 5458 struct perf_event_context *ctx; 5459 struct pmu *pmu; 5460 int ctxn; 5461 5462 rcu_read_lock(); 5463 list_for_each_entry_rcu(pmu, &pmus, entry) { 5464 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5465 if (cpuctx->unique_pmu != pmu) 5466 goto next; 5467 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5468 if (task_ctx) 5469 goto next; 5470 ctxn = pmu->task_ctx_nr; 5471 if (ctxn < 0) 5472 goto next; 5473 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5474 if 
(ctx) 5475 perf_event_aux_ctx(ctx, output, data); 5476 next: 5477 put_cpu_ptr(pmu->pmu_cpu_context); 5478 } 5479 5480 if (task_ctx) { 5481 preempt_disable(); 5482 perf_event_aux_ctx(task_ctx, output, data); 5483 preempt_enable(); 5484 } 5485 rcu_read_unlock(); 5486 } 5487 5488 /* 5489 * task tracking -- fork/exit 5490 * 5491 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5492 */ 5493 5494 struct perf_task_event { 5495 struct task_struct *task; 5496 struct perf_event_context *task_ctx; 5497 5498 struct { 5499 struct perf_event_header header; 5500 5501 u32 pid; 5502 u32 ppid; 5503 u32 tid; 5504 u32 ptid; 5505 u64 time; 5506 } event_id; 5507 }; 5508 5509 static int perf_event_task_match(struct perf_event *event) 5510 { 5511 return event->attr.comm || event->attr.mmap || 5512 event->attr.mmap2 || event->attr.mmap_data || 5513 event->attr.task; 5514 } 5515 5516 static void perf_event_task_output(struct perf_event *event, 5517 void *data) 5518 { 5519 struct perf_task_event *task_event = data; 5520 struct perf_output_handle handle; 5521 struct perf_sample_data sample; 5522 struct task_struct *task = task_event->task; 5523 int ret, size = task_event->event_id.header.size; 5524 5525 if (!perf_event_task_match(event)) 5526 return; 5527 5528 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5529 5530 ret = perf_output_begin(&handle, event, 5531 task_event->event_id.header.size); 5532 if (ret) 5533 goto out; 5534 5535 task_event->event_id.pid = perf_event_pid(event, task); 5536 task_event->event_id.ppid = perf_event_pid(event, current); 5537 5538 task_event->event_id.tid = perf_event_tid(event, task); 5539 task_event->event_id.ptid = perf_event_tid(event, current); 5540 5541 task_event->event_id.time = perf_event_clock(event); 5542 5543 perf_output_put(&handle, task_event->event_id); 5544 5545 perf_event__output_id_sample(event, &handle, &sample); 5546 5547 perf_output_end(&handle); 5548 out: 5549 task_event->event_id.header.size = size; 5550 } 5551 5552 static void perf_event_task(struct task_struct *task, 5553 struct perf_event_context *task_ctx, 5554 int new) 5555 { 5556 struct perf_task_event task_event; 5557 5558 if (!atomic_read(&nr_comm_events) && 5559 !atomic_read(&nr_mmap_events) && 5560 !atomic_read(&nr_task_events)) 5561 return; 5562 5563 task_event = (struct perf_task_event){ 5564 .task = task, 5565 .task_ctx = task_ctx, 5566 .event_id = { 5567 .header = { 5568 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5569 .misc = 0, 5570 .size = sizeof(task_event.event_id), 5571 }, 5572 /* .pid */ 5573 /* .ppid */ 5574 /* .tid */ 5575 /* .ptid */ 5576 /* .time */ 5577 }, 5578 }; 5579 5580 perf_event_aux(perf_event_task_output, 5581 &task_event, 5582 task_ctx); 5583 } 5584 5585 void perf_event_fork(struct task_struct *task) 5586 { 5587 perf_event_task(task, NULL, 1); 5588 } 5589 5590 /* 5591 * comm tracking 5592 */ 5593 5594 struct perf_comm_event { 5595 struct task_struct *task; 5596 char *comm; 5597 int comm_size; 5598 5599 struct { 5600 struct perf_event_header header; 5601 5602 u32 pid; 5603 u32 tid; 5604 } event_id; 5605 }; 5606 5607 static int perf_event_comm_match(struct perf_event *event) 5608 { 5609 return event->attr.comm; 5610 } 5611 5612 static void perf_event_comm_output(struct perf_event *event, 5613 void *data) 5614 { 5615 struct perf_comm_event *comm_event = data; 5616 struct perf_output_handle handle; 5617 struct perf_sample_data sample; 5618 int size = comm_event->event_id.header.size; 5619 int ret; 5620 5621 if (!perf_event_comm_match(event)) 5622 return; 5623 5624 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5625 ret = perf_output_begin(&handle, event, 5626 comm_event->event_id.header.size); 5627 5628 if (ret) 5629 goto out; 5630 5631 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5632 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5633 5634 perf_output_put(&handle, comm_event->event_id); 5635 __output_copy(&handle, comm_event->comm, 5636 comm_event->comm_size); 5637 5638 perf_event__output_id_sample(event, &handle, &sample); 5639 5640 perf_output_end(&handle); 5641 out: 5642 comm_event->event_id.header.size = size; 5643 } 5644 5645 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5646 { 5647 char comm[TASK_COMM_LEN]; 5648 unsigned int size; 5649 5650 memset(comm, 0, sizeof(comm)); 5651 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5652 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5653 5654 comm_event->comm = comm; 5655 comm_event->comm_size = size; 5656 5657 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5658 5659 perf_event_aux(perf_event_comm_output, 5660 comm_event, 5661 NULL); 5662 } 5663 5664 void perf_event_comm(struct task_struct *task, bool exec) 5665 { 5666 struct perf_comm_event comm_event; 5667 5668 if (!atomic_read(&nr_comm_events)) 5669 return; 5670 5671 comm_event = (struct perf_comm_event){ 5672 .task = task, 5673 /* .comm */ 5674 /* .comm_size */ 5675 .event_id = { 5676 .header = { 5677 .type = PERF_RECORD_COMM, 5678 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 5679 /* .size */ 5680 }, 5681 /* .pid */ 5682 /* .tid */ 5683 }, 5684 }; 5685 5686 perf_event_comm_event(&comm_event); 5687 } 5688 5689 /* 5690 * mmap tracking 5691 */ 5692 5693 struct perf_mmap_event { 5694 struct vm_area_struct *vma; 5695 5696 const char *file_name; 5697 int file_size; 5698 int maj, min; 5699 u64 ino; 5700 u64 ino_generation; 5701 u32 prot, flags; 5702 5703 struct { 5704 struct perf_event_header header; 5705 5706 u32 pid; 5707 u32 tid; 5708 u64 start; 5709 u64 len; 5710 u64 pgoff; 5711 } event_id; 5712 }; 5713 5714 static int perf_event_mmap_match(struct perf_event *event, 5715 void *data) 5716 { 5717 struct perf_mmap_event *mmap_event = data; 5718 struct vm_area_struct *vma = mmap_event->vma; 5719 int executable = vma->vm_flags & VM_EXEC; 5720 5721 return (!executable && event->attr.mmap_data) || 5722 (executable && (event->attr.mmap || event->attr.mmap2)); 5723 } 5724 5725 static void perf_event_mmap_output(struct perf_event *event, 5726 void *data) 5727 { 5728 struct perf_mmap_event *mmap_event = data; 5729 struct perf_output_handle handle; 5730 struct perf_sample_data sample; 5731 int size = mmap_event->event_id.header.size; 5732 int ret; 5733 5734 if (!perf_event_mmap_match(event, data)) 5735 return; 5736 5737 if (event->attr.mmap2) { 5738 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5739 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5740 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5741 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5742 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5743 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5744 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5745 } 5746 5747 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5748 ret = perf_output_begin(&handle, event, 5749 mmap_event->event_id.header.size); 5750 if (ret) 5751 goto out; 5752 5753 mmap_event->event_id.pid = perf_event_pid(event, current); 5754 mmap_event->event_id.tid = perf_event_tid(event, current); 5755 5756 perf_output_put(&handle, mmap_event->event_id); 5757 5758 if (event->attr.mmap2) { 5759 perf_output_put(&handle, mmap_event->maj); 5760 perf_output_put(&handle, mmap_event->min); 5761 perf_output_put(&handle, mmap_event->ino); 5762 perf_output_put(&handle, mmap_event->ino_generation); 5763 perf_output_put(&handle, mmap_event->prot); 5764 perf_output_put(&handle, mmap_event->flags); 5765 } 5766 5767 __output_copy(&handle, mmap_event->file_name, 5768 mmap_event->file_size); 5769 5770 perf_event__output_id_sample(event, &handle, &sample); 5771 5772 perf_output_end(&handle); 5773 out: 5774 mmap_event->event_id.header.size = size; 5775 } 5776 5777 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5778 { 5779 struct vm_area_struct *vma = mmap_event->vma; 5780 struct file *file = vma->vm_file; 5781 int maj = 0, min = 0; 5782 u64 ino = 0, gen = 0; 5783 u32 prot = 0, flags = 0; 5784 unsigned int size; 5785 char tmp[16]; 5786 char *buf = NULL; 5787 char *name; 5788 5789 if (file) { 5790 struct inode *inode; 5791 dev_t dev; 5792 5793 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5794 if (!buf) { 5795 name = "//enomem"; 5796 goto cpy_name; 5797 } 5798 /* 5799 * d_path() works from the end of the rb backwards, so we 5800 * need to add enough zero bytes after the string to handle 5801 * the 64bit alignment we do later. 
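 *
 * For example, a 12-character path needs strlen() + 1 = 13 bytes and is
 * padded with three NUL bytes up to 16 at got_name below; reserving
 * sizeof(u64) here keeps that padding inside the buffer.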
5802 */ 5803 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 5804 if (IS_ERR(name)) { 5805 name = "//toolong"; 5806 goto cpy_name; 5807 } 5808 inode = file_inode(vma->vm_file); 5809 dev = inode->i_sb->s_dev; 5810 ino = inode->i_ino; 5811 gen = inode->i_generation; 5812 maj = MAJOR(dev); 5813 min = MINOR(dev); 5814 5815 if (vma->vm_flags & VM_READ) 5816 prot |= PROT_READ; 5817 if (vma->vm_flags & VM_WRITE) 5818 prot |= PROT_WRITE; 5819 if (vma->vm_flags & VM_EXEC) 5820 prot |= PROT_EXEC; 5821 5822 if (vma->vm_flags & VM_MAYSHARE) 5823 flags = MAP_SHARED; 5824 else 5825 flags = MAP_PRIVATE; 5826 5827 if (vma->vm_flags & VM_DENYWRITE) 5828 flags |= MAP_DENYWRITE; 5829 if (vma->vm_flags & VM_MAYEXEC) 5830 flags |= MAP_EXECUTABLE; 5831 if (vma->vm_flags & VM_LOCKED) 5832 flags |= MAP_LOCKED; 5833 if (vma->vm_flags & VM_HUGETLB) 5834 flags |= MAP_HUGETLB; 5835 5836 goto got_name; 5837 } else { 5838 if (vma->vm_ops && vma->vm_ops->name) { 5839 name = (char *) vma->vm_ops->name(vma); 5840 if (name) 5841 goto cpy_name; 5842 } 5843 5844 name = (char *)arch_vma_name(vma); 5845 if (name) 5846 goto cpy_name; 5847 5848 if (vma->vm_start <= vma->vm_mm->start_brk && 5849 vma->vm_end >= vma->vm_mm->brk) { 5850 name = "[heap]"; 5851 goto cpy_name; 5852 } 5853 if (vma->vm_start <= vma->vm_mm->start_stack && 5854 vma->vm_end >= vma->vm_mm->start_stack) { 5855 name = "[stack]"; 5856 goto cpy_name; 5857 } 5858 5859 name = "//anon"; 5860 goto cpy_name; 5861 } 5862 5863 cpy_name: 5864 strlcpy(tmp, name, sizeof(tmp)); 5865 name = tmp; 5866 got_name: 5867 /* 5868 * Since our buffer works in 8 byte units we need to align our string 5869 * size to a multiple of 8. However, we must guarantee the tail end is 5870 * zero'd out to avoid leaking random bits to userspace. 5871 */ 5872 size = strlen(name)+1; 5873 while (!IS_ALIGNED(size, sizeof(u64))) 5874 name[size++] = '\0'; 5875 5876 mmap_event->file_name = name; 5877 mmap_event->file_size = size; 5878 mmap_event->maj = maj; 5879 mmap_event->min = min; 5880 mmap_event->ino = ino; 5881 mmap_event->ino_generation = gen; 5882 mmap_event->prot = prot; 5883 mmap_event->flags = flags; 5884 5885 if (!(vma->vm_flags & VM_EXEC)) 5886 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 5887 5888 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5889 5890 perf_event_aux(perf_event_mmap_output, 5891 mmap_event, 5892 NULL); 5893 5894 kfree(buf); 5895 } 5896 5897 void perf_event_mmap(struct vm_area_struct *vma) 5898 { 5899 struct perf_mmap_event mmap_event; 5900 5901 if (!atomic_read(&nr_mmap_events)) 5902 return; 5903 5904 mmap_event = (struct perf_mmap_event){ 5905 .vma = vma, 5906 /* .file_name */ 5907 /* .file_size */ 5908 .event_id = { 5909 .header = { 5910 .type = PERF_RECORD_MMAP, 5911 .misc = PERF_RECORD_MISC_USER, 5912 /* .size */ 5913 }, 5914 /* .pid */ 5915 /* .tid */ 5916 .start = vma->vm_start, 5917 .len = vma->vm_end - vma->vm_start, 5918 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 5919 }, 5920 /* .maj (attr_mmap2 only) */ 5921 /* .min (attr_mmap2 only) */ 5922 /* .ino (attr_mmap2 only) */ 5923 /* .ino_generation (attr_mmap2 only) */ 5924 /* .prot (attr_mmap2 only) */ 5925 /* .flags (attr_mmap2 only) */ 5926 }; 5927 5928 perf_event_mmap_event(&mmap_event); 5929 } 5930 5931 void perf_event_aux_event(struct perf_event *event, unsigned long head, 5932 unsigned long size, u64 flags) 5933 { 5934 struct perf_output_handle handle; 5935 struct perf_sample_data sample; 5936 struct perf_aux_event { 5937 struct perf_event_header header; 5938 
u64 offset; 5939 u64 size; 5940 u64 flags; 5941 } rec = { 5942 .header = { 5943 .type = PERF_RECORD_AUX, 5944 .misc = 0, 5945 .size = sizeof(rec), 5946 }, 5947 .offset = head, 5948 .size = size, 5949 .flags = flags, 5950 }; 5951 int ret; 5952 5953 perf_event_header__init_id(&rec.header, &sample, event); 5954 ret = perf_output_begin(&handle, event, rec.header.size); 5955 5956 if (ret) 5957 return; 5958 5959 perf_output_put(&handle, rec); 5960 perf_event__output_id_sample(event, &handle, &sample); 5961 5962 perf_output_end(&handle); 5963 } 5964 5965 /* 5966 * Lost/dropped samples logging 5967 */ 5968 void perf_log_lost_samples(struct perf_event *event, u64 lost) 5969 { 5970 struct perf_output_handle handle; 5971 struct perf_sample_data sample; 5972 int ret; 5973 5974 struct { 5975 struct perf_event_header header; 5976 u64 lost; 5977 } lost_samples_event = { 5978 .header = { 5979 .type = PERF_RECORD_LOST_SAMPLES, 5980 .misc = 0, 5981 .size = sizeof(lost_samples_event), 5982 }, 5983 .lost = lost, 5984 }; 5985 5986 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 5987 5988 ret = perf_output_begin(&handle, event, 5989 lost_samples_event.header.size); 5990 if (ret) 5991 return; 5992 5993 perf_output_put(&handle, lost_samples_event); 5994 perf_event__output_id_sample(event, &handle, &sample); 5995 perf_output_end(&handle); 5996 } 5997 5998 /* 5999 * context_switch tracking 6000 */ 6001 6002 struct perf_switch_event { 6003 struct task_struct *task; 6004 struct task_struct *next_prev; 6005 6006 struct { 6007 struct perf_event_header header; 6008 u32 next_prev_pid; 6009 u32 next_prev_tid; 6010 } event_id; 6011 }; 6012 6013 static int perf_event_switch_match(struct perf_event *event) 6014 { 6015 return event->attr.context_switch; 6016 } 6017 6018 static void perf_event_switch_output(struct perf_event *event, void *data) 6019 { 6020 struct perf_switch_event *se = data; 6021 struct perf_output_handle handle; 6022 struct perf_sample_data sample; 6023 int ret; 6024 6025 if (!perf_event_switch_match(event)) 6026 return; 6027 6028 /* Only CPU-wide events are allowed to see next/prev pid/tid */ 6029 if (event->ctx->task) { 6030 se->event_id.header.type = PERF_RECORD_SWITCH; 6031 se->event_id.header.size = sizeof(se->event_id.header); 6032 } else { 6033 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; 6034 se->event_id.header.size = sizeof(se->event_id); 6035 se->event_id.next_prev_pid = 6036 perf_event_pid(event, se->next_prev); 6037 se->event_id.next_prev_tid = 6038 perf_event_tid(event, se->next_prev); 6039 } 6040 6041 perf_event_header__init_id(&se->event_id.header, &sample, event); 6042 6043 ret = perf_output_begin(&handle, event, se->event_id.header.size); 6044 if (ret) 6045 return; 6046 6047 if (event->ctx->task) 6048 perf_output_put(&handle, se->event_id.header); 6049 else 6050 perf_output_put(&handle, se->event_id); 6051 6052 perf_event__output_id_sample(event, &handle, &sample); 6053 6054 perf_output_end(&handle); 6055 } 6056 6057 static void perf_event_switch(struct task_struct *task, 6058 struct task_struct *next_prev, bool sched_in) 6059 { 6060 struct perf_switch_event switch_event; 6061 6062 /* N.B. caller checks nr_switch_events != 0 */ 6063 6064 switch_event = (struct perf_switch_event){ 6065 .task = task, 6066 .next_prev = next_prev, 6067 .event_id = { 6068 .header = { 6069 /* .type */ 6070 .misc = sched_in ? 
0 : PERF_RECORD_MISC_SWITCH_OUT, 6071 /* .size */ 6072 }, 6073 /* .next_prev_pid */ 6074 /* .next_prev_tid */ 6075 }, 6076 }; 6077 6078 perf_event_aux(perf_event_switch_output, 6079 &switch_event, 6080 NULL); 6081 } 6082 6083 /* 6084 * IRQ throttle logging 6085 */ 6086 6087 static void perf_log_throttle(struct perf_event *event, int enable) 6088 { 6089 struct perf_output_handle handle; 6090 struct perf_sample_data sample; 6091 int ret; 6092 6093 struct { 6094 struct perf_event_header header; 6095 u64 time; 6096 u64 id; 6097 u64 stream_id; 6098 } throttle_event = { 6099 .header = { 6100 .type = PERF_RECORD_THROTTLE, 6101 .misc = 0, 6102 .size = sizeof(throttle_event), 6103 }, 6104 .time = perf_event_clock(event), 6105 .id = primary_event_id(event), 6106 .stream_id = event->id, 6107 }; 6108 6109 if (enable) 6110 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6111 6112 perf_event_header__init_id(&throttle_event.header, &sample, event); 6113 6114 ret = perf_output_begin(&handle, event, 6115 throttle_event.header.size); 6116 if (ret) 6117 return; 6118 6119 perf_output_put(&handle, throttle_event); 6120 perf_event__output_id_sample(event, &handle, &sample); 6121 perf_output_end(&handle); 6122 } 6123 6124 static void perf_log_itrace_start(struct perf_event *event) 6125 { 6126 struct perf_output_handle handle; 6127 struct perf_sample_data sample; 6128 struct perf_aux_event { 6129 struct perf_event_header header; 6130 u32 pid; 6131 u32 tid; 6132 } rec; 6133 int ret; 6134 6135 if (event->parent) 6136 event = event->parent; 6137 6138 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6139 event->hw.itrace_started) 6140 return; 6141 6142 rec.header.type = PERF_RECORD_ITRACE_START; 6143 rec.header.misc = 0; 6144 rec.header.size = sizeof(rec); 6145 rec.pid = perf_event_pid(event, current); 6146 rec.tid = perf_event_tid(event, current); 6147 6148 perf_event_header__init_id(&rec.header, &sample, event); 6149 ret = perf_output_begin(&handle, event, rec.header.size); 6150 6151 if (ret) 6152 return; 6153 6154 perf_output_put(&handle, rec); 6155 perf_event__output_id_sample(event, &handle, &sample); 6156 6157 perf_output_end(&handle); 6158 } 6159 6160 /* 6161 * Generic event overflow handling, sampling. 6162 */ 6163 6164 static int __perf_event_overflow(struct perf_event *event, 6165 int throttle, struct perf_sample_data *data, 6166 struct pt_regs *regs) 6167 { 6168 int events = atomic_read(&event->event_limit); 6169 struct hw_perf_event *hwc = &event->hw; 6170 u64 seq; 6171 int ret = 0; 6172 6173 /* 6174 * Non-sampling counters might still use the PMI to fold short 6175 * hardware counters, ignore those. 
6176 */ 6177 if (unlikely(!is_sampling_event(event))) 6178 return 0; 6179 6180 seq = __this_cpu_read(perf_throttled_seq); 6181 if (seq != hwc->interrupts_seq) { 6182 hwc->interrupts_seq = seq; 6183 hwc->interrupts = 1; 6184 } else { 6185 hwc->interrupts++; 6186 if (unlikely(throttle 6187 && hwc->interrupts >= max_samples_per_tick)) { 6188 __this_cpu_inc(perf_throttled_count); 6189 hwc->interrupts = MAX_INTERRUPTS; 6190 perf_log_throttle(event, 0); 6191 tick_nohz_full_kick(); 6192 ret = 1; 6193 } 6194 } 6195 6196 if (event->attr.freq) { 6197 u64 now = perf_clock(); 6198 s64 delta = now - hwc->freq_time_stamp; 6199 6200 hwc->freq_time_stamp = now; 6201 6202 if (delta > 0 && delta < 2*TICK_NSEC) 6203 perf_adjust_period(event, delta, hwc->last_period, true); 6204 } 6205 6206 /* 6207 * XXX event_limit might not quite work as expected on inherited 6208 * events 6209 */ 6210 6211 event->pending_kill = POLL_IN; 6212 if (events && atomic_dec_and_test(&event->event_limit)) { 6213 ret = 1; 6214 event->pending_kill = POLL_HUP; 6215 event->pending_disable = 1; 6216 irq_work_queue(&event->pending); 6217 } 6218 6219 if (event->overflow_handler) 6220 event->overflow_handler(event, data, regs); 6221 else 6222 perf_event_output(event, data, regs); 6223 6224 if (event->fasync && event->pending_kill) { 6225 event->pending_wakeup = 1; 6226 irq_work_queue(&event->pending); 6227 } 6228 6229 return ret; 6230 } 6231 6232 int perf_event_overflow(struct perf_event *event, 6233 struct perf_sample_data *data, 6234 struct pt_regs *regs) 6235 { 6236 return __perf_event_overflow(event, 1, data, regs); 6237 } 6238 6239 /* 6240 * Generic software event infrastructure 6241 */ 6242 6243 struct swevent_htable { 6244 struct swevent_hlist *swevent_hlist; 6245 struct mutex hlist_mutex; 6246 int hlist_refcount; 6247 6248 /* Recursion avoidance in each contexts */ 6249 int recursion[PERF_NR_CONTEXTS]; 6250 6251 /* Keeps track of cpu being initialized/exited */ 6252 bool online; 6253 }; 6254 6255 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6256 6257 /* 6258 * We directly increment event->count and keep a second value in 6259 * event->hw.period_left to count intervals. This period event 6260 * is kept in the range [-sample_period, 0] so that we can use the 6261 * sign as trigger. 6262 */ 6263 6264 u64 perf_swevent_set_period(struct perf_event *event) 6265 { 6266 struct hw_perf_event *hwc = &event->hw; 6267 u64 period = hwc->last_period; 6268 u64 nr, offset; 6269 s64 old, val; 6270 6271 hwc->last_period = hwc->sample_period; 6272 6273 again: 6274 old = val = local64_read(&hwc->period_left); 6275 if (val < 0) 6276 return 0; 6277 6278 nr = div64_u64(period + val, period); 6279 offset = nr * period; 6280 val -= offset; 6281 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6282 goto again; 6283 6284 return nr; 6285 } 6286 6287 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6288 struct perf_sample_data *data, 6289 struct pt_regs *regs) 6290 { 6291 struct hw_perf_event *hwc = &event->hw; 6292 int throttle = 0; 6293 6294 if (!overflow) 6295 overflow = perf_swevent_set_period(event); 6296 6297 if (hwc->interrupts == MAX_INTERRUPTS) 6298 return; 6299 6300 for (; overflow; overflow--) { 6301 if (__perf_event_overflow(event, throttle, 6302 data, regs)) { 6303 /* 6304 * We inhibit the overflow from happening when 6305 * hwc->interrupts == MAX_INTERRUPTS. 
6306 */ 6307 break; 6308 } 6309 throttle = 1; 6310 } 6311 } 6312 6313 static void perf_swevent_event(struct perf_event *event, u64 nr, 6314 struct perf_sample_data *data, 6315 struct pt_regs *regs) 6316 { 6317 struct hw_perf_event *hwc = &event->hw; 6318 6319 local64_add(nr, &event->count); 6320 6321 if (!regs) 6322 return; 6323 6324 if (!is_sampling_event(event)) 6325 return; 6326 6327 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6328 data->period = nr; 6329 return perf_swevent_overflow(event, 1, data, regs); 6330 } else 6331 data->period = event->hw.last_period; 6332 6333 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6334 return perf_swevent_overflow(event, 1, data, regs); 6335 6336 if (local64_add_negative(nr, &hwc->period_left)) 6337 return; 6338 6339 perf_swevent_overflow(event, 0, data, regs); 6340 } 6341 6342 static int perf_exclude_event(struct perf_event *event, 6343 struct pt_regs *regs) 6344 { 6345 if (event->hw.state & PERF_HES_STOPPED) 6346 return 1; 6347 6348 if (regs) { 6349 if (event->attr.exclude_user && user_mode(regs)) 6350 return 1; 6351 6352 if (event->attr.exclude_kernel && !user_mode(regs)) 6353 return 1; 6354 } 6355 6356 return 0; 6357 } 6358 6359 static int perf_swevent_match(struct perf_event *event, 6360 enum perf_type_id type, 6361 u32 event_id, 6362 struct perf_sample_data *data, 6363 struct pt_regs *regs) 6364 { 6365 if (event->attr.type != type) 6366 return 0; 6367 6368 if (event->attr.config != event_id) 6369 return 0; 6370 6371 if (perf_exclude_event(event, regs)) 6372 return 0; 6373 6374 return 1; 6375 } 6376 6377 static inline u64 swevent_hash(u64 type, u32 event_id) 6378 { 6379 u64 val = event_id | (type << 32); 6380 6381 return hash_64(val, SWEVENT_HLIST_BITS); 6382 } 6383 6384 static inline struct hlist_head * 6385 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6386 { 6387 u64 hash = swevent_hash(type, event_id); 6388 6389 return &hlist->heads[hash]; 6390 } 6391 6392 /* For the read side: events when they trigger */ 6393 static inline struct hlist_head * 6394 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6395 { 6396 struct swevent_hlist *hlist; 6397 6398 hlist = rcu_dereference(swhash->swevent_hlist); 6399 if (!hlist) 6400 return NULL; 6401 6402 return __find_swevent_head(hlist, type, event_id); 6403 } 6404 6405 /* For the event head insertion and removal in the hlist */ 6406 static inline struct hlist_head * 6407 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6408 { 6409 struct swevent_hlist *hlist; 6410 u32 event_id = event->attr.config; 6411 u64 type = event->attr.type; 6412 6413 /* 6414 * Event scheduling is always serialized against hlist allocation 6415 * and release. Which makes the protected version suitable here. 6416 * The context lock guarantees that. 
6417 */ 6418 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6419 lockdep_is_held(&event->ctx->lock)); 6420 if (!hlist) 6421 return NULL; 6422 6423 return __find_swevent_head(hlist, type, event_id); 6424 } 6425 6426 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6427 u64 nr, 6428 struct perf_sample_data *data, 6429 struct pt_regs *regs) 6430 { 6431 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6432 struct perf_event *event; 6433 struct hlist_head *head; 6434 6435 rcu_read_lock(); 6436 head = find_swevent_head_rcu(swhash, type, event_id); 6437 if (!head) 6438 goto end; 6439 6440 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6441 if (perf_swevent_match(event, type, event_id, data, regs)) 6442 perf_swevent_event(event, nr, data, regs); 6443 } 6444 end: 6445 rcu_read_unlock(); 6446 } 6447 6448 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6449 6450 int perf_swevent_get_recursion_context(void) 6451 { 6452 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6453 6454 return get_recursion_context(swhash->recursion); 6455 } 6456 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6457 6458 inline void perf_swevent_put_recursion_context(int rctx) 6459 { 6460 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6461 6462 put_recursion_context(swhash->recursion, rctx); 6463 } 6464 6465 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6466 { 6467 struct perf_sample_data data; 6468 6469 if (WARN_ON_ONCE(!regs)) 6470 return; 6471 6472 perf_sample_data_init(&data, addr, 0); 6473 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6474 } 6475 6476 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6477 { 6478 int rctx; 6479 6480 preempt_disable_notrace(); 6481 rctx = perf_swevent_get_recursion_context(); 6482 if (unlikely(rctx < 0)) 6483 goto fail; 6484 6485 ___perf_sw_event(event_id, nr, regs, addr); 6486 6487 perf_swevent_put_recursion_context(rctx); 6488 fail: 6489 preempt_enable_notrace(); 6490 } 6491 6492 static void perf_swevent_read(struct perf_event *event) 6493 { 6494 } 6495 6496 static int perf_swevent_add(struct perf_event *event, int flags) 6497 { 6498 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6499 struct hw_perf_event *hwc = &event->hw; 6500 struct hlist_head *head; 6501 6502 if (is_sampling_event(event)) { 6503 hwc->last_period = hwc->sample_period; 6504 perf_swevent_set_period(event); 6505 } 6506 6507 hwc->state = !(flags & PERF_EF_START); 6508 6509 head = find_swevent_head(swhash, event); 6510 if (!head) { 6511 /* 6512 * We can race with cpu hotplug code. Do not 6513 * WARN if the cpu just got unplugged. 
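	 * swhash->online is cleared while the CPU goes down, so the
	 * warning only triggers when the hlist is missing on a CPU that
	 * is still marked online.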
6514 */ 6515 WARN_ON_ONCE(swhash->online); 6516 return -EINVAL; 6517 } 6518 6519 hlist_add_head_rcu(&event->hlist_entry, head); 6520 perf_event_update_userpage(event); 6521 6522 return 0; 6523 } 6524 6525 static void perf_swevent_del(struct perf_event *event, int flags) 6526 { 6527 hlist_del_rcu(&event->hlist_entry); 6528 } 6529 6530 static void perf_swevent_start(struct perf_event *event, int flags) 6531 { 6532 event->hw.state = 0; 6533 } 6534 6535 static void perf_swevent_stop(struct perf_event *event, int flags) 6536 { 6537 event->hw.state = PERF_HES_STOPPED; 6538 } 6539 6540 /* Deref the hlist from the update side */ 6541 static inline struct swevent_hlist * 6542 swevent_hlist_deref(struct swevent_htable *swhash) 6543 { 6544 return rcu_dereference_protected(swhash->swevent_hlist, 6545 lockdep_is_held(&swhash->hlist_mutex)); 6546 } 6547 6548 static void swevent_hlist_release(struct swevent_htable *swhash) 6549 { 6550 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6551 6552 if (!hlist) 6553 return; 6554 6555 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6556 kfree_rcu(hlist, rcu_head); 6557 } 6558 6559 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6560 { 6561 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6562 6563 mutex_lock(&swhash->hlist_mutex); 6564 6565 if (!--swhash->hlist_refcount) 6566 swevent_hlist_release(swhash); 6567 6568 mutex_unlock(&swhash->hlist_mutex); 6569 } 6570 6571 static void swevent_hlist_put(struct perf_event *event) 6572 { 6573 int cpu; 6574 6575 for_each_possible_cpu(cpu) 6576 swevent_hlist_put_cpu(event, cpu); 6577 } 6578 6579 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6580 { 6581 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6582 int err = 0; 6583 6584 mutex_lock(&swhash->hlist_mutex); 6585 6586 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6587 struct swevent_hlist *hlist; 6588 6589 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6590 if (!hlist) { 6591 err = -ENOMEM; 6592 goto exit; 6593 } 6594 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6595 } 6596 swhash->hlist_refcount++; 6597 exit: 6598 mutex_unlock(&swhash->hlist_mutex); 6599 6600 return err; 6601 } 6602 6603 static int swevent_hlist_get(struct perf_event *event) 6604 { 6605 int err; 6606 int cpu, failed_cpu; 6607 6608 get_online_cpus(); 6609 for_each_possible_cpu(cpu) { 6610 err = swevent_hlist_get_cpu(event, cpu); 6611 if (err) { 6612 failed_cpu = cpu; 6613 goto fail; 6614 } 6615 } 6616 put_online_cpus(); 6617 6618 return 0; 6619 fail: 6620 for_each_possible_cpu(cpu) { 6621 if (cpu == failed_cpu) 6622 break; 6623 swevent_hlist_put_cpu(event, cpu); 6624 } 6625 6626 put_online_cpus(); 6627 return err; 6628 } 6629 6630 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6631 6632 static void sw_perf_event_destroy(struct perf_event *event) 6633 { 6634 u64 event_id = event->attr.config; 6635 6636 WARN_ON(event->parent); 6637 6638 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6639 swevent_hlist_put(event); 6640 } 6641 6642 static int perf_swevent_init(struct perf_event *event) 6643 { 6644 u64 event_id = event->attr.config; 6645 6646 if (event->attr.type != PERF_TYPE_SOFTWARE) 6647 return -ENOENT; 6648 6649 /* 6650 * no branch sampling for software events 6651 */ 6652 if (has_branch_stack(event)) 6653 return -EOPNOTSUPP; 6654 6655 switch (event_id) { 6656 case PERF_COUNT_SW_CPU_CLOCK: 6657 case PERF_COUNT_SW_TASK_CLOCK: 6658 return -ENOENT; 6659 6660 default: 6661 break; 6662 } 6663 6664 
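	/*
	 * Only generic software event ids below PERF_COUNT_SW_MAX are
	 * handled by this pmu; cpu-clock and task-clock were rejected
	 * above because they are served by their own pmus below.
	 */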
if (event_id >= PERF_COUNT_SW_MAX) 6665 return -ENOENT; 6666 6667 if (!event->parent) { 6668 int err; 6669 6670 err = swevent_hlist_get(event); 6671 if (err) 6672 return err; 6673 6674 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6675 event->destroy = sw_perf_event_destroy; 6676 } 6677 6678 return 0; 6679 } 6680 6681 static struct pmu perf_swevent = { 6682 .task_ctx_nr = perf_sw_context, 6683 6684 .capabilities = PERF_PMU_CAP_NO_NMI, 6685 6686 .event_init = perf_swevent_init, 6687 .add = perf_swevent_add, 6688 .del = perf_swevent_del, 6689 .start = perf_swevent_start, 6690 .stop = perf_swevent_stop, 6691 .read = perf_swevent_read, 6692 }; 6693 6694 #ifdef CONFIG_EVENT_TRACING 6695 6696 static int perf_tp_filter_match(struct perf_event *event, 6697 struct perf_sample_data *data) 6698 { 6699 void *record = data->raw->data; 6700 6701 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6702 return 1; 6703 return 0; 6704 } 6705 6706 static int perf_tp_event_match(struct perf_event *event, 6707 struct perf_sample_data *data, 6708 struct pt_regs *regs) 6709 { 6710 if (event->hw.state & PERF_HES_STOPPED) 6711 return 0; 6712 /* 6713 * All tracepoints are from kernel-space. 6714 */ 6715 if (event->attr.exclude_kernel) 6716 return 0; 6717 6718 if (!perf_tp_filter_match(event, data)) 6719 return 0; 6720 6721 return 1; 6722 } 6723 6724 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6725 struct pt_regs *regs, struct hlist_head *head, int rctx, 6726 struct task_struct *task) 6727 { 6728 struct perf_sample_data data; 6729 struct perf_event *event; 6730 6731 struct perf_raw_record raw = { 6732 .size = entry_size, 6733 .data = record, 6734 }; 6735 6736 perf_sample_data_init(&data, addr, 0); 6737 data.raw = &raw; 6738 6739 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6740 if (perf_tp_event_match(event, &data, regs)) 6741 perf_swevent_event(event, count, &data, regs); 6742 } 6743 6744 /* 6745 * If we got specified a target task, also iterate its context and 6746 * deliver this event there too. 
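	 * Only the task's software context is searched, and only
	 * tracepoint events whose config matches this trace entry's
	 * type are delivered.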
6747 */ 6748 if (task && task != current) { 6749 struct perf_event_context *ctx; 6750 struct trace_entry *entry = record; 6751 6752 rcu_read_lock(); 6753 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6754 if (!ctx) 6755 goto unlock; 6756 6757 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6758 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6759 continue; 6760 if (event->attr.config != entry->type) 6761 continue; 6762 if (perf_tp_event_match(event, &data, regs)) 6763 perf_swevent_event(event, count, &data, regs); 6764 } 6765 unlock: 6766 rcu_read_unlock(); 6767 } 6768 6769 perf_swevent_put_recursion_context(rctx); 6770 } 6771 EXPORT_SYMBOL_GPL(perf_tp_event); 6772 6773 static void tp_perf_event_destroy(struct perf_event *event) 6774 { 6775 perf_trace_destroy(event); 6776 } 6777 6778 static int perf_tp_event_init(struct perf_event *event) 6779 { 6780 int err; 6781 6782 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6783 return -ENOENT; 6784 6785 /* 6786 * no branch sampling for tracepoint events 6787 */ 6788 if (has_branch_stack(event)) 6789 return -EOPNOTSUPP; 6790 6791 err = perf_trace_init(event); 6792 if (err) 6793 return err; 6794 6795 event->destroy = tp_perf_event_destroy; 6796 6797 return 0; 6798 } 6799 6800 static struct pmu perf_tracepoint = { 6801 .task_ctx_nr = perf_sw_context, 6802 6803 .event_init = perf_tp_event_init, 6804 .add = perf_trace_add, 6805 .del = perf_trace_del, 6806 .start = perf_swevent_start, 6807 .stop = perf_swevent_stop, 6808 .read = perf_swevent_read, 6809 }; 6810 6811 static inline void perf_tp_register(void) 6812 { 6813 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 6814 } 6815 6816 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6817 { 6818 char *filter_str; 6819 int ret; 6820 6821 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6822 return -EINVAL; 6823 6824 filter_str = strndup_user(arg, PAGE_SIZE); 6825 if (IS_ERR(filter_str)) 6826 return PTR_ERR(filter_str); 6827 6828 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 6829 6830 kfree(filter_str); 6831 return ret; 6832 } 6833 6834 static void perf_event_free_filter(struct perf_event *event) 6835 { 6836 ftrace_profile_free_filter(event); 6837 } 6838 6839 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6840 { 6841 struct bpf_prog *prog; 6842 6843 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6844 return -EINVAL; 6845 6846 if (event->tp_event->prog) 6847 return -EEXIST; 6848 6849 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) 6850 /* bpf programs can only be attached to u/kprobes */ 6851 return -EINVAL; 6852 6853 prog = bpf_prog_get(prog_fd); 6854 if (IS_ERR(prog)) 6855 return PTR_ERR(prog); 6856 6857 if (prog->type != BPF_PROG_TYPE_KPROBE) { 6858 /* valid fd, but invalid bpf program type */ 6859 bpf_prog_put(prog); 6860 return -EINVAL; 6861 } 6862 6863 event->tp_event->prog = prog; 6864 6865 return 0; 6866 } 6867 6868 static void perf_event_free_bpf_prog(struct perf_event *event) 6869 { 6870 struct bpf_prog *prog; 6871 6872 if (!event->tp_event) 6873 return; 6874 6875 prog = event->tp_event->prog; 6876 if (prog) { 6877 event->tp_event->prog = NULL; 6878 bpf_prog_put(prog); 6879 } 6880 } 6881 6882 #else 6883 6884 static inline void perf_tp_register(void) 6885 { 6886 } 6887 6888 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6889 { 6890 return -ENOENT; 6891 } 6892 6893 static void perf_event_free_filter(struct perf_event *event) 6894 { 
6895 } 6896 6897 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6898 { 6899 return -ENOENT; 6900 } 6901 6902 static void perf_event_free_bpf_prog(struct perf_event *event) 6903 { 6904 } 6905 #endif /* CONFIG_EVENT_TRACING */ 6906 6907 #ifdef CONFIG_HAVE_HW_BREAKPOINT 6908 void perf_bp_event(struct perf_event *bp, void *data) 6909 { 6910 struct perf_sample_data sample; 6911 struct pt_regs *regs = data; 6912 6913 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 6914 6915 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 6916 perf_swevent_event(bp, 1, &sample, regs); 6917 } 6918 #endif 6919 6920 /* 6921 * hrtimer based swevent callback 6922 */ 6923 6924 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 6925 { 6926 enum hrtimer_restart ret = HRTIMER_RESTART; 6927 struct perf_sample_data data; 6928 struct pt_regs *regs; 6929 struct perf_event *event; 6930 u64 period; 6931 6932 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 6933 6934 if (event->state != PERF_EVENT_STATE_ACTIVE) 6935 return HRTIMER_NORESTART; 6936 6937 event->pmu->read(event); 6938 6939 perf_sample_data_init(&data, 0, event->hw.last_period); 6940 regs = get_irq_regs(); 6941 6942 if (regs && !perf_exclude_event(event, regs)) { 6943 if (!(event->attr.exclude_idle && is_idle_task(current))) 6944 if (__perf_event_overflow(event, 1, &data, regs)) 6945 ret = HRTIMER_NORESTART; 6946 } 6947 6948 period = max_t(u64, 10000, event->hw.sample_period); 6949 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 6950 6951 return ret; 6952 } 6953 6954 static void perf_swevent_start_hrtimer(struct perf_event *event) 6955 { 6956 struct hw_perf_event *hwc = &event->hw; 6957 s64 period; 6958 6959 if (!is_sampling_event(event)) 6960 return; 6961 6962 period = local64_read(&hwc->period_left); 6963 if (period) { 6964 if (period < 0) 6965 period = 10000; 6966 6967 local64_set(&hwc->period_left, 0); 6968 } else { 6969 period = max_t(u64, 10000, hwc->sample_period); 6970 } 6971 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 6972 HRTIMER_MODE_REL_PINNED); 6973 } 6974 6975 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 6976 { 6977 struct hw_perf_event *hwc = &event->hw; 6978 6979 if (is_sampling_event(event)) { 6980 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 6981 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 6982 6983 hrtimer_cancel(&hwc->hrtimer); 6984 } 6985 } 6986 6987 static void perf_swevent_init_hrtimer(struct perf_event *event) 6988 { 6989 struct hw_perf_event *hwc = &event->hw; 6990 6991 if (!is_sampling_event(event)) 6992 return; 6993 6994 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6995 hwc->hrtimer.function = perf_swevent_hrtimer; 6996 6997 /* 6998 * Since hrtimers have a fixed rate, we can do a static freq->period 6999 * mapping and avoid the whole period adjust feedback stuff. 
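	 * For example, sample_freq = 1000 maps to a fixed period of
	 * NSEC_PER_SEC / 1000 = 1000000 ns between samples.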
7000 */ 7001 if (event->attr.freq) { 7002 long freq = event->attr.sample_freq; 7003 7004 event->attr.sample_period = NSEC_PER_SEC / freq; 7005 hwc->sample_period = event->attr.sample_period; 7006 local64_set(&hwc->period_left, hwc->sample_period); 7007 hwc->last_period = hwc->sample_period; 7008 event->attr.freq = 0; 7009 } 7010 } 7011 7012 /* 7013 * Software event: cpu wall time clock 7014 */ 7015 7016 static void cpu_clock_event_update(struct perf_event *event) 7017 { 7018 s64 prev; 7019 u64 now; 7020 7021 now = local_clock(); 7022 prev = local64_xchg(&event->hw.prev_count, now); 7023 local64_add(now - prev, &event->count); 7024 } 7025 7026 static void cpu_clock_event_start(struct perf_event *event, int flags) 7027 { 7028 local64_set(&event->hw.prev_count, local_clock()); 7029 perf_swevent_start_hrtimer(event); 7030 } 7031 7032 static void cpu_clock_event_stop(struct perf_event *event, int flags) 7033 { 7034 perf_swevent_cancel_hrtimer(event); 7035 cpu_clock_event_update(event); 7036 } 7037 7038 static int cpu_clock_event_add(struct perf_event *event, int flags) 7039 { 7040 if (flags & PERF_EF_START) 7041 cpu_clock_event_start(event, flags); 7042 perf_event_update_userpage(event); 7043 7044 return 0; 7045 } 7046 7047 static void cpu_clock_event_del(struct perf_event *event, int flags) 7048 { 7049 cpu_clock_event_stop(event, flags); 7050 } 7051 7052 static void cpu_clock_event_read(struct perf_event *event) 7053 { 7054 cpu_clock_event_update(event); 7055 } 7056 7057 static int cpu_clock_event_init(struct perf_event *event) 7058 { 7059 if (event->attr.type != PERF_TYPE_SOFTWARE) 7060 return -ENOENT; 7061 7062 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 7063 return -ENOENT; 7064 7065 /* 7066 * no branch sampling for software events 7067 */ 7068 if (has_branch_stack(event)) 7069 return -EOPNOTSUPP; 7070 7071 perf_swevent_init_hrtimer(event); 7072 7073 return 0; 7074 } 7075 7076 static struct pmu perf_cpu_clock = { 7077 .task_ctx_nr = perf_sw_context, 7078 7079 .capabilities = PERF_PMU_CAP_NO_NMI, 7080 7081 .event_init = cpu_clock_event_init, 7082 .add = cpu_clock_event_add, 7083 .del = cpu_clock_event_del, 7084 .start = cpu_clock_event_start, 7085 .stop = cpu_clock_event_stop, 7086 .read = cpu_clock_event_read, 7087 }; 7088 7089 /* 7090 * Software event: task time clock 7091 */ 7092 7093 static void task_clock_event_update(struct perf_event *event, u64 now) 7094 { 7095 u64 prev; 7096 s64 delta; 7097 7098 prev = local64_xchg(&event->hw.prev_count, now); 7099 delta = now - prev; 7100 local64_add(delta, &event->count); 7101 } 7102 7103 static void task_clock_event_start(struct perf_event *event, int flags) 7104 { 7105 local64_set(&event->hw.prev_count, event->ctx->time); 7106 perf_swevent_start_hrtimer(event); 7107 } 7108 7109 static void task_clock_event_stop(struct perf_event *event, int flags) 7110 { 7111 perf_swevent_cancel_hrtimer(event); 7112 task_clock_event_update(event, event->ctx->time); 7113 } 7114 7115 static int task_clock_event_add(struct perf_event *event, int flags) 7116 { 7117 if (flags & PERF_EF_START) 7118 task_clock_event_start(event, flags); 7119 perf_event_update_userpage(event); 7120 7121 return 0; 7122 } 7123 7124 static void task_clock_event_del(struct perf_event *event, int flags) 7125 { 7126 task_clock_event_stop(event, PERF_EF_UPDATE); 7127 } 7128 7129 static void task_clock_event_read(struct perf_event *event) 7130 { 7131 u64 now = perf_clock(); 7132 u64 delta = now - event->ctx->timestamp; 7133 u64 time = event->ctx->time + delta; 7134 7135 
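	/*
	 * Extrapolate ctx->time by the delta since the last context
	 * timestamp update before folding it into the event count.
	 */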
task_clock_event_update(event, time); 7136 } 7137 7138 static int task_clock_event_init(struct perf_event *event) 7139 { 7140 if (event->attr.type != PERF_TYPE_SOFTWARE) 7141 return -ENOENT; 7142 7143 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7144 return -ENOENT; 7145 7146 /* 7147 * no branch sampling for software events 7148 */ 7149 if (has_branch_stack(event)) 7150 return -EOPNOTSUPP; 7151 7152 perf_swevent_init_hrtimer(event); 7153 7154 return 0; 7155 } 7156 7157 static struct pmu perf_task_clock = { 7158 .task_ctx_nr = perf_sw_context, 7159 7160 .capabilities = PERF_PMU_CAP_NO_NMI, 7161 7162 .event_init = task_clock_event_init, 7163 .add = task_clock_event_add, 7164 .del = task_clock_event_del, 7165 .start = task_clock_event_start, 7166 .stop = task_clock_event_stop, 7167 .read = task_clock_event_read, 7168 }; 7169 7170 static void perf_pmu_nop_void(struct pmu *pmu) 7171 { 7172 } 7173 7174 static int perf_pmu_nop_int(struct pmu *pmu) 7175 { 7176 return 0; 7177 } 7178 7179 static void perf_pmu_start_txn(struct pmu *pmu) 7180 { 7181 perf_pmu_disable(pmu); 7182 } 7183 7184 static int perf_pmu_commit_txn(struct pmu *pmu) 7185 { 7186 perf_pmu_enable(pmu); 7187 return 0; 7188 } 7189 7190 static void perf_pmu_cancel_txn(struct pmu *pmu) 7191 { 7192 perf_pmu_enable(pmu); 7193 } 7194 7195 static int perf_event_idx_default(struct perf_event *event) 7196 { 7197 return 0; 7198 } 7199 7200 /* 7201 * Ensures all contexts with the same task_ctx_nr have the same 7202 * pmu_cpu_context too. 7203 */ 7204 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7205 { 7206 struct pmu *pmu; 7207 7208 if (ctxn < 0) 7209 return NULL; 7210 7211 list_for_each_entry(pmu, &pmus, entry) { 7212 if (pmu->task_ctx_nr == ctxn) 7213 return pmu->pmu_cpu_context; 7214 } 7215 7216 return NULL; 7217 } 7218 7219 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7220 { 7221 int cpu; 7222 7223 for_each_possible_cpu(cpu) { 7224 struct perf_cpu_context *cpuctx; 7225 7226 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7227 7228 if (cpuctx->unique_pmu == old_pmu) 7229 cpuctx->unique_pmu = pmu; 7230 } 7231 } 7232 7233 static void free_pmu_context(struct pmu *pmu) 7234 { 7235 struct pmu *i; 7236 7237 mutex_lock(&pmus_lock); 7238 /* 7239 * Like a real lame refcount. 
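	 * i.e. walk the pmu list and keep the per-cpu context alive as
	 * long as some other pmu still shares it; only free it when
	 * nobody does.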
7240 */ 7241 list_for_each_entry(i, &pmus, entry) { 7242 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7243 update_pmu_context(i, pmu); 7244 goto out; 7245 } 7246 } 7247 7248 free_percpu(pmu->pmu_cpu_context); 7249 out: 7250 mutex_unlock(&pmus_lock); 7251 } 7252 static struct idr pmu_idr; 7253 7254 static ssize_t 7255 type_show(struct device *dev, struct device_attribute *attr, char *page) 7256 { 7257 struct pmu *pmu = dev_get_drvdata(dev); 7258 7259 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7260 } 7261 static DEVICE_ATTR_RO(type); 7262 7263 static ssize_t 7264 perf_event_mux_interval_ms_show(struct device *dev, 7265 struct device_attribute *attr, 7266 char *page) 7267 { 7268 struct pmu *pmu = dev_get_drvdata(dev); 7269 7270 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7271 } 7272 7273 static DEFINE_MUTEX(mux_interval_mutex); 7274 7275 static ssize_t 7276 perf_event_mux_interval_ms_store(struct device *dev, 7277 struct device_attribute *attr, 7278 const char *buf, size_t count) 7279 { 7280 struct pmu *pmu = dev_get_drvdata(dev); 7281 int timer, cpu, ret; 7282 7283 ret = kstrtoint(buf, 0, &timer); 7284 if (ret) 7285 return ret; 7286 7287 if (timer < 1) 7288 return -EINVAL; 7289 7290 /* same value, noting to do */ 7291 if (timer == pmu->hrtimer_interval_ms) 7292 return count; 7293 7294 mutex_lock(&mux_interval_mutex); 7295 pmu->hrtimer_interval_ms = timer; 7296 7297 /* update all cpuctx for this PMU */ 7298 get_online_cpus(); 7299 for_each_online_cpu(cpu) { 7300 struct perf_cpu_context *cpuctx; 7301 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7302 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7303 7304 cpu_function_call(cpu, 7305 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7306 } 7307 put_online_cpus(); 7308 mutex_unlock(&mux_interval_mutex); 7309 7310 return count; 7311 } 7312 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7313 7314 static struct attribute *pmu_dev_attrs[] = { 7315 &dev_attr_type.attr, 7316 &dev_attr_perf_event_mux_interval_ms.attr, 7317 NULL, 7318 }; 7319 ATTRIBUTE_GROUPS(pmu_dev); 7320 7321 static int pmu_bus_running; 7322 static struct bus_type pmu_bus = { 7323 .name = "event_source", 7324 .dev_groups = pmu_dev_groups, 7325 }; 7326 7327 static void pmu_dev_release(struct device *dev) 7328 { 7329 kfree(dev); 7330 } 7331 7332 static int pmu_dev_alloc(struct pmu *pmu) 7333 { 7334 int ret = -ENOMEM; 7335 7336 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7337 if (!pmu->dev) 7338 goto out; 7339 7340 pmu->dev->groups = pmu->attr_groups; 7341 device_initialize(pmu->dev); 7342 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7343 if (ret) 7344 goto free_dev; 7345 7346 dev_set_drvdata(pmu->dev, pmu); 7347 pmu->dev->bus = &pmu_bus; 7348 pmu->dev->release = pmu_dev_release; 7349 ret = device_add(pmu->dev); 7350 if (ret) 7351 goto free_dev; 7352 7353 out: 7354 return ret; 7355 7356 free_dev: 7357 put_device(pmu->dev); 7358 goto out; 7359 } 7360 7361 static struct lock_class_key cpuctx_mutex; 7362 static struct lock_class_key cpuctx_lock; 7363 7364 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7365 { 7366 int cpu, ret; 7367 7368 mutex_lock(&pmus_lock); 7369 ret = -ENOMEM; 7370 pmu->pmu_disable_count = alloc_percpu(int); 7371 if (!pmu->pmu_disable_count) 7372 goto unlock; 7373 7374 pmu->type = -1; 7375 if (!name) 7376 goto skip_type; 7377 pmu->name = name; 7378 7379 if (type < 0) { 7380 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7381 if (type < 0) { 7382 ret = 
type; 7383 goto free_pdc; 7384 } 7385 } 7386 pmu->type = type; 7387 7388 if (pmu_bus_running) { 7389 ret = pmu_dev_alloc(pmu); 7390 if (ret) 7391 goto free_idr; 7392 } 7393 7394 skip_type: 7395 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7396 if (pmu->pmu_cpu_context) 7397 goto got_cpu_context; 7398 7399 ret = -ENOMEM; 7400 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7401 if (!pmu->pmu_cpu_context) 7402 goto free_dev; 7403 7404 for_each_possible_cpu(cpu) { 7405 struct perf_cpu_context *cpuctx; 7406 7407 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7408 __perf_event_init_context(&cpuctx->ctx); 7409 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7410 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7411 cpuctx->ctx.pmu = pmu; 7412 7413 __perf_mux_hrtimer_init(cpuctx, cpu); 7414 7415 cpuctx->unique_pmu = pmu; 7416 } 7417 7418 got_cpu_context: 7419 if (!pmu->start_txn) { 7420 if (pmu->pmu_enable) { 7421 /* 7422 * If we have pmu_enable/pmu_disable calls, install 7423 * transaction stubs that use that to try and batch 7424 * hardware accesses. 7425 */ 7426 pmu->start_txn = perf_pmu_start_txn; 7427 pmu->commit_txn = perf_pmu_commit_txn; 7428 pmu->cancel_txn = perf_pmu_cancel_txn; 7429 } else { 7430 pmu->start_txn = perf_pmu_nop_void; 7431 pmu->commit_txn = perf_pmu_nop_int; 7432 pmu->cancel_txn = perf_pmu_nop_void; 7433 } 7434 } 7435 7436 if (!pmu->pmu_enable) { 7437 pmu->pmu_enable = perf_pmu_nop_void; 7438 pmu->pmu_disable = perf_pmu_nop_void; 7439 } 7440 7441 if (!pmu->event_idx) 7442 pmu->event_idx = perf_event_idx_default; 7443 7444 list_add_rcu(&pmu->entry, &pmus); 7445 atomic_set(&pmu->exclusive_cnt, 0); 7446 ret = 0; 7447 unlock: 7448 mutex_unlock(&pmus_lock); 7449 7450 return ret; 7451 7452 free_dev: 7453 device_del(pmu->dev); 7454 put_device(pmu->dev); 7455 7456 free_idr: 7457 if (pmu->type >= PERF_TYPE_MAX) 7458 idr_remove(&pmu_idr, pmu->type); 7459 7460 free_pdc: 7461 free_percpu(pmu->pmu_disable_count); 7462 goto unlock; 7463 } 7464 EXPORT_SYMBOL_GPL(perf_pmu_register); 7465 7466 void perf_pmu_unregister(struct pmu *pmu) 7467 { 7468 mutex_lock(&pmus_lock); 7469 list_del_rcu(&pmu->entry); 7470 mutex_unlock(&pmus_lock); 7471 7472 /* 7473 * We dereference the pmu list under both SRCU and regular RCU, so 7474 * synchronize against both of those. 7475 */ 7476 synchronize_srcu(&pmus_srcu); 7477 synchronize_rcu(); 7478 7479 free_percpu(pmu->pmu_disable_count); 7480 if (pmu->type >= PERF_TYPE_MAX) 7481 idr_remove(&pmu_idr, pmu->type); 7482 device_del(pmu->dev); 7483 put_device(pmu->dev); 7484 free_pmu_context(pmu); 7485 } 7486 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7487 7488 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7489 { 7490 struct perf_event_context *ctx = NULL; 7491 int ret; 7492 7493 if (!try_module_get(pmu->module)) 7494 return -ENODEV; 7495 7496 if (event->group_leader != event) { 7497 /* 7498 * This ctx->mutex can nest when we're called through 7499 * inheritance. See the perf_event_ctx_lock_nested() comment. 
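		 * Holding the group leader's ctx->mutex also keeps the
		 * group's sibling list stable while pmu->event_init() runs.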
7500 */ 7501 ctx = perf_event_ctx_lock_nested(event->group_leader, 7502 SINGLE_DEPTH_NESTING); 7503 BUG_ON(!ctx); 7504 } 7505 7506 event->pmu = pmu; 7507 ret = pmu->event_init(event); 7508 7509 if (ctx) 7510 perf_event_ctx_unlock(event->group_leader, ctx); 7511 7512 if (ret) 7513 module_put(pmu->module); 7514 7515 return ret; 7516 } 7517 7518 struct pmu *perf_init_event(struct perf_event *event) 7519 { 7520 struct pmu *pmu = NULL; 7521 int idx; 7522 int ret; 7523 7524 idx = srcu_read_lock(&pmus_srcu); 7525 7526 rcu_read_lock(); 7527 pmu = idr_find(&pmu_idr, event->attr.type); 7528 rcu_read_unlock(); 7529 if (pmu) { 7530 ret = perf_try_init_event(pmu, event); 7531 if (ret) 7532 pmu = ERR_PTR(ret); 7533 goto unlock; 7534 } 7535 7536 list_for_each_entry_rcu(pmu, &pmus, entry) { 7537 ret = perf_try_init_event(pmu, event); 7538 if (!ret) 7539 goto unlock; 7540 7541 if (ret != -ENOENT) { 7542 pmu = ERR_PTR(ret); 7543 goto unlock; 7544 } 7545 } 7546 pmu = ERR_PTR(-ENOENT); 7547 unlock: 7548 srcu_read_unlock(&pmus_srcu, idx); 7549 7550 return pmu; 7551 } 7552 7553 static void account_event_cpu(struct perf_event *event, int cpu) 7554 { 7555 if (event->parent) 7556 return; 7557 7558 if (is_cgroup_event(event)) 7559 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7560 } 7561 7562 static void account_event(struct perf_event *event) 7563 { 7564 if (event->parent) 7565 return; 7566 7567 if (event->attach_state & PERF_ATTACH_TASK) 7568 static_key_slow_inc(&perf_sched_events.key); 7569 if (event->attr.mmap || event->attr.mmap_data) 7570 atomic_inc(&nr_mmap_events); 7571 if (event->attr.comm) 7572 atomic_inc(&nr_comm_events); 7573 if (event->attr.task) 7574 atomic_inc(&nr_task_events); 7575 if (event->attr.freq) { 7576 if (atomic_inc_return(&nr_freq_events) == 1) 7577 tick_nohz_full_kick_all(); 7578 } 7579 if (event->attr.context_switch) { 7580 atomic_inc(&nr_switch_events); 7581 static_key_slow_inc(&perf_sched_events.key); 7582 } 7583 if (has_branch_stack(event)) 7584 static_key_slow_inc(&perf_sched_events.key); 7585 if (is_cgroup_event(event)) 7586 static_key_slow_inc(&perf_sched_events.key); 7587 7588 account_event_cpu(event, event->cpu); 7589 } 7590 7591 /* 7592 * Allocate and initialize a event structure 7593 */ 7594 static struct perf_event * 7595 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7596 struct task_struct *task, 7597 struct perf_event *group_leader, 7598 struct perf_event *parent_event, 7599 perf_overflow_handler_t overflow_handler, 7600 void *context, int cgroup_fd) 7601 { 7602 struct pmu *pmu; 7603 struct perf_event *event; 7604 struct hw_perf_event *hwc; 7605 long err = -EINVAL; 7606 7607 if ((unsigned)cpu >= nr_cpu_ids) { 7608 if (!task || cpu != -1) 7609 return ERR_PTR(-EINVAL); 7610 } 7611 7612 event = kzalloc(sizeof(*event), GFP_KERNEL); 7613 if (!event) 7614 return ERR_PTR(-ENOMEM); 7615 7616 /* 7617 * Single events are their own group leaders, with an 7618 * empty sibling list: 7619 */ 7620 if (!group_leader) 7621 group_leader = event; 7622 7623 mutex_init(&event->child_mutex); 7624 INIT_LIST_HEAD(&event->child_list); 7625 7626 INIT_LIST_HEAD(&event->group_entry); 7627 INIT_LIST_HEAD(&event->event_entry); 7628 INIT_LIST_HEAD(&event->sibling_list); 7629 INIT_LIST_HEAD(&event->rb_entry); 7630 INIT_LIST_HEAD(&event->active_entry); 7631 INIT_HLIST_NODE(&event->hlist_entry); 7632 7633 7634 init_waitqueue_head(&event->waitq); 7635 init_irq_work(&event->pending, perf_pending_event); 7636 7637 mutex_init(&event->mmap_mutex); 7638 7639 atomic_long_set(&event->refcount, 1); 7640 
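	/*
	 * Basic identity and linkage; event->pmu stays NULL until
	 * perf_init_event() picks a pmu further down.
	 */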
event->cpu = cpu; 7641 event->attr = *attr; 7642 event->group_leader = group_leader; 7643 event->pmu = NULL; 7644 event->oncpu = -1; 7645 7646 event->parent = parent_event; 7647 7648 event->ns = get_pid_ns(task_active_pid_ns(current)); 7649 event->id = atomic64_inc_return(&perf_event_id); 7650 7651 event->state = PERF_EVENT_STATE_INACTIVE; 7652 7653 if (task) { 7654 event->attach_state = PERF_ATTACH_TASK; 7655 /* 7656 * XXX pmu::event_init needs to know what task to account to 7657 * and we cannot use the ctx information because we need the 7658 * pmu before we get a ctx. 7659 */ 7660 event->hw.target = task; 7661 } 7662 7663 event->clock = &local_clock; 7664 if (parent_event) 7665 event->clock = parent_event->clock; 7666 7667 if (!overflow_handler && parent_event) { 7668 overflow_handler = parent_event->overflow_handler; 7669 context = parent_event->overflow_handler_context; 7670 } 7671 7672 event->overflow_handler = overflow_handler; 7673 event->overflow_handler_context = context; 7674 7675 perf_event__state_init(event); 7676 7677 pmu = NULL; 7678 7679 hwc = &event->hw; 7680 hwc->sample_period = attr->sample_period; 7681 if (attr->freq && attr->sample_freq) 7682 hwc->sample_period = 1; 7683 hwc->last_period = hwc->sample_period; 7684 7685 local64_set(&hwc->period_left, hwc->sample_period); 7686 7687 /* 7688 * we currently do not support PERF_FORMAT_GROUP on inherited events 7689 */ 7690 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7691 goto err_ns; 7692 7693 if (!has_branch_stack(event)) 7694 event->attr.branch_sample_type = 0; 7695 7696 if (cgroup_fd != -1) { 7697 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7698 if (err) 7699 goto err_ns; 7700 } 7701 7702 pmu = perf_init_event(event); 7703 if (!pmu) 7704 goto err_ns; 7705 else if (IS_ERR(pmu)) { 7706 err = PTR_ERR(pmu); 7707 goto err_ns; 7708 } 7709 7710 err = exclusive_event_init(event); 7711 if (err) 7712 goto err_pmu; 7713 7714 if (!event->parent) { 7715 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7716 err = get_callchain_buffers(); 7717 if (err) 7718 goto err_per_task; 7719 } 7720 } 7721 7722 return event; 7723 7724 err_per_task: 7725 exclusive_event_destroy(event); 7726 7727 err_pmu: 7728 if (event->destroy) 7729 event->destroy(event); 7730 module_put(pmu->module); 7731 err_ns: 7732 if (is_cgroup_event(event)) 7733 perf_detach_cgroup(event); 7734 if (event->ns) 7735 put_pid_ns(event->ns); 7736 kfree(event); 7737 7738 return ERR_PTR(err); 7739 } 7740 7741 static int perf_copy_attr(struct perf_event_attr __user *uattr, 7742 struct perf_event_attr *attr) 7743 { 7744 u32 size; 7745 int ret; 7746 7747 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 7748 return -EFAULT; 7749 7750 /* 7751 * zero the full structure, so that a short copy will be nice. 7752 */ 7753 memset(attr, 0, sizeof(*attr)); 7754 7755 ret = get_user(size, &uattr->size); 7756 if (ret) 7757 return ret; 7758 7759 if (size > PAGE_SIZE) /* silly large */ 7760 goto err_size; 7761 7762 if (!size) /* abi compat */ 7763 size = PERF_ATTR_SIZE_VER0; 7764 7765 if (size < PERF_ATTR_SIZE_VER0) 7766 goto err_size; 7767 7768 /* 7769 * If we're handed a bigger struct than we know of, 7770 * ensure all the unknown bits are 0 - i.e. new 7771 * user-space does not rely on any kernel feature 7772 * extensions we dont know about yet. 
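	 * The tail of the user buffer, [sizeof(*attr), size), is scanned
	 * byte by byte below; any non-zero byte fails the call with -E2BIG.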
7773 */ 7774 if (size > sizeof(*attr)) { 7775 unsigned char __user *addr; 7776 unsigned char __user *end; 7777 unsigned char val; 7778 7779 addr = (void __user *)uattr + sizeof(*attr); 7780 end = (void __user *)uattr + size; 7781 7782 for (; addr < end; addr++) { 7783 ret = get_user(val, addr); 7784 if (ret) 7785 return ret; 7786 if (val) 7787 goto err_size; 7788 } 7789 size = sizeof(*attr); 7790 } 7791 7792 ret = copy_from_user(attr, uattr, size); 7793 if (ret) 7794 return -EFAULT; 7795 7796 if (attr->__reserved_1) 7797 return -EINVAL; 7798 7799 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 7800 return -EINVAL; 7801 7802 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 7803 return -EINVAL; 7804 7805 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 7806 u64 mask = attr->branch_sample_type; 7807 7808 /* only using defined bits */ 7809 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 7810 return -EINVAL; 7811 7812 /* at least one branch bit must be set */ 7813 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 7814 return -EINVAL; 7815 7816 /* propagate priv level, when not set for branch */ 7817 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 7818 7819 /* exclude_kernel checked on syscall entry */ 7820 if (!attr->exclude_kernel) 7821 mask |= PERF_SAMPLE_BRANCH_KERNEL; 7822 7823 if (!attr->exclude_user) 7824 mask |= PERF_SAMPLE_BRANCH_USER; 7825 7826 if (!attr->exclude_hv) 7827 mask |= PERF_SAMPLE_BRANCH_HV; 7828 /* 7829 * adjust user setting (for HW filter setup) 7830 */ 7831 attr->branch_sample_type = mask; 7832 } 7833 /* privileged levels capture (kernel, hv): check permissions */ 7834 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 7835 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7836 return -EACCES; 7837 } 7838 7839 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 7840 ret = perf_reg_validate(attr->sample_regs_user); 7841 if (ret) 7842 return ret; 7843 } 7844 7845 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 7846 if (!arch_perf_have_user_stack_dump()) 7847 return -ENOSYS; 7848 7849 /* 7850 * We have __u32 type for the size, but so far 7851 * we can only use __u16 as maximum due to the 7852 * __u16 sample size limit. 7853 */ 7854 if (attr->sample_stack_user >= USHRT_MAX) 7855 ret = -EINVAL; 7856 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 7857 ret = -EINVAL; 7858 } 7859 7860 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 7861 ret = perf_reg_validate(attr->sample_regs_intr); 7862 out: 7863 return ret; 7864 7865 err_size: 7866 put_user(sizeof(*attr), &uattr->size); 7867 ret = -E2BIG; 7868 goto out; 7869 } 7870 7871 static int 7872 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 7873 { 7874 struct ring_buffer *rb = NULL; 7875 int ret = -EINVAL; 7876 7877 if (!output_event) 7878 goto set; 7879 7880 /* don't allow circular references */ 7881 if (event == output_event) 7882 goto out; 7883 7884 /* 7885 * Don't allow cross-cpu buffers 7886 */ 7887 if (output_event->cpu != event->cpu) 7888 goto out; 7889 7890 /* 7891 * If its not a per-cpu rb, it must be the same task. 7892 */ 7893 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 7894 goto out; 7895 7896 /* 7897 * Mixing clocks in the same buffer is trouble you don't need. 
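	 * so redirecting output is only allowed between events that use
	 * the same clock.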
7898 */ 7899 if (output_event->clock != event->clock) 7900 goto out; 7901 7902 /* 7903 * If both events generate aux data, they must be on the same PMU 7904 */ 7905 if (has_aux(event) && has_aux(output_event) && 7906 event->pmu != output_event->pmu) 7907 goto out; 7908 7909 set: 7910 mutex_lock(&event->mmap_mutex); 7911 /* Can't redirect output if we've got an active mmap() */ 7912 if (atomic_read(&event->mmap_count)) 7913 goto unlock; 7914 7915 if (output_event) { 7916 /* get the rb we want to redirect to */ 7917 rb = ring_buffer_get(output_event); 7918 if (!rb) 7919 goto unlock; 7920 } 7921 7922 ring_buffer_attach(event, rb); 7923 7924 ret = 0; 7925 unlock: 7926 mutex_unlock(&event->mmap_mutex); 7927 7928 out: 7929 return ret; 7930 } 7931 7932 static void mutex_lock_double(struct mutex *a, struct mutex *b) 7933 { 7934 if (b < a) 7935 swap(a, b); 7936 7937 mutex_lock(a); 7938 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 7939 } 7940 7941 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 7942 { 7943 bool nmi_safe = false; 7944 7945 switch (clk_id) { 7946 case CLOCK_MONOTONIC: 7947 event->clock = &ktime_get_mono_fast_ns; 7948 nmi_safe = true; 7949 break; 7950 7951 case CLOCK_MONOTONIC_RAW: 7952 event->clock = &ktime_get_raw_fast_ns; 7953 nmi_safe = true; 7954 break; 7955 7956 case CLOCK_REALTIME: 7957 event->clock = &ktime_get_real_ns; 7958 break; 7959 7960 case CLOCK_BOOTTIME: 7961 event->clock = &ktime_get_boot_ns; 7962 break; 7963 7964 case CLOCK_TAI: 7965 event->clock = &ktime_get_tai_ns; 7966 break; 7967 7968 default: 7969 return -EINVAL; 7970 } 7971 7972 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 7973 return -EINVAL; 7974 7975 return 0; 7976 } 7977 7978 /** 7979 * sys_perf_event_open - open a performance event, associate it to a task/cpu 7980 * 7981 * @attr_uptr: event_id type attributes for monitoring/sampling 7982 * @pid: target pid 7983 * @cpu: target cpu 7984 * @group_fd: group leader event fd 7985 */ 7986 SYSCALL_DEFINE5(perf_event_open, 7987 struct perf_event_attr __user *, attr_uptr, 7988 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 7989 { 7990 struct perf_event *group_leader = NULL, *output_event = NULL; 7991 struct perf_event *event, *sibling; 7992 struct perf_event_attr attr; 7993 struct perf_event_context *ctx, *uninitialized_var(gctx); 7994 struct file *event_file = NULL; 7995 struct fd group = {NULL, 0}; 7996 struct task_struct *task = NULL; 7997 struct pmu *pmu; 7998 int event_fd; 7999 int move_group = 0; 8000 int err; 8001 int f_flags = O_RDWR; 8002 int cgroup_fd = -1; 8003 8004 /* for future expandability... */ 8005 if (flags & ~PERF_FLAG_ALL) 8006 return -EINVAL; 8007 8008 err = perf_copy_attr(attr_uptr, &attr); 8009 if (err) 8010 return err; 8011 8012 if (!attr.exclude_kernel) { 8013 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8014 return -EACCES; 8015 } 8016 8017 if (attr.freq) { 8018 if (attr.sample_freq > sysctl_perf_event_sample_rate) 8019 return -EINVAL; 8020 } else { 8021 if (attr.sample_period & (1ULL << 63)) 8022 return -EINVAL; 8023 } 8024 8025 /* 8026 * In cgroup mode, the pid argument is used to pass the fd 8027 * opened to the cgroup directory in cgroupfs. The cpu argument 8028 * designates the cpu on which to monitor threads from that 8029 * cgroup. 
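	 * Hence both pid (the cgroup fd) and cpu must be specified when
	 * PERF_FLAG_PID_CGROUP is set.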
8030 */ 8031 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 8032 return -EINVAL; 8033 8034 if (flags & PERF_FLAG_FD_CLOEXEC) 8035 f_flags |= O_CLOEXEC; 8036 8037 event_fd = get_unused_fd_flags(f_flags); 8038 if (event_fd < 0) 8039 return event_fd; 8040 8041 if (group_fd != -1) { 8042 err = perf_fget_light(group_fd, &group); 8043 if (err) 8044 goto err_fd; 8045 group_leader = group.file->private_data; 8046 if (flags & PERF_FLAG_FD_OUTPUT) 8047 output_event = group_leader; 8048 if (flags & PERF_FLAG_FD_NO_GROUP) 8049 group_leader = NULL; 8050 } 8051 8052 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 8053 task = find_lively_task_by_vpid(pid); 8054 if (IS_ERR(task)) { 8055 err = PTR_ERR(task); 8056 goto err_group_fd; 8057 } 8058 } 8059 8060 if (task && group_leader && 8061 group_leader->attr.inherit != attr.inherit) { 8062 err = -EINVAL; 8063 goto err_task; 8064 } 8065 8066 get_online_cpus(); 8067 8068 if (flags & PERF_FLAG_PID_CGROUP) 8069 cgroup_fd = pid; 8070 8071 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 8072 NULL, NULL, cgroup_fd); 8073 if (IS_ERR(event)) { 8074 err = PTR_ERR(event); 8075 goto err_cpus; 8076 } 8077 8078 if (is_sampling_event(event)) { 8079 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 8080 err = -ENOTSUPP; 8081 goto err_alloc; 8082 } 8083 } 8084 8085 account_event(event); 8086 8087 /* 8088 * Special case software events and allow them to be part of 8089 * any hardware group. 8090 */ 8091 pmu = event->pmu; 8092 8093 if (attr.use_clockid) { 8094 err = perf_event_set_clock(event, attr.clockid); 8095 if (err) 8096 goto err_alloc; 8097 } 8098 8099 if (group_leader && 8100 (is_software_event(event) != is_software_event(group_leader))) { 8101 if (is_software_event(event)) { 8102 /* 8103 * If event and group_leader are not both a software 8104 * event, and event is, then group leader is not. 8105 * 8106 * Allow the addition of software events to !software 8107 * groups, this is safe because software events never 8108 * fail to schedule. 8109 */ 8110 pmu = group_leader->pmu; 8111 } else if (is_software_event(group_leader) && 8112 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8113 /* 8114 * In case the group is a pure software group, and we 8115 * try to add a hardware event, move the whole group to 8116 * the hardware context. 8117 */ 8118 move_group = 1; 8119 } 8120 } 8121 8122 /* 8123 * Get the target context (task or percpu): 8124 */ 8125 ctx = find_get_context(pmu, task, event); 8126 if (IS_ERR(ctx)) { 8127 err = PTR_ERR(ctx); 8128 goto err_alloc; 8129 } 8130 8131 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8132 err = -EBUSY; 8133 goto err_context; 8134 } 8135 8136 if (task) { 8137 put_task_struct(task); 8138 task = NULL; 8139 } 8140 8141 /* 8142 * Look up the group leader (we will attach this event to it): 8143 */ 8144 if (group_leader) { 8145 err = -EINVAL; 8146 8147 /* 8148 * Do not allow a recursive hierarchy (this new sibling 8149 * becoming part of another group-sibling): 8150 */ 8151 if (group_leader->group_leader != group_leader) 8152 goto err_context; 8153 8154 /* All events in a group should have the same clock */ 8155 if (group_leader->clock != event->clock) 8156 goto err_context; 8157 8158 /* 8159 * Do not allow to attach to a group in a different 8160 * task or CPU context: 8161 */ 8162 if (move_group) { 8163 /* 8164 * Make sure we're both on the same task, or both 8165 * per-cpu events. 
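			 * i.e. the group leader's context and the target
			 * context must describe the same task (or both be
			 * per-cpu, task-less contexts).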
8166 */ 8167 if (group_leader->ctx->task != ctx->task) 8168 goto err_context; 8169 8170 /* 8171 * Make sure we're both events for the same CPU; 8172 * grouping events for different CPUs is broken; since 8173 * you can never concurrently schedule them anyhow. 8174 */ 8175 if (group_leader->cpu != event->cpu) 8176 goto err_context; 8177 } else { 8178 if (group_leader->ctx != ctx) 8179 goto err_context; 8180 } 8181 8182 /* 8183 * Only a group leader can be exclusive or pinned 8184 */ 8185 if (attr.exclusive || attr.pinned) 8186 goto err_context; 8187 } 8188 8189 if (output_event) { 8190 err = perf_event_set_output(event, output_event); 8191 if (err) 8192 goto err_context; 8193 } 8194 8195 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 8196 f_flags); 8197 if (IS_ERR(event_file)) { 8198 err = PTR_ERR(event_file); 8199 goto err_context; 8200 } 8201 8202 if (move_group) { 8203 gctx = group_leader->ctx; 8204 8205 /* 8206 * See perf_event_ctx_lock() for comments on the details 8207 * of swizzling perf_event::ctx. 8208 */ 8209 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8210 8211 perf_remove_from_context(group_leader, false); 8212 8213 list_for_each_entry(sibling, &group_leader->sibling_list, 8214 group_entry) { 8215 perf_remove_from_context(sibling, false); 8216 put_ctx(gctx); 8217 } 8218 } else { 8219 mutex_lock(&ctx->mutex); 8220 } 8221 8222 WARN_ON_ONCE(ctx->parent_ctx); 8223 8224 if (move_group) { 8225 /* 8226 * Wait for everybody to stop referencing the events through 8227 * the old lists, before installing it on new lists. 8228 */ 8229 synchronize_rcu(); 8230 8231 /* 8232 * Install the group siblings before the group leader. 8233 * 8234 * Because a group leader will try and install the entire group 8235 * (through the sibling list, which is still in-tact), we can 8236 * end up with siblings installed in the wrong context. 8237 * 8238 * By installing siblings first we NO-OP because they're not 8239 * reachable through the group lists. 8240 */ 8241 list_for_each_entry(sibling, &group_leader->sibling_list, 8242 group_entry) { 8243 perf_event__state_init(sibling); 8244 perf_install_in_context(ctx, sibling, sibling->cpu); 8245 get_ctx(ctx); 8246 } 8247 8248 /* 8249 * Removing from the context ends up with disabled 8250 * event. What we want here is event in the initial 8251 * startup state, ready to be add into new context. 8252 */ 8253 perf_event__state_init(group_leader); 8254 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8255 get_ctx(ctx); 8256 } 8257 8258 if (!exclusive_event_installable(event, ctx)) { 8259 err = -EBUSY; 8260 mutex_unlock(&ctx->mutex); 8261 fput(event_file); 8262 goto err_context; 8263 } 8264 8265 perf_install_in_context(ctx, event, event->cpu); 8266 perf_unpin_context(ctx); 8267 8268 if (move_group) { 8269 mutex_unlock(&gctx->mutex); 8270 put_ctx(gctx); 8271 } 8272 mutex_unlock(&ctx->mutex); 8273 8274 put_online_cpus(); 8275 8276 event->owner = current; 8277 8278 mutex_lock(¤t->perf_event_mutex); 8279 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 8280 mutex_unlock(¤t->perf_event_mutex); 8281 8282 /* 8283 * Precalculate sample_data sizes 8284 */ 8285 perf_event__header_size(event); 8286 perf_event__id_header_size(event); 8287 8288 /* 8289 * Drop the reference on the group_event after placing the 8290 * new event on the sibling_list. This ensures destruction 8291 * of the group leader will find the pointer to itself in 8292 * perf_group_detach(). 
8293 */ 8294 fdput(group); 8295 fd_install(event_fd, event_file); 8296 return event_fd; 8297 8298 err_context: 8299 perf_unpin_context(ctx); 8300 put_ctx(ctx); 8301 err_alloc: 8302 free_event(event); 8303 err_cpus: 8304 put_online_cpus(); 8305 err_task: 8306 if (task) 8307 put_task_struct(task); 8308 err_group_fd: 8309 fdput(group); 8310 err_fd: 8311 put_unused_fd(event_fd); 8312 return err; 8313 } 8314 8315 /** 8316 * perf_event_create_kernel_counter 8317 * 8318 * @attr: attributes of the counter to create 8319 * @cpu: cpu in which the counter is bound 8320 * @task: task to profile (NULL for percpu) 8321 */ 8322 struct perf_event * 8323 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8324 struct task_struct *task, 8325 perf_overflow_handler_t overflow_handler, 8326 void *context) 8327 { 8328 struct perf_event_context *ctx; 8329 struct perf_event *event; 8330 int err; 8331 8332 /* 8333 * Get the target context (task or percpu): 8334 */ 8335 8336 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8337 overflow_handler, context, -1); 8338 if (IS_ERR(event)) { 8339 err = PTR_ERR(event); 8340 goto err; 8341 } 8342 8343 /* Mark owner so we could distinguish it from user events. */ 8344 event->owner = EVENT_OWNER_KERNEL; 8345 8346 account_event(event); 8347 8348 ctx = find_get_context(event->pmu, task, event); 8349 if (IS_ERR(ctx)) { 8350 err = PTR_ERR(ctx); 8351 goto err_free; 8352 } 8353 8354 WARN_ON_ONCE(ctx->parent_ctx); 8355 mutex_lock(&ctx->mutex); 8356 if (!exclusive_event_installable(event, ctx)) { 8357 mutex_unlock(&ctx->mutex); 8358 perf_unpin_context(ctx); 8359 put_ctx(ctx); 8360 err = -EBUSY; 8361 goto err_free; 8362 } 8363 8364 perf_install_in_context(ctx, event, cpu); 8365 perf_unpin_context(ctx); 8366 mutex_unlock(&ctx->mutex); 8367 8368 return event; 8369 8370 err_free: 8371 free_event(event); 8372 err: 8373 return ERR_PTR(err); 8374 } 8375 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8376 8377 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8378 { 8379 struct perf_event_context *src_ctx; 8380 struct perf_event_context *dst_ctx; 8381 struct perf_event *event, *tmp; 8382 LIST_HEAD(events); 8383 8384 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8385 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8386 8387 /* 8388 * See perf_event_ctx_lock() for comments on the details 8389 * of swizzling perf_event::ctx. 8390 */ 8391 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); 8392 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 8393 event_entry) { 8394 perf_remove_from_context(event, false); 8395 unaccount_event_cpu(event, src_cpu); 8396 put_ctx(src_ctx); 8397 list_add(&event->migrate_entry, &events); 8398 } 8399 8400 /* 8401 * Wait for the events to quiesce before re-instating them. 8402 */ 8403 synchronize_rcu(); 8404 8405 /* 8406 * Re-instate events in 2 passes. 8407 * 8408 * Skip over group leaders and only install siblings on this first 8409 * pass, siblings will not get enabled without a leader, however a 8410 * leader will enable its siblings, even if those are still on the old 8411 * context. 
8412 */ 8413 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8414 if (event->group_leader == event) 8415 continue; 8416 8417 list_del(&event->migrate_entry); 8418 if (event->state >= PERF_EVENT_STATE_OFF) 8419 event->state = PERF_EVENT_STATE_INACTIVE; 8420 account_event_cpu(event, dst_cpu); 8421 perf_install_in_context(dst_ctx, event, dst_cpu); 8422 get_ctx(dst_ctx); 8423 } 8424 8425 /* 8426 * Once all the siblings are setup properly, install the group leaders 8427 * to make it go. 8428 */ 8429 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8430 list_del(&event->migrate_entry); 8431 if (event->state >= PERF_EVENT_STATE_OFF) 8432 event->state = PERF_EVENT_STATE_INACTIVE; 8433 account_event_cpu(event, dst_cpu); 8434 perf_install_in_context(dst_ctx, event, dst_cpu); 8435 get_ctx(dst_ctx); 8436 } 8437 mutex_unlock(&dst_ctx->mutex); 8438 mutex_unlock(&src_ctx->mutex); 8439 } 8440 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 8441 8442 static void sync_child_event(struct perf_event *child_event, 8443 struct task_struct *child) 8444 { 8445 struct perf_event *parent_event = child_event->parent; 8446 u64 child_val; 8447 8448 if (child_event->attr.inherit_stat) 8449 perf_event_read_event(child_event, child); 8450 8451 child_val = perf_event_count(child_event); 8452 8453 /* 8454 * Add back the child's count to the parent's count: 8455 */ 8456 atomic64_add(child_val, &parent_event->child_count); 8457 atomic64_add(child_event->total_time_enabled, 8458 &parent_event->child_total_time_enabled); 8459 atomic64_add(child_event->total_time_running, 8460 &parent_event->child_total_time_running); 8461 8462 /* 8463 * Remove this event from the parent's list 8464 */ 8465 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 8466 mutex_lock(&parent_event->child_mutex); 8467 list_del_init(&child_event->child_list); 8468 mutex_unlock(&parent_event->child_mutex); 8469 8470 /* 8471 * Make sure user/parent get notified, that we just 8472 * lost one event. 8473 */ 8474 perf_event_wakeup(parent_event); 8475 8476 /* 8477 * Release the parent event, if this was the last 8478 * reference to it. 8479 */ 8480 put_event(parent_event); 8481 } 8482 8483 static void 8484 __perf_event_exit_task(struct perf_event *child_event, 8485 struct perf_event_context *child_ctx, 8486 struct task_struct *child) 8487 { 8488 /* 8489 * Do not destroy the 'original' grouping; because of the context 8490 * switch optimization the original events could've ended up in a 8491 * random child task. 8492 * 8493 * If we were to destroy the original group, all group related 8494 * operations would cease to function properly after this random 8495 * child dies. 8496 * 8497 * Do destroy all inherited groups, we don't care about those 8498 * and being thorough is better. 8499 */ 8500 perf_remove_from_context(child_event, !!child_event->parent); 8501 8502 /* 8503 * It can happen that the parent exits first, and has events 8504 * that are still around due to the child reference. These 8505 * events need to be zapped. 
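	 * Inherited events fold their counts back into the parent and are
	 * freed; the task's own events are instead marked
	 * PERF_EVENT_STATE_EXIT and their waiters are woken up.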
8506 */ 8507 if (child_event->parent) { 8508 sync_child_event(child_event, child); 8509 free_event(child_event); 8510 } else { 8511 child_event->state = PERF_EVENT_STATE_EXIT; 8512 perf_event_wakeup(child_event); 8513 } 8514 } 8515 8516 static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 8517 { 8518 struct perf_event *child_event, *next; 8519 struct perf_event_context *child_ctx, *clone_ctx = NULL; 8520 unsigned long flags; 8521 8522 if (likely(!child->perf_event_ctxp[ctxn])) { 8523 perf_event_task(child, NULL, 0); 8524 return; 8525 } 8526 8527 local_irq_save(flags); 8528 /* 8529 * We can't reschedule here because interrupts are disabled, 8530 * and either child is current or it is a task that can't be 8531 * scheduled, so we are now safe from rescheduling changing 8532 * our context. 8533 */ 8534 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); 8535 8536 /* 8537 * Take the context lock here so that if find_get_context is 8538 * reading child->perf_event_ctxp, we wait until it has 8539 * incremented the context's refcount before we do put_ctx below. 8540 */ 8541 raw_spin_lock(&child_ctx->lock); 8542 task_ctx_sched_out(child_ctx); 8543 child->perf_event_ctxp[ctxn] = NULL; 8544 8545 /* 8546 * If this context is a clone; unclone it so it can't get 8547 * swapped to another process while we're removing all 8548 * the events from it. 8549 */ 8550 clone_ctx = unclone_ctx(child_ctx); 8551 update_context_time(child_ctx); 8552 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 8553 8554 if (clone_ctx) 8555 put_ctx(clone_ctx); 8556 8557 /* 8558 * Report the task dead after unscheduling the events so that we 8559 * won't get any samples after PERF_RECORD_EXIT. We can however still 8560 * get a few PERF_RECORD_READ events. 8561 */ 8562 perf_event_task(child, child_ctx, 0); 8563 8564 /* 8565 * We can recurse on the same lock type through: 8566 * 8567 * __perf_event_exit_task() 8568 * sync_child_event() 8569 * put_event() 8570 * mutex_lock(&ctx->mutex) 8571 * 8572 * But since its the parent context it won't be the same instance. 8573 */ 8574 mutex_lock(&child_ctx->mutex); 8575 8576 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) 8577 __perf_event_exit_task(child_event, child_ctx, child); 8578 8579 mutex_unlock(&child_ctx->mutex); 8580 8581 put_ctx(child_ctx); 8582 } 8583 8584 /* 8585 * When a child task exits, feed back event values to parent events. 8586 */ 8587 void perf_event_exit_task(struct task_struct *child) 8588 { 8589 struct perf_event *event, *tmp; 8590 int ctxn; 8591 8592 mutex_lock(&child->perf_event_mutex); 8593 list_for_each_entry_safe(event, tmp, &child->perf_event_list, 8594 owner_entry) { 8595 list_del_init(&event->owner_entry); 8596 8597 /* 8598 * Ensure the list deletion is visible before we clear 8599 * the owner, closes a race against perf_release() where 8600 * we need to serialize on the owner->perf_event_mutex. 
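		 * The smp_wmb() below orders the list_del_init() above
		 * against the subsequent clearing of event->owner.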
8601 */ 8602 smp_wmb(); 8603 event->owner = NULL; 8604 } 8605 mutex_unlock(&child->perf_event_mutex); 8606 8607 for_each_task_context_nr(ctxn) 8608 perf_event_exit_task_context(child, ctxn); 8609 } 8610 8611 static void perf_free_event(struct perf_event *event, 8612 struct perf_event_context *ctx) 8613 { 8614 struct perf_event *parent = event->parent; 8615 8616 if (WARN_ON_ONCE(!parent)) 8617 return; 8618 8619 mutex_lock(&parent->child_mutex); 8620 list_del_init(&event->child_list); 8621 mutex_unlock(&parent->child_mutex); 8622 8623 put_event(parent); 8624 8625 raw_spin_lock_irq(&ctx->lock); 8626 perf_group_detach(event); 8627 list_del_event(event, ctx); 8628 raw_spin_unlock_irq(&ctx->lock); 8629 free_event(event); 8630 } 8631 8632 /* 8633 * Free an unexposed, unused context as created by inheritance by 8634 * perf_event_init_task below, used by fork() in case of fail. 8635 * 8636 * Not all locks are strictly required, but take them anyway to be nice and 8637 * help out with the lockdep assertions. 8638 */ 8639 void perf_event_free_task(struct task_struct *task) 8640 { 8641 struct perf_event_context *ctx; 8642 struct perf_event *event, *tmp; 8643 int ctxn; 8644 8645 for_each_task_context_nr(ctxn) { 8646 ctx = task->perf_event_ctxp[ctxn]; 8647 if (!ctx) 8648 continue; 8649 8650 mutex_lock(&ctx->mutex); 8651 again: 8652 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, 8653 group_entry) 8654 perf_free_event(event, ctx); 8655 8656 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, 8657 group_entry) 8658 perf_free_event(event, ctx); 8659 8660 if (!list_empty(&ctx->pinned_groups) || 8661 !list_empty(&ctx->flexible_groups)) 8662 goto again; 8663 8664 mutex_unlock(&ctx->mutex); 8665 8666 put_ctx(ctx); 8667 } 8668 } 8669 8670 void perf_event_delayed_put(struct task_struct *task) 8671 { 8672 int ctxn; 8673 8674 for_each_task_context_nr(ctxn) 8675 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); 8676 } 8677 8678 /* 8679 * inherit a event from parent task to child task: 8680 */ 8681 static struct perf_event * 8682 inherit_event(struct perf_event *parent_event, 8683 struct task_struct *parent, 8684 struct perf_event_context *parent_ctx, 8685 struct task_struct *child, 8686 struct perf_event *group_leader, 8687 struct perf_event_context *child_ctx) 8688 { 8689 enum perf_event_active_state parent_state = parent_event->state; 8690 struct perf_event *child_event; 8691 unsigned long flags; 8692 8693 /* 8694 * Instead of creating recursive hierarchies of events, 8695 * we link inherited events back to the original parent, 8696 * which has a filp for sure, which we use as the reference 8697 * count: 8698 */ 8699 if (parent_event->parent) 8700 parent_event = parent_event->parent; 8701 8702 child_event = perf_event_alloc(&parent_event->attr, 8703 parent_event->cpu, 8704 child, 8705 group_leader, parent_event, 8706 NULL, NULL, -1); 8707 if (IS_ERR(child_event)) 8708 return child_event; 8709 8710 if (is_orphaned_event(parent_event) || 8711 !atomic_long_inc_not_zero(&parent_event->refcount)) { 8712 free_event(child_event); 8713 return NULL; 8714 } 8715 8716 get_ctx(child_ctx); 8717 8718 /* 8719 * Make the child state follow the state of the parent event, 8720 * not its attr.disabled bit. We hold the parent's mutex, 8721 * so we won't race with perf_event_{en, dis}able_family. 
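	 * An enabled parent (INACTIVE or ACTIVE) therefore yields an
	 * INACTIVE child; any lower parent state yields an OFF child.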
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
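		 *
		 * Rough call chain for the common case (illustrative):
		 *
		 *	copy_process()
		 *	  perf_event_init_task()
		 *	    perf_event_init_context()
		 *	      inherit_task_group()	<- we are here
		 *	        alloc_perf_context()
		 *
		 * If the fork later fails, the context allocated here is
		 * torn down again via perf_event_free_task().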
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
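		 *
		 * As with inherited events, clone chains are kept flat:
		 * the child points at the eldest ancestor rather than
		 * building a recursive chain, and parent_gen snapshots
		 * the generation so that later clone-equivalence checks
		 * can compare against that single canonical context.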
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
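 *
 * Reboot notifiers run in descending priority order, so the INT_MIN
 * priority below should make this one of the very last callbacks to run.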
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task,
				   __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke at half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */