/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include "internal.h"

#include <asm/irq_regs.h>

static struct workqueue_struct *perf_wq;

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	the CPU on which to run @func
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
{
	return event->owner == EVENT_OWNER_KERNEL;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

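/*
 * Illustrative arithmetic for how the two sysctls above combine: at the
 * default sample rate of 100000 Hz, perf_sample_period_ns is
 * NSEC_PER_SEC / 100000 = 10000 ns; with the default
 * sysctl_perf_cpu_time_max_percent of 25, update_perf_cpu_limits() sets
 * perf_sample_allowed_ns = 10000 * 25 / 100 = 2500 ns, i.e. a sample may
 * consume on average at most 25% of its sample period before
 * perf_sample_event_took() starts lowering the sample rate.
 */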
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
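	 *
	 * Worked through: each call removes 1/128th of the running total
	 * and adds the new sample, so running_sample_length converges
	 * towards roughly NR_ACCUMULATED_SAMPLES * (average sample length);
	 * dividing by NR_ACCUMULATED_SAMPLES below therefore approximates
	 * the average without keeping an explicit sample count.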
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
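	 *
	 * For example, an event opened for cgroup /A also matches while a
	 * task in /A/B runs on this CPU, since /A/B is a descendant of /A
	 * (and /A is trivially a descendant of itself).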
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw if that was not the case.
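	 *
	 * For example, if prev and task belong to the same cgroup, the
	 * sched-out path above left the cgroup events running, so there
	 * is nothing to switch back in here.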
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
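 * All three therefore run on the local CPU with interrupts disabled, which
 * is why the per-cpu active_ctx_list below can be manipulated without any
 * additional locking.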
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      __perf_event_exit_task()
 *        sync_child_event()
 *          put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
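 * (perf_event_ctx_lock_nested() below implements this: it takes a reference
 * on the ctx it observes under RCU, acquires ctx->mutex, and retries from
 * the top if event->ctx changed in the meantime.)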
 *
 * Lock order:
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event_context::lock
 *	    perf_event::child_mutex;
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ?
					      PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
	struct perf_sample_data *data;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__header_size(struct perf_event *event)
{
	__perf_event_read_size(event,
			       event->group_leader->nr_siblings);
	__perf_event_header_size(event, event->attr.sample_type);
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static bool perf_event_validate_size(struct perf_event *event)
{
	/*
	 * The values computed here will be over-written when we actually
	 * attach the event.
	 */
	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
	perf_event__id_header_size(event);

	/*
	 * Sum the lot; should not exceed the 64k limit we have on records.
	 * Conservative limit to allow for callchains and other variable fields.
	 */
	if (event->read_size + event->header_size +
	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
		return false;

	return true;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
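	 *
	 * For example, when a group leader with two siblings is detached,
	 * each sibling becomes its own group leader below and, if the old
	 * leader was on a context group list, is moved onto that same list.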
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

		WARN_ON_ONCE(sibling->ctx != event->ctx);
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

/*
 * User event without the task.
 */
static bool is_orphaned_event(struct perf_event *event)
{
	return event && !is_kernel_event(event) && !event->owner;
}

/*
 * Event has a parent but the parent's task has finished; the event stays
 * alive only because children hold a reference.
 */
static bool is_orphaned_child(struct perf_event *event)
{
	return is_orphaned_event(event->parent);
}

static void orphans_remove_work(struct work_struct *work);

static void schedule_orphans_remove(struct perf_event_context *ctx)
{
	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
		return;

	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
		get_ctx(ctx);
		ctx->orphans_remove_sched = true;
	}
}

static int __init perf_workqueue_init(void)
{
	perf_wq = create_singlethread_workqueue("perf");
	WARN(!perf_wq, "failed to create perf workqueue\n");
	return perf_wq ? 0 : -1;
}

core_initcall(perf_workqueue_init);

static inline int pmu_filter_match(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	return pmu->filter_match ?
				   pmu->filter_match(event) : 1;
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event) && pmu_filter_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * An event which could not be activated because of
	 * a filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

struct remove_event {
	struct perf_event *event;
	bool detach_group;
};

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct remove_event *re = info;
	struct perf_event *event = re->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	if (re->detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call. The removal can
		 * fail if the CPU is currently offline, but in that case we
		 * already called __perf_remove_from_context from
		 * perf_event_exit_cpu.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	lockdep_assert_held(&ctx->lock);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events: since we just scheduled in we might have missed
	 * several ticks already, and for a heavily scheduling task there is
	 * little guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	perf_set_shadow_time(event, ctx, tstamp);

	perf_log_itrace_start(event);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	if (!ctx->nr_active++)
		perf_event_ctx_activate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

out:
	perf_pmu_enable(event->pmu);

	return ret;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_mux_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed event and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group.
	 * This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_mux_hrtimer_restart(cpuctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
2086 */ 2087 if (ctx->task && task_ctx != ctx) { 2088 if (task_ctx) 2089 raw_spin_unlock(&task_ctx->lock); 2090 raw_spin_lock(&ctx->lock); 2091 task_ctx = ctx; 2092 } 2093 2094 if (task_ctx) { 2095 cpuctx->task_ctx = task_ctx; 2096 task = task_ctx->task; 2097 } 2098 2099 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2100 2101 update_context_time(ctx); 2102 /* 2103 * update cgrp time only if current cgrp 2104 * matches event->cgrp. Must be done before 2105 * calling add_event_to_ctx() 2106 */ 2107 update_cgrp_time_from_event(event); 2108 2109 add_event_to_ctx(event, ctx); 2110 2111 /* 2112 * Schedule everything back in 2113 */ 2114 perf_event_sched_in(cpuctx, task_ctx, task); 2115 2116 perf_pmu_enable(cpuctx->ctx.pmu); 2117 perf_ctx_unlock(cpuctx, task_ctx); 2118 2119 return 0; 2120 } 2121 2122 /* 2123 * Attach a performance event to a context 2124 * 2125 * First we add the event to the list with the hardware enable bit 2126 * in event->hw_config cleared. 2127 * 2128 * If the event is attached to a task which is on a CPU we use a smp 2129 * call to enable it in the task context. The task might have been 2130 * scheduled away, but we check this in the smp call again. 2131 */ 2132 static void 2133 perf_install_in_context(struct perf_event_context *ctx, 2134 struct perf_event *event, 2135 int cpu) 2136 { 2137 struct task_struct *task = ctx->task; 2138 2139 lockdep_assert_held(&ctx->mutex); 2140 2141 event->ctx = ctx; 2142 if (event->cpu != -1) 2143 event->cpu = cpu; 2144 2145 if (!task) { 2146 /* 2147 * Per cpu events are installed via an smp call and 2148 * the install is always successful. 2149 */ 2150 cpu_function_call(cpu, __perf_install_in_context, event); 2151 return; 2152 } 2153 2154 retry: 2155 if (!task_function_call(task, __perf_install_in_context, event)) 2156 return; 2157 2158 raw_spin_lock_irq(&ctx->lock); 2159 /* 2160 * If we failed to find a running task, but find the context active now 2161 * that we've acquired the ctx->lock, retry. 2162 */ 2163 if (ctx->is_active) { 2164 raw_spin_unlock_irq(&ctx->lock); 2165 /* 2166 * Reload the task pointer, it might have been changed by 2167 * a concurrent perf_event_context_sched_out(). 2168 */ 2169 task = ctx->task; 2170 goto retry; 2171 } 2172 2173 /* 2174 * Since the task isn't running, its safe to add the event, us holding 2175 * the ctx->lock ensures the task won't get scheduled in. 2176 */ 2177 add_event_to_ctx(event, ctx); 2178 raw_spin_unlock_irq(&ctx->lock); 2179 } 2180 2181 /* 2182 * Put a event into inactive state and update time fields. 2183 * Enabling the leader of a group effectively enables all 2184 * the group members that aren't explicitly disabled, so we 2185 * have to update their ->tstamp_enabled also. 2186 * Note: this works for group members as well as group leaders 2187 * since the non-leader members' sibling_lists will be empty. 
2188 */ 2189 static void __perf_event_mark_enabled(struct perf_event *event) 2190 { 2191 struct perf_event *sub; 2192 u64 tstamp = perf_event_time(event); 2193 2194 event->state = PERF_EVENT_STATE_INACTIVE; 2195 event->tstamp_enabled = tstamp - event->total_time_enabled; 2196 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2197 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2198 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2199 } 2200 } 2201 2202 /* 2203 * Cross CPU call to enable a performance event 2204 */ 2205 static int __perf_event_enable(void *info) 2206 { 2207 struct perf_event *event = info; 2208 struct perf_event_context *ctx = event->ctx; 2209 struct perf_event *leader = event->group_leader; 2210 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2211 int err; 2212 2213 /* 2214 * There's a time window between 'ctx->is_active' check 2215 * in perf_event_enable function and this place having: 2216 * - IRQs on 2217 * - ctx->lock unlocked 2218 * 2219 * where the task could be killed and 'ctx' deactivated 2220 * by perf_event_exit_task. 2221 */ 2222 if (!ctx->is_active) 2223 return -EINVAL; 2224 2225 raw_spin_lock(&ctx->lock); 2226 update_context_time(ctx); 2227 2228 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2229 goto unlock; 2230 2231 /* 2232 * set current task's cgroup time reference point 2233 */ 2234 perf_cgroup_set_timestamp(current, ctx); 2235 2236 __perf_event_mark_enabled(event); 2237 2238 if (!event_filter_match(event)) { 2239 if (is_cgroup_event(event)) 2240 perf_cgroup_defer_enabled(event); 2241 goto unlock; 2242 } 2243 2244 /* 2245 * If the event is in a group and isn't the group leader, 2246 * then don't put it on unless the group is on. 2247 */ 2248 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2249 goto unlock; 2250 2251 if (!group_can_go_on(event, cpuctx, 1)) { 2252 err = -EEXIST; 2253 } else { 2254 if (event == leader) 2255 err = group_sched_in(event, cpuctx, ctx); 2256 else 2257 err = event_sched_in(event, cpuctx, ctx); 2258 } 2259 2260 if (err) { 2261 /* 2262 * If this event can't go on and it's part of a 2263 * group, then the whole group has to come off. 2264 */ 2265 if (leader != event) { 2266 group_sched_out(leader, cpuctx, ctx); 2267 perf_mux_hrtimer_restart(cpuctx); 2268 } 2269 if (leader->attr.pinned) { 2270 update_group_times(leader); 2271 leader->state = PERF_EVENT_STATE_ERROR; 2272 } 2273 } 2274 2275 unlock: 2276 raw_spin_unlock(&ctx->lock); 2277 2278 return 0; 2279 } 2280 2281 /* 2282 * Enable a event. 2283 * 2284 * If event->ctx is a cloned context, callers must make sure that 2285 * every task struct that event->ctx->task could possibly point to 2286 * remains valid. This condition is satisfied when called through 2287 * perf_event_for_each_child or perf_event_for_each as described 2288 * for perf_event_disable. 2289 */ 2290 static void _perf_event_enable(struct perf_event *event) 2291 { 2292 struct perf_event_context *ctx = event->ctx; 2293 struct task_struct *task = ctx->task; 2294 2295 if (!task) { 2296 /* 2297 * Enable the event on the cpu that it's on 2298 */ 2299 cpu_function_call(event->cpu, __perf_event_enable, event); 2300 return; 2301 } 2302 2303 raw_spin_lock_irq(&ctx->lock); 2304 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2305 goto out; 2306 2307 /* 2308 * If the event is in error state, clear that first. 
2309 * That way, if we see the event in error state below, we 2310 * know that it has gone back into error state, as distinct 2311 * from the task having been scheduled away before the 2312 * cross-call arrived. 2313 */ 2314 if (event->state == PERF_EVENT_STATE_ERROR) 2315 event->state = PERF_EVENT_STATE_OFF; 2316 2317 retry: 2318 if (!ctx->is_active) { 2319 __perf_event_mark_enabled(event); 2320 goto out; 2321 } 2322 2323 raw_spin_unlock_irq(&ctx->lock); 2324 2325 if (!task_function_call(task, __perf_event_enable, event)) 2326 return; 2327 2328 raw_spin_lock_irq(&ctx->lock); 2329 2330 /* 2331 * If the context is active and the event is still off, 2332 * we need to retry the cross-call. 2333 */ 2334 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2335 /* 2336 * task could have been flipped by a concurrent 2337 * perf_event_context_sched_out() 2338 */ 2339 task = ctx->task; 2340 goto retry; 2341 } 2342 2343 out: 2344 raw_spin_unlock_irq(&ctx->lock); 2345 } 2346 2347 /* 2348 * See perf_event_disable(); 2349 */ 2350 void perf_event_enable(struct perf_event *event) 2351 { 2352 struct perf_event_context *ctx; 2353 2354 ctx = perf_event_ctx_lock(event); 2355 _perf_event_enable(event); 2356 perf_event_ctx_unlock(event, ctx); 2357 } 2358 EXPORT_SYMBOL_GPL(perf_event_enable); 2359 2360 static int _perf_event_refresh(struct perf_event *event, int refresh) 2361 { 2362 /* 2363 * not supported on inherited events 2364 */ 2365 if (event->attr.inherit || !is_sampling_event(event)) 2366 return -EINVAL; 2367 2368 atomic_add(refresh, &event->event_limit); 2369 _perf_event_enable(event); 2370 2371 return 0; 2372 } 2373 2374 /* 2375 * See perf_event_disable() 2376 */ 2377 int perf_event_refresh(struct perf_event *event, int refresh) 2378 { 2379 struct perf_event_context *ctx; 2380 int ret; 2381 2382 ctx = perf_event_ctx_lock(event); 2383 ret = _perf_event_refresh(event, refresh); 2384 perf_event_ctx_unlock(event, ctx); 2385 2386 return ret; 2387 } 2388 EXPORT_SYMBOL_GPL(perf_event_refresh); 2389 2390 static void ctx_sched_out(struct perf_event_context *ctx, 2391 struct perf_cpu_context *cpuctx, 2392 enum event_type_t event_type) 2393 { 2394 struct perf_event *event; 2395 int is_active = ctx->is_active; 2396 2397 ctx->is_active &= ~event_type; 2398 if (likely(!ctx->nr_events)) 2399 return; 2400 2401 update_context_time(ctx); 2402 update_cgrp_time_from_cpuctx(cpuctx); 2403 if (!ctx->nr_active) 2404 return; 2405 2406 perf_pmu_disable(ctx->pmu); 2407 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2408 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2409 group_sched_out(event, cpuctx, ctx); 2410 } 2411 2412 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2413 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2414 group_sched_out(event, cpuctx, ctx); 2415 } 2416 perf_pmu_enable(ctx->pmu); 2417 } 2418 2419 /* 2420 * Test whether two contexts are equivalent, i.e. whether they have both been 2421 * cloned from the same version of the same context. 2422 * 2423 * Equivalence is measured using a generation number in the context that is 2424 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2425 * and list_del_event(). 
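* In other words, two contexts are treated as clones only when one is an unmodified clone of the other, or both are unmodified clones of the same parent; any modification since cloning bumps the generation and defeats the match below.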
2426 */ 2427 static int context_equiv(struct perf_event_context *ctx1, 2428 struct perf_event_context *ctx2) 2429 { 2430 lockdep_assert_held(&ctx1->lock); 2431 lockdep_assert_held(&ctx2->lock); 2432 2433 /* Pinning disables the swap optimization */ 2434 if (ctx1->pin_count || ctx2->pin_count) 2435 return 0; 2436 2437 /* If ctx1 is the parent of ctx2 */ 2438 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2439 return 1; 2440 2441 /* If ctx2 is the parent of ctx1 */ 2442 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2443 return 1; 2444 2445 /* 2446 * If ctx1 and ctx2 have the same parent; we flatten the parent 2447 * hierarchy, see perf_event_init_context(). 2448 */ 2449 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2450 ctx1->parent_gen == ctx2->parent_gen) 2451 return 1; 2452 2453 /* Unmatched */ 2454 return 0; 2455 } 2456 2457 static void __perf_event_sync_stat(struct perf_event *event, 2458 struct perf_event *next_event) 2459 { 2460 u64 value; 2461 2462 if (!event->attr.inherit_stat) 2463 return; 2464 2465 /* 2466 * Update the event value, we cannot use perf_event_read() 2467 * because we're in the middle of a context switch and have IRQs 2468 * disabled, which upsets smp_call_function_single(), however 2469 * we know the event must be on the current CPU, therefore we 2470 * don't need to use it. 2471 */ 2472 switch (event->state) { 2473 case PERF_EVENT_STATE_ACTIVE: 2474 event->pmu->read(event); 2475 /* fall-through */ 2476 2477 case PERF_EVENT_STATE_INACTIVE: 2478 update_event_times(event); 2479 break; 2480 2481 default: 2482 break; 2483 } 2484 2485 /* 2486 * In order to keep per-task stats reliable we need to flip the event 2487 * values when we flip the contexts. 2488 */ 2489 value = local64_read(&next_event->count); 2490 value = local64_xchg(&event->count, value); 2491 local64_set(&next_event->count, value); 2492 2493 swap(event->total_time_enabled, next_event->total_time_enabled); 2494 swap(event->total_time_running, next_event->total_time_running); 2495 2496 /* 2497 * Since we swizzled the values, update the user visible data too. 
2498 */ 2499 perf_event_update_userpage(event); 2500 perf_event_update_userpage(next_event); 2501 } 2502 2503 static void perf_event_sync_stat(struct perf_event_context *ctx, 2504 struct perf_event_context *next_ctx) 2505 { 2506 struct perf_event *event, *next_event; 2507 2508 if (!ctx->nr_stat) 2509 return; 2510 2511 update_context_time(ctx); 2512 2513 event = list_first_entry(&ctx->event_list, 2514 struct perf_event, event_entry); 2515 2516 next_event = list_first_entry(&next_ctx->event_list, 2517 struct perf_event, event_entry); 2518 2519 while (&event->event_entry != &ctx->event_list && 2520 &next_event->event_entry != &next_ctx->event_list) { 2521 2522 __perf_event_sync_stat(event, next_event); 2523 2524 event = list_next_entry(event, event_entry); 2525 next_event = list_next_entry(next_event, event_entry); 2526 } 2527 } 2528 2529 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2530 struct task_struct *next) 2531 { 2532 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2533 struct perf_event_context *next_ctx; 2534 struct perf_event_context *parent, *next_parent; 2535 struct perf_cpu_context *cpuctx; 2536 int do_switch = 1; 2537 2538 if (likely(!ctx)) 2539 return; 2540 2541 cpuctx = __get_cpu_context(ctx); 2542 if (!cpuctx->task_ctx) 2543 return; 2544 2545 rcu_read_lock(); 2546 next_ctx = next->perf_event_ctxp[ctxn]; 2547 if (!next_ctx) 2548 goto unlock; 2549 2550 parent = rcu_dereference(ctx->parent_ctx); 2551 next_parent = rcu_dereference(next_ctx->parent_ctx); 2552 2553 /* If neither context have a parent context; they cannot be clones. */ 2554 if (!parent && !next_parent) 2555 goto unlock; 2556 2557 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2558 /* 2559 * Looks like the two contexts are clones, so we might be 2560 * able to optimize the context switch. We lock both 2561 * contexts and check that they are clones under the 2562 * lock (including re-checking that neither has been 2563 * uncloned in the meantime). It doesn't matter which 2564 * order we take the locks because no other cpu could 2565 * be trying to lock both of these tasks. 2566 */ 2567 raw_spin_lock(&ctx->lock); 2568 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2569 if (context_equiv(ctx, next_ctx)) { 2570 /* 2571 * XXX do we need a memory barrier of sorts 2572 * wrt to rcu_dereference() of perf_event_ctxp 2573 */ 2574 task->perf_event_ctxp[ctxn] = next_ctx; 2575 next->perf_event_ctxp[ctxn] = ctx; 2576 ctx->task = next; 2577 next_ctx->task = task; 2578 2579 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2580 2581 do_switch = 0; 2582 2583 perf_event_sync_stat(ctx, next_ctx); 2584 } 2585 raw_spin_unlock(&next_ctx->lock); 2586 raw_spin_unlock(&ctx->lock); 2587 } 2588 unlock: 2589 rcu_read_unlock(); 2590 2591 if (do_switch) { 2592 raw_spin_lock(&ctx->lock); 2593 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2594 cpuctx->task_ctx = NULL; 2595 raw_spin_unlock(&ctx->lock); 2596 } 2597 } 2598 2599 void perf_sched_cb_dec(struct pmu *pmu) 2600 { 2601 this_cpu_dec(perf_sched_cb_usages); 2602 } 2603 2604 void perf_sched_cb_inc(struct pmu *pmu) 2605 { 2606 this_cpu_inc(perf_sched_cb_usages); 2607 } 2608 2609 /* 2610 * This function provides the context switch callback to the lower code 2611 * layer. It is invoked ONLY when the context switch callback is enabled. 
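* (A pmu arms this callback with perf_sched_cb_inc() and disarms it with perf_sched_cb_dec(), see above; only pmus that provide a sched_task() method are invoked.)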
2612 */ 2613 static void perf_pmu_sched_task(struct task_struct *prev, 2614 struct task_struct *next, 2615 bool sched_in) 2616 { 2617 struct perf_cpu_context *cpuctx; 2618 struct pmu *pmu; 2619 unsigned long flags; 2620 2621 if (prev == next) 2622 return; 2623 2624 local_irq_save(flags); 2625 2626 rcu_read_lock(); 2627 2628 list_for_each_entry_rcu(pmu, &pmus, entry) { 2629 if (pmu->sched_task) { 2630 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2631 2632 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2633 2634 perf_pmu_disable(pmu); 2635 2636 pmu->sched_task(cpuctx->task_ctx, sched_in); 2637 2638 perf_pmu_enable(pmu); 2639 2640 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2641 } 2642 } 2643 2644 rcu_read_unlock(); 2645 2646 local_irq_restore(flags); 2647 } 2648 2649 static void perf_event_switch(struct task_struct *task, 2650 struct task_struct *next_prev, bool sched_in); 2651 2652 #define for_each_task_context_nr(ctxn) \ 2653 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2654 2655 /* 2656 * Called from scheduler to remove the events of the current task, 2657 * with interrupts disabled. 2658 * 2659 * We stop each event and update the event value in event->count. 2660 * 2661 * This does not protect us against NMI, but disable() 2662 * sets the disabled bit in the control field of event _before_ 2663 * accessing the event control register. If a NMI hits, then it will 2664 * not restart the event. 2665 */ 2666 void __perf_event_task_sched_out(struct task_struct *task, 2667 struct task_struct *next) 2668 { 2669 int ctxn; 2670 2671 if (__this_cpu_read(perf_sched_cb_usages)) 2672 perf_pmu_sched_task(task, next, false); 2673 2674 if (atomic_read(&nr_switch_events)) 2675 perf_event_switch(task, next, false); 2676 2677 for_each_task_context_nr(ctxn) 2678 perf_event_context_sched_out(task, ctxn, next); 2679 2680 /* 2681 * if cgroup events exist on this CPU, then we need 2682 * to check if we have to switch out PMU state. 2683 * cgroup event are system-wide mode only 2684 */ 2685 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2686 perf_cgroup_sched_out(task, next); 2687 } 2688 2689 static void task_ctx_sched_out(struct perf_event_context *ctx) 2690 { 2691 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2692 2693 if (!cpuctx->task_ctx) 2694 return; 2695 2696 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2697 return; 2698 2699 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2700 cpuctx->task_ctx = NULL; 2701 } 2702 2703 /* 2704 * Called with IRQs disabled 2705 */ 2706 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2707 enum event_type_t event_type) 2708 { 2709 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2710 } 2711 2712 static void 2713 ctx_pinned_sched_in(struct perf_event_context *ctx, 2714 struct perf_cpu_context *cpuctx) 2715 { 2716 struct perf_event *event; 2717 2718 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2719 if (event->state <= PERF_EVENT_STATE_OFF) 2720 continue; 2721 if (!event_filter_match(event)) 2722 continue; 2723 2724 /* may need to reset tstamp_enabled */ 2725 if (is_cgroup_event(event)) 2726 perf_cgroup_mark_enabled(event, ctx); 2727 2728 if (group_can_go_on(event, cpuctx, 1)) 2729 group_sched_in(event, cpuctx, ctx); 2730 2731 /* 2732 * If this pinned group hasn't been scheduled, 2733 * put it in error state. 
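* (That is, it is still INACTIVE after the group_sched_in() attempt above; marking it ERROR keeps it from being retried and makes a later read() on the event return end-of-file.)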
2734 */ 2735 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2736 update_group_times(event); 2737 event->state = PERF_EVENT_STATE_ERROR; 2738 } 2739 } 2740 } 2741 2742 static void 2743 ctx_flexible_sched_in(struct perf_event_context *ctx, 2744 struct perf_cpu_context *cpuctx) 2745 { 2746 struct perf_event *event; 2747 int can_add_hw = 1; 2748 2749 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2750 /* Ignore events in OFF or ERROR state */ 2751 if (event->state <= PERF_EVENT_STATE_OFF) 2752 continue; 2753 /* 2754 * Listen to the 'cpu' scheduling filter constraint 2755 * of events: 2756 */ 2757 if (!event_filter_match(event)) 2758 continue; 2759 2760 /* may need to reset tstamp_enabled */ 2761 if (is_cgroup_event(event)) 2762 perf_cgroup_mark_enabled(event, ctx); 2763 2764 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2765 if (group_sched_in(event, cpuctx, ctx)) 2766 can_add_hw = 0; 2767 } 2768 } 2769 } 2770 2771 static void 2772 ctx_sched_in(struct perf_event_context *ctx, 2773 struct perf_cpu_context *cpuctx, 2774 enum event_type_t event_type, 2775 struct task_struct *task) 2776 { 2777 u64 now; 2778 int is_active = ctx->is_active; 2779 2780 ctx->is_active |= event_type; 2781 if (likely(!ctx->nr_events)) 2782 return; 2783 2784 now = perf_clock(); 2785 ctx->timestamp = now; 2786 perf_cgroup_set_timestamp(task, ctx); 2787 /* 2788 * First go through the list and put on any pinned groups 2789 * in order to give them the best chance of going on. 2790 */ 2791 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2792 ctx_pinned_sched_in(ctx, cpuctx); 2793 2794 /* Then walk through the lower prio flexible groups */ 2795 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2796 ctx_flexible_sched_in(ctx, cpuctx); 2797 } 2798 2799 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2800 enum event_type_t event_type, 2801 struct task_struct *task) 2802 { 2803 struct perf_event_context *ctx = &cpuctx->ctx; 2804 2805 ctx_sched_in(ctx, cpuctx, event_type, task); 2806 } 2807 2808 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2809 struct task_struct *task) 2810 { 2811 struct perf_cpu_context *cpuctx; 2812 2813 cpuctx = __get_cpu_context(ctx); 2814 if (cpuctx->task_ctx == ctx) 2815 return; 2816 2817 perf_ctx_lock(cpuctx, ctx); 2818 perf_pmu_disable(ctx->pmu); 2819 /* 2820 * We want to keep the following priority order: 2821 * cpu pinned (that don't need to move), task pinned, 2822 * cpu flexible, task flexible. 2823 */ 2824 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2825 2826 if (ctx->nr_events) 2827 cpuctx->task_ctx = ctx; 2828 2829 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2830 2831 perf_pmu_enable(ctx->pmu); 2832 perf_ctx_unlock(cpuctx, ctx); 2833 } 2834 2835 /* 2836 * Called from scheduler to add the events of the current task 2837 * with interrupts disabled. 2838 * 2839 * We restore the event value and then enable it. 2840 * 2841 * This does not protect us against NMI, but enable() 2842 * sets the enabled bit in the control field of event _before_ 2843 * accessing the event control register. If a NMI hits, then it will 2844 * keep the event running. 
2845 */ 2846 void __perf_event_task_sched_in(struct task_struct *prev, 2847 struct task_struct *task) 2848 { 2849 struct perf_event_context *ctx; 2850 int ctxn; 2851 2852 for_each_task_context_nr(ctxn) { 2853 ctx = task->perf_event_ctxp[ctxn]; 2854 if (likely(!ctx)) 2855 continue; 2856 2857 perf_event_context_sched_in(ctx, task); 2858 } 2859 /* 2860 * if cgroup events exist on this CPU, then we need 2861 * to check if we have to switch in PMU state. 2862 * cgroup event are system-wide mode only 2863 */ 2864 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2865 perf_cgroup_sched_in(prev, task); 2866 2867 if (atomic_read(&nr_switch_events)) 2868 perf_event_switch(task, prev, true); 2869 2870 if (__this_cpu_read(perf_sched_cb_usages)) 2871 perf_pmu_sched_task(prev, task, true); 2872 } 2873 2874 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2875 { 2876 u64 frequency = event->attr.sample_freq; 2877 u64 sec = NSEC_PER_SEC; 2878 u64 divisor, dividend; 2879 2880 int count_fls, nsec_fls, frequency_fls, sec_fls; 2881 2882 count_fls = fls64(count); 2883 nsec_fls = fls64(nsec); 2884 frequency_fls = fls64(frequency); 2885 sec_fls = 30; 2886 2887 /* 2888 * We got @count in @nsec, with a target of sample_freq HZ 2889 * the target period becomes: 2890 * 2891 * @count * 10^9 2892 * period = ------------------- 2893 * @nsec * sample_freq 2894 * 2895 */ 2896 2897 /* 2898 * Reduce accuracy by one bit such that @a and @b converge 2899 * to a similar magnitude. 2900 */ 2901 #define REDUCE_FLS(a, b) \ 2902 do { \ 2903 if (a##_fls > b##_fls) { \ 2904 a >>= 1; \ 2905 a##_fls--; \ 2906 } else { \ 2907 b >>= 1; \ 2908 b##_fls--; \ 2909 } \ 2910 } while (0) 2911 2912 /* 2913 * Reduce accuracy until either term fits in a u64, then proceed with 2914 * the other, so that finally we can do a u64/u64 division. 2915 */ 2916 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2917 REDUCE_FLS(nsec, frequency); 2918 REDUCE_FLS(sec, count); 2919 } 2920 2921 if (count_fls + sec_fls > 64) { 2922 divisor = nsec * frequency; 2923 2924 while (count_fls + sec_fls > 64) { 2925 REDUCE_FLS(count, sec); 2926 divisor >>= 1; 2927 } 2928 2929 dividend = count * sec; 2930 } else { 2931 dividend = count * sec; 2932 2933 while (nsec_fls + frequency_fls > 64) { 2934 REDUCE_FLS(nsec, frequency); 2935 dividend >>= 1; 2936 } 2937 2938 divisor = nsec * frequency; 2939 } 2940 2941 if (!divisor) 2942 return dividend; 2943 2944 return div64_u64(dividend, divisor); 2945 } 2946 2947 static DEFINE_PER_CPU(int, perf_throttled_count); 2948 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2949 2950 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2951 { 2952 struct hw_perf_event *hwc = &event->hw; 2953 s64 period, sample_period; 2954 s64 delta; 2955 2956 period = perf_calculate_period(event, nsec, count); 2957 2958 delta = (s64)(period - hwc->sample_period); 2959 delta = (delta + 7) / 8; /* low pass filter */ 2960 2961 sample_period = hwc->sample_period + delta; 2962 2963 if (!sample_period) 2964 sample_period = 1; 2965 2966 hwc->sample_period = sample_period; 2967 2968 if (local64_read(&hwc->period_left) > 8*sample_period) { 2969 if (disable) 2970 event->pmu->stop(event, PERF_EF_UPDATE); 2971 2972 local64_set(&hwc->period_left, 0); 2973 2974 if (disable) 2975 event->pmu->start(event, PERF_EF_RELOAD); 2976 } 2977 } 2978 2979 /* 2980 * combine freq adjustment with unthrottling to avoid two passes over the 2981 * events. 
At the same time, make sure, having freq events does not change 2982 * the rate of unthrottling as that would introduce bias. 2983 */ 2984 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2985 int needs_unthr) 2986 { 2987 struct perf_event *event; 2988 struct hw_perf_event *hwc; 2989 u64 now, period = TICK_NSEC; 2990 s64 delta; 2991 2992 /* 2993 * only need to iterate over all events iff: 2994 * - context have events in frequency mode (needs freq adjust) 2995 * - there are events to unthrottle on this cpu 2996 */ 2997 if (!(ctx->nr_freq || needs_unthr)) 2998 return; 2999 3000 raw_spin_lock(&ctx->lock); 3001 perf_pmu_disable(ctx->pmu); 3002 3003 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3004 if (event->state != PERF_EVENT_STATE_ACTIVE) 3005 continue; 3006 3007 if (!event_filter_match(event)) 3008 continue; 3009 3010 perf_pmu_disable(event->pmu); 3011 3012 hwc = &event->hw; 3013 3014 if (hwc->interrupts == MAX_INTERRUPTS) { 3015 hwc->interrupts = 0; 3016 perf_log_throttle(event, 1); 3017 event->pmu->start(event, 0); 3018 } 3019 3020 if (!event->attr.freq || !event->attr.sample_freq) 3021 goto next; 3022 3023 /* 3024 * stop the event and update event->count 3025 */ 3026 event->pmu->stop(event, PERF_EF_UPDATE); 3027 3028 now = local64_read(&event->count); 3029 delta = now - hwc->freq_count_stamp; 3030 hwc->freq_count_stamp = now; 3031 3032 /* 3033 * restart the event 3034 * reload only if value has changed 3035 * we have stopped the event so tell that 3036 * to perf_adjust_period() to avoid stopping it 3037 * twice. 3038 */ 3039 if (delta > 0) 3040 perf_adjust_period(event, period, delta, false); 3041 3042 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3043 next: 3044 perf_pmu_enable(event->pmu); 3045 } 3046 3047 perf_pmu_enable(ctx->pmu); 3048 raw_spin_unlock(&ctx->lock); 3049 } 3050 3051 /* 3052 * Round-robin a context's events: 3053 */ 3054 static void rotate_ctx(struct perf_event_context *ctx) 3055 { 3056 /* 3057 * Rotate the first entry last of non-pinned groups. Rotation might be 3058 * disabled by the inheritance code. 
3059 */ 3060 if (!ctx->rotate_disable) 3061 list_rotate_left(&ctx->flexible_groups); 3062 } 3063 3064 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3065 { 3066 struct perf_event_context *ctx = NULL; 3067 int rotate = 0; 3068 3069 if (cpuctx->ctx.nr_events) { 3070 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3071 rotate = 1; 3072 } 3073 3074 ctx = cpuctx->task_ctx; 3075 if (ctx && ctx->nr_events) { 3076 if (ctx->nr_events != ctx->nr_active) 3077 rotate = 1; 3078 } 3079 3080 if (!rotate) 3081 goto done; 3082 3083 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3084 perf_pmu_disable(cpuctx->ctx.pmu); 3085 3086 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3087 if (ctx) 3088 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3089 3090 rotate_ctx(&cpuctx->ctx); 3091 if (ctx) 3092 rotate_ctx(ctx); 3093 3094 perf_event_sched_in(cpuctx, ctx, current); 3095 3096 perf_pmu_enable(cpuctx->ctx.pmu); 3097 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3098 done: 3099 3100 return rotate; 3101 } 3102 3103 #ifdef CONFIG_NO_HZ_FULL 3104 bool perf_event_can_stop_tick(void) 3105 { 3106 if (atomic_read(&nr_freq_events) || 3107 __this_cpu_read(perf_throttled_count)) 3108 return false; 3109 else 3110 return true; 3111 } 3112 #endif 3113 3114 void perf_event_task_tick(void) 3115 { 3116 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3117 struct perf_event_context *ctx, *tmp; 3118 int throttled; 3119 3120 WARN_ON(!irqs_disabled()); 3121 3122 __this_cpu_inc(perf_throttled_seq); 3123 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3124 3125 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3126 perf_adjust_freq_unthr_context(ctx, throttled); 3127 } 3128 3129 static int event_enable_on_exec(struct perf_event *event, 3130 struct perf_event_context *ctx) 3131 { 3132 if (!event->attr.enable_on_exec) 3133 return 0; 3134 3135 event->attr.enable_on_exec = 0; 3136 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3137 return 0; 3138 3139 __perf_event_mark_enabled(event); 3140 3141 return 1; 3142 } 3143 3144 /* 3145 * Enable all of a task's events that have been marked enable-on-exec. 3146 * This expects task == current. 3147 */ 3148 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3149 { 3150 struct perf_event_context *clone_ctx = NULL; 3151 struct perf_event *event; 3152 unsigned long flags; 3153 int enabled = 0; 3154 int ret; 3155 3156 local_irq_save(flags); 3157 if (!ctx || !ctx->nr_events) 3158 goto out; 3159 3160 /* 3161 * We must ctxsw out cgroup events to avoid conflict 3162 * when invoking perf_task_event_sched_in() later on 3163 * in this function. Otherwise we end up trying to 3164 * ctxswin cgroup events which are already scheduled 3165 * in. 3166 */ 3167 perf_cgroup_sched_out(current, NULL); 3168 3169 raw_spin_lock(&ctx->lock); 3170 task_ctx_sched_out(ctx); 3171 3172 list_for_each_entry(event, &ctx->event_list, event_entry) { 3173 ret = event_enable_on_exec(event, ctx); 3174 if (ret) 3175 enabled = 1; 3176 } 3177 3178 /* 3179 * Unclone this context if we enabled any event. 
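* (Once uncloned, the context can no longer be considered equivalent to its parent, so the lazy context-switch swap in context_equiv() will not apply to it any more.)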
3180 */ 3181 if (enabled) 3182 clone_ctx = unclone_ctx(ctx); 3183 3184 raw_spin_unlock(&ctx->lock); 3185 3186 /* 3187 * Also calls ctxswin for cgroup events, if any: 3188 */ 3189 perf_event_context_sched_in(ctx, ctx->task); 3190 out: 3191 local_irq_restore(flags); 3192 3193 if (clone_ctx) 3194 put_ctx(clone_ctx); 3195 } 3196 3197 void perf_event_exec(void) 3198 { 3199 struct perf_event_context *ctx; 3200 int ctxn; 3201 3202 rcu_read_lock(); 3203 for_each_task_context_nr(ctxn) { 3204 ctx = current->perf_event_ctxp[ctxn]; 3205 if (!ctx) 3206 continue; 3207 3208 perf_event_enable_on_exec(ctx); 3209 } 3210 rcu_read_unlock(); 3211 } 3212 3213 struct perf_read_data { 3214 struct perf_event *event; 3215 bool group; 3216 int ret; 3217 }; 3218 3219 /* 3220 * Cross CPU call to read the hardware event 3221 */ 3222 static void __perf_event_read(void *info) 3223 { 3224 struct perf_read_data *data = info; 3225 struct perf_event *sub, *event = data->event; 3226 struct perf_event_context *ctx = event->ctx; 3227 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3228 struct pmu *pmu = event->pmu; 3229 3230 /* 3231 * If this is a task context, we need to check whether it is 3232 * the current task context of this cpu. If not it has been 3233 * scheduled out before the smp call arrived. In that case 3234 * event->count would have been updated to a recent sample 3235 * when the event was scheduled out. 3236 */ 3237 if (ctx->task && cpuctx->task_ctx != ctx) 3238 return; 3239 3240 raw_spin_lock(&ctx->lock); 3241 if (ctx->is_active) { 3242 update_context_time(ctx); 3243 update_cgrp_time_from_event(event); 3244 } 3245 3246 update_event_times(event); 3247 if (event->state != PERF_EVENT_STATE_ACTIVE) 3248 goto unlock; 3249 3250 if (!data->group) { 3251 pmu->read(event); 3252 data->ret = 0; 3253 goto unlock; 3254 } 3255 3256 pmu->start_txn(pmu, PERF_PMU_TXN_READ); 3257 3258 pmu->read(event); 3259 3260 list_for_each_entry(sub, &event->sibling_list, group_entry) { 3261 update_event_times(sub); 3262 if (sub->state == PERF_EVENT_STATE_ACTIVE) { 3263 /* 3264 * Use sibling's PMU rather than @event's since 3265 * sibling could be on different (eg: software) PMU. 3266 */ 3267 sub->pmu->read(sub); 3268 } 3269 } 3270 3271 data->ret = pmu->commit_txn(pmu); 3272 3273 unlock: 3274 raw_spin_unlock(&ctx->lock); 3275 } 3276 3277 static inline u64 perf_event_count(struct perf_event *event) 3278 { 3279 if (event->pmu->count) 3280 return event->pmu->count(event); 3281 3282 return __perf_event_count(event); 3283 } 3284 3285 /* 3286 * NMI-safe method to read a local event, that is an event that 3287 * is: 3288 * - either for the current task, or for this CPU 3289 * - does not have inherit set, for inherited task events 3290 * will not be local and we cannot read them atomically 3291 * - must not have a pmu::count method 3292 */ 3293 u64 perf_event_read_local(struct perf_event *event) 3294 { 3295 unsigned long flags; 3296 u64 val; 3297 3298 /* 3299 * Disabling interrupts avoids all counter scheduling (context 3300 * switches, timer based rotation and IPIs). 
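* (Note that nothing here takes a lock or sends an IPI; that is what makes this helper usable from callers, such as the eBPF bpf_perf_event_read() helper, that cannot go through smp_call_function_single().)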
3301 */ 3302 local_irq_save(flags); 3303 3304 /* If this is a per-task event, it must be for current */ 3305 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && 3306 event->hw.target != current); 3307 3308 /* If this is a per-CPU event, it must be for this CPU */ 3309 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && 3310 event->cpu != smp_processor_id()); 3311 3312 /* 3313 * It must not be an event with inherit set, we cannot read 3314 * all child counters from atomic context. 3315 */ 3316 WARN_ON_ONCE(event->attr.inherit); 3317 3318 /* 3319 * It must not have a pmu::count method, those are not 3320 * NMI safe. 3321 */ 3322 WARN_ON_ONCE(event->pmu->count); 3323 3324 /* 3325 * If the event is currently on this CPU, its either a per-task event, 3326 * or local to this CPU. Furthermore it means its ACTIVE (otherwise 3327 * oncpu == -1). 3328 */ 3329 if (event->oncpu == smp_processor_id()) 3330 event->pmu->read(event); 3331 3332 val = local64_read(&event->count); 3333 local_irq_restore(flags); 3334 3335 return val; 3336 } 3337 3338 static int perf_event_read(struct perf_event *event, bool group) 3339 { 3340 int ret = 0; 3341 3342 /* 3343 * If event is enabled and currently active on a CPU, update the 3344 * value in the event structure: 3345 */ 3346 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3347 struct perf_read_data data = { 3348 .event = event, 3349 .group = group, 3350 .ret = 0, 3351 }; 3352 smp_call_function_single(event->oncpu, 3353 __perf_event_read, &data, 1); 3354 ret = data.ret; 3355 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3356 struct perf_event_context *ctx = event->ctx; 3357 unsigned long flags; 3358 3359 raw_spin_lock_irqsave(&ctx->lock, flags); 3360 /* 3361 * may read while context is not active 3362 * (e.g., thread is blocked), in that case 3363 * we cannot update context time 3364 */ 3365 if (ctx->is_active) { 3366 update_context_time(ctx); 3367 update_cgrp_time_from_event(event); 3368 } 3369 if (group) 3370 update_group_times(event); 3371 else 3372 update_event_times(event); 3373 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3374 } 3375 3376 return ret; 3377 } 3378 3379 /* 3380 * Initialize the perf_event context in a task_struct: 3381 */ 3382 static void __perf_event_init_context(struct perf_event_context *ctx) 3383 { 3384 raw_spin_lock_init(&ctx->lock); 3385 mutex_init(&ctx->mutex); 3386 INIT_LIST_HEAD(&ctx->active_ctx_list); 3387 INIT_LIST_HEAD(&ctx->pinned_groups); 3388 INIT_LIST_HEAD(&ctx->flexible_groups); 3389 INIT_LIST_HEAD(&ctx->event_list); 3390 atomic_set(&ctx->refcount, 1); 3391 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3392 } 3393 3394 static struct perf_event_context * 3395 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3396 { 3397 struct perf_event_context *ctx; 3398 3399 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3400 if (!ctx) 3401 return NULL; 3402 3403 __perf_event_init_context(ctx); 3404 if (task) { 3405 ctx->task = task; 3406 get_task_struct(task); 3407 } 3408 ctx->pmu = pmu; 3409 3410 return ctx; 3411 } 3412 3413 static struct task_struct * 3414 find_lively_task_by_vpid(pid_t vpid) 3415 { 3416 struct task_struct *task; 3417 int err; 3418 3419 rcu_read_lock(); 3420 if (!vpid) 3421 task = current; 3422 else 3423 task = find_task_by_vpid(vpid); 3424 if (task) 3425 get_task_struct(task); 3426 rcu_read_unlock(); 3427 3428 if (!task) 3429 return ERR_PTR(-ESRCH); 3430 3431 /* Reuse ptrace permission checks for now. 
*/ 3432 err = -EACCES; 3433 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3434 goto errout; 3435 3436 return task; 3437 errout: 3438 put_task_struct(task); 3439 return ERR_PTR(err); 3440 3441 } 3442 3443 /* 3444 * Returns a matching context with refcount and pincount. 3445 */ 3446 static struct perf_event_context * 3447 find_get_context(struct pmu *pmu, struct task_struct *task, 3448 struct perf_event *event) 3449 { 3450 struct perf_event_context *ctx, *clone_ctx = NULL; 3451 struct perf_cpu_context *cpuctx; 3452 void *task_ctx_data = NULL; 3453 unsigned long flags; 3454 int ctxn, err; 3455 int cpu = event->cpu; 3456 3457 if (!task) { 3458 /* Must be root to operate on a CPU event: */ 3459 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3460 return ERR_PTR(-EACCES); 3461 3462 /* 3463 * We could be clever and allow attaching an event to an 3464 * offline CPU and activate it when the CPU comes up, but 3465 * that's for later. 3466 */ 3467 if (!cpu_online(cpu)) 3468 return ERR_PTR(-ENODEV); 3469 3470 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3471 ctx = &cpuctx->ctx; 3472 get_ctx(ctx); 3473 ++ctx->pin_count; 3474 3475 return ctx; 3476 } 3477 3478 err = -EINVAL; 3479 ctxn = pmu->task_ctx_nr; 3480 if (ctxn < 0) 3481 goto errout; 3482 3483 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3484 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3485 if (!task_ctx_data) { 3486 err = -ENOMEM; 3487 goto errout; 3488 } 3489 } 3490 3491 retry: 3492 ctx = perf_lock_task_context(task, ctxn, &flags); 3493 if (ctx) { 3494 clone_ctx = unclone_ctx(ctx); 3495 ++ctx->pin_count; 3496 3497 if (task_ctx_data && !ctx->task_ctx_data) { 3498 ctx->task_ctx_data = task_ctx_data; 3499 task_ctx_data = NULL; 3500 } 3501 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3502 3503 if (clone_ctx) 3504 put_ctx(clone_ctx); 3505 } else { 3506 ctx = alloc_perf_context(pmu, task); 3507 err = -ENOMEM; 3508 if (!ctx) 3509 goto errout; 3510 3511 if (task_ctx_data) { 3512 ctx->task_ctx_data = task_ctx_data; 3513 task_ctx_data = NULL; 3514 } 3515 3516 err = 0; 3517 mutex_lock(&task->perf_event_mutex); 3518 /* 3519 * If it has already passed perf_event_exit_task(), 3520 * we must see PF_EXITING; it takes this mutex too.
3521 */ 3522 if (task->flags & PF_EXITING) 3523 err = -ESRCH; 3524 else if (task->perf_event_ctxp[ctxn]) 3525 err = -EAGAIN; 3526 else { 3527 get_ctx(ctx); 3528 ++ctx->pin_count; 3529 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3530 } 3531 mutex_unlock(&task->perf_event_mutex); 3532 3533 if (unlikely(err)) { 3534 put_ctx(ctx); 3535 3536 if (err == -EAGAIN) 3537 goto retry; 3538 goto errout; 3539 } 3540 } 3541 3542 kfree(task_ctx_data); 3543 return ctx; 3544 3545 errout: 3546 kfree(task_ctx_data); 3547 return ERR_PTR(err); 3548 } 3549 3550 static void perf_event_free_filter(struct perf_event *event); 3551 static void perf_event_free_bpf_prog(struct perf_event *event); 3552 3553 static void free_event_rcu(struct rcu_head *head) 3554 { 3555 struct perf_event *event; 3556 3557 event = container_of(head, struct perf_event, rcu_head); 3558 if (event->ns) 3559 put_pid_ns(event->ns); 3560 perf_event_free_filter(event); 3561 kfree(event); 3562 } 3563 3564 static void ring_buffer_attach(struct perf_event *event, 3565 struct ring_buffer *rb); 3566 3567 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3568 { 3569 if (event->parent) 3570 return; 3571 3572 if (is_cgroup_event(event)) 3573 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3574 } 3575 3576 static void unaccount_event(struct perf_event *event) 3577 { 3578 if (event->parent) 3579 return; 3580 3581 if (event->attach_state & PERF_ATTACH_TASK) 3582 static_key_slow_dec_deferred(&perf_sched_events); 3583 if (event->attr.mmap || event->attr.mmap_data) 3584 atomic_dec(&nr_mmap_events); 3585 if (event->attr.comm) 3586 atomic_dec(&nr_comm_events); 3587 if (event->attr.task) 3588 atomic_dec(&nr_task_events); 3589 if (event->attr.freq) 3590 atomic_dec(&nr_freq_events); 3591 if (event->attr.context_switch) { 3592 static_key_slow_dec_deferred(&perf_sched_events); 3593 atomic_dec(&nr_switch_events); 3594 } 3595 if (is_cgroup_event(event)) 3596 static_key_slow_dec_deferred(&perf_sched_events); 3597 if (has_branch_stack(event)) 3598 static_key_slow_dec_deferred(&perf_sched_events); 3599 3600 unaccount_event_cpu(event, event->cpu); 3601 } 3602 3603 /* 3604 * The following implement mutual exclusion of events on "exclusive" pmus 3605 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3606 * at a time, so we disallow creating events that might conflict, namely: 3607 * 3608 * 1) cpu-wide events in the presence of per-task events, 3609 * 2) per-task events in the presence of cpu-wide events, 3610 * 3) two matching events on the same context. 3611 * 3612 * The former two cases are handled in the allocation path (perf_event_alloc(), 3613 * __free_event()), the latter -- before the first perf_install_in_context(). 3614 */ 3615 static int exclusive_event_init(struct perf_event *event) 3616 { 3617 struct pmu *pmu = event->pmu; 3618 3619 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3620 return 0; 3621 3622 /* 3623 * Prevent co-existence of per-task and cpu-wide events on the 3624 * same exclusive pmu. 3625 * 3626 * Negative pmu::exclusive_cnt means there are cpu-wide 3627 * events on this "exclusive" pmu, positive means there are 3628 * per-task events. 3629 * 3630 * Since this is called in perf_event_alloc() path, event::ctx 3631 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3632 * to mean "per-task event", because unlike other attach states it 3633 * never gets cleared. 
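* For example, exclusive_cnt == 3 means three per-task events currently exist on this pmu and exclusive_cnt == -2 means two cpu-wide events; the inc_unless_negative/dec_unless_positive pair below refuses to mix the two kinds and fails with -EBUSY instead.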
3634 */ 3635 if (event->attach_state & PERF_ATTACH_TASK) { 3636 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3637 return -EBUSY; 3638 } else { 3639 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3640 return -EBUSY; 3641 } 3642 3643 return 0; 3644 } 3645 3646 static void exclusive_event_destroy(struct perf_event *event) 3647 { 3648 struct pmu *pmu = event->pmu; 3649 3650 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3651 return; 3652 3653 /* see comment in exclusive_event_init() */ 3654 if (event->attach_state & PERF_ATTACH_TASK) 3655 atomic_dec(&pmu->exclusive_cnt); 3656 else 3657 atomic_inc(&pmu->exclusive_cnt); 3658 } 3659 3660 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3661 { 3662 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3663 (e1->cpu == e2->cpu || 3664 e1->cpu == -1 || 3665 e2->cpu == -1)) 3666 return true; 3667 return false; 3668 } 3669 3670 /* Called under the same ctx::mutex as perf_install_in_context() */ 3671 static bool exclusive_event_installable(struct perf_event *event, 3672 struct perf_event_context *ctx) 3673 { 3674 struct perf_event *iter_event; 3675 struct pmu *pmu = event->pmu; 3676 3677 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3678 return true; 3679 3680 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3681 if (exclusive_event_match(iter_event, event)) 3682 return false; 3683 } 3684 3685 return true; 3686 } 3687 3688 static void __free_event(struct perf_event *event) 3689 { 3690 if (!event->parent) { 3691 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3692 put_callchain_buffers(); 3693 } 3694 3695 perf_event_free_bpf_prog(event); 3696 3697 if (event->destroy) 3698 event->destroy(event); 3699 3700 if (event->ctx) 3701 put_ctx(event->ctx); 3702 3703 if (event->pmu) { 3704 exclusive_event_destroy(event); 3705 module_put(event->pmu->module); 3706 } 3707 3708 call_rcu(&event->rcu_head, free_event_rcu); 3709 } 3710 3711 static void _free_event(struct perf_event *event) 3712 { 3713 irq_work_sync(&event->pending); 3714 3715 unaccount_event(event); 3716 3717 if (event->rb) { 3718 /* 3719 * Can happen when we close an event with re-directed output. 3720 * 3721 * Since we have a 0 refcount, perf_mmap_close() will skip 3722 * over us; possibly making our ring_buffer_put() the last. 3723 */ 3724 mutex_lock(&event->mmap_mutex); 3725 ring_buffer_attach(event, NULL); 3726 mutex_unlock(&event->mmap_mutex); 3727 } 3728 3729 if (is_cgroup_event(event)) 3730 perf_detach_cgroup(event); 3731 3732 __free_event(event); 3733 } 3734 3735 /* 3736 * Used to free events which have a known refcount of 1, such as in error paths 3737 * where the event isn't exposed yet and inherited events. 3738 */ 3739 static void free_event(struct perf_event *event) 3740 { 3741 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3742 "unexpected event refcount: %ld; ptr=%p\n", 3743 atomic_long_read(&event->refcount), event)) { 3744 /* leak to avoid use-after-free */ 3745 return; 3746 } 3747 3748 _free_event(event); 3749 } 3750 3751 /* 3752 * Remove user event from the owner task. 3753 */ 3754 static void perf_remove_from_owner(struct perf_event *event) 3755 { 3756 struct task_struct *owner; 3757 3758 rcu_read_lock(); 3759 owner = ACCESS_ONCE(event->owner); 3760 /* 3761 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3762 * !owner it means the list deletion is complete and we can indeed 3763 * free this event, otherwise we need to serialize on 3764 * owner->perf_event_mutex. 
3765 */ 3766 smp_read_barrier_depends(); 3767 if (owner) { 3768 /* 3769 * Since delayed_put_task_struct() also drops the last 3770 * task reference we can safely take a new reference 3771 * while holding the rcu_read_lock(). 3772 */ 3773 get_task_struct(owner); 3774 } 3775 rcu_read_unlock(); 3776 3777 if (owner) { 3778 /* 3779 * If we're here through perf_event_exit_task() we're already 3780 * holding ctx->mutex which would be an inversion wrt. the 3781 * normal lock order. 3782 * 3783 * However we can safely take this lock because it's the child 3784 * ctx->mutex. 3785 */ 3786 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 3787 3788 /* 3789 * We have to re-check the event->owner field; if it is cleared 3790 * we raced with perf_event_exit_task(). Acquiring the mutex 3791 * ensured they're done, and we can proceed with freeing the 3792 * event. 3793 */ 3794 if (event->owner) 3795 list_del_init(&event->owner_entry); 3796 mutex_unlock(&owner->perf_event_mutex); 3797 put_task_struct(owner); 3798 } 3799 } 3800 3801 static void put_event(struct perf_event *event) 3802 { 3803 struct perf_event_context *ctx; 3804 3805 if (!atomic_long_dec_and_test(&event->refcount)) 3806 return; 3807 3808 if (!is_kernel_event(event)) 3809 perf_remove_from_owner(event); 3810 3811 /* 3812 * There are two ways this annotation is useful: 3813 * 3814 * 1) there is a lock recursion from perf_event_exit_task; 3815 * see the comment there. 3816 * 3817 * 2) there is a lock-inversion with mmap_sem through 3818 * perf_read_group(), which takes faults while 3819 * holding ctx->mutex, however this is called after 3820 * the last filedesc died, so there is no possibility 3821 * to trigger the AB-BA case. 3822 */ 3823 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3824 WARN_ON_ONCE(ctx->parent_ctx); 3825 perf_remove_from_context(event, true); 3826 perf_event_ctx_unlock(event, ctx); 3827 3828 _free_event(event); 3829 } 3830 3831 int perf_event_release_kernel(struct perf_event *event) 3832 { 3833 put_event(event); 3834 return 0; 3835 } 3836 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3837 3838 /* 3839 * Called when the last reference to the file is gone. 3840 */ 3841 static int perf_release(struct inode *inode, struct file *file) 3842 { 3843 put_event(file->private_data); 3844 return 0; 3845 } 3846 3847 /* 3848 * Remove all orphaned events from the context.
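* (Roughly: a child event whose parent event has already gone away; see is_orphaned_child() and the schedule_orphans_remove() call in event_sched_in().)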
3849 */ 3850 static void orphans_remove_work(struct work_struct *work) 3851 { 3852 struct perf_event_context *ctx; 3853 struct perf_event *event, *tmp; 3854 3855 ctx = container_of(work, struct perf_event_context, 3856 orphans_remove.work); 3857 3858 mutex_lock(&ctx->mutex); 3859 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3860 struct perf_event *parent_event = event->parent; 3861 3862 if (!is_orphaned_child(event)) 3863 continue; 3864 3865 perf_remove_from_context(event, true); 3866 3867 mutex_lock(&parent_event->child_mutex); 3868 list_del_init(&event->child_list); 3869 mutex_unlock(&parent_event->child_mutex); 3870 3871 free_event(event); 3872 put_event(parent_event); 3873 } 3874 3875 raw_spin_lock_irq(&ctx->lock); 3876 ctx->orphans_remove_sched = false; 3877 raw_spin_unlock_irq(&ctx->lock); 3878 mutex_unlock(&ctx->mutex); 3879 3880 put_ctx(ctx); 3881 } 3882 3883 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3884 { 3885 struct perf_event *child; 3886 u64 total = 0; 3887 3888 *enabled = 0; 3889 *running = 0; 3890 3891 mutex_lock(&event->child_mutex); 3892 3893 (void)perf_event_read(event, false); 3894 total += perf_event_count(event); 3895 3896 *enabled += event->total_time_enabled + 3897 atomic64_read(&event->child_total_time_enabled); 3898 *running += event->total_time_running + 3899 atomic64_read(&event->child_total_time_running); 3900 3901 list_for_each_entry(child, &event->child_list, child_list) { 3902 (void)perf_event_read(child, false); 3903 total += perf_event_count(child); 3904 *enabled += child->total_time_enabled; 3905 *running += child->total_time_running; 3906 } 3907 mutex_unlock(&event->child_mutex); 3908 3909 return total; 3910 } 3911 EXPORT_SYMBOL_GPL(perf_event_read_value); 3912 3913 static int __perf_read_group_add(struct perf_event *leader, 3914 u64 read_format, u64 *values) 3915 { 3916 struct perf_event *sub; 3917 int n = 1; /* skip @nr */ 3918 int ret; 3919 3920 ret = perf_event_read(leader, true); 3921 if (ret) 3922 return ret; 3923 3924 /* 3925 * Since we co-schedule groups, {enabled,running} times of siblings 3926 * will be identical to those of the leader, so we only publish one 3927 * set. 3928 */ 3929 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3930 values[n++] += leader->total_time_enabled + 3931 atomic64_read(&leader->child_total_time_enabled); 3932 } 3933 3934 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3935 values[n++] += leader->total_time_running + 3936 atomic64_read(&leader->child_total_time_running); 3937 } 3938 3939 /* 3940 * Write {count,id} tuples for every sibling. 
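* With PERF_FORMAT_GROUP the buffer copied back to userspace is therefore laid out roughly as follows (a sketch; the optional fields depend on read_format, cf. perf_event_open(2)):
*
*	struct read_format {
*		u64 nr;
*		u64 time_enabled;	(PERF_FORMAT_TOTAL_TIME_ENABLED)
*		u64 time_running;	(PERF_FORMAT_TOTAL_TIME_RUNNING)
*		struct {
*			u64 value;
*			u64 id;		(PERF_FORMAT_ID)
*		} cntr[nr];
*	};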
3941 */ 3942 values[n++] += perf_event_count(leader); 3943 if (read_format & PERF_FORMAT_ID) 3944 values[n++] = primary_event_id(leader); 3945 3946 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3947 values[n++] += perf_event_count(sub); 3948 if (read_format & PERF_FORMAT_ID) 3949 values[n++] = primary_event_id(sub); 3950 } 3951 3952 return 0; 3953 } 3954 3955 static int perf_read_group(struct perf_event *event, 3956 u64 read_format, char __user *buf) 3957 { 3958 struct perf_event *leader = event->group_leader, *child; 3959 struct perf_event_context *ctx = leader->ctx; 3960 int ret; 3961 u64 *values; 3962 3963 lockdep_assert_held(&ctx->mutex); 3964 3965 values = kzalloc(event->read_size, GFP_KERNEL); 3966 if (!values) 3967 return -ENOMEM; 3968 3969 values[0] = 1 + leader->nr_siblings; 3970 3971 /* 3972 * By locking the child_mutex of the leader we effectively 3973 * lock the child list of all siblings.. XXX explain how. 3974 */ 3975 mutex_lock(&leader->child_mutex); 3976 3977 ret = __perf_read_group_add(leader, read_format, values); 3978 if (ret) 3979 goto unlock; 3980 3981 list_for_each_entry(child, &leader->child_list, child_list) { 3982 ret = __perf_read_group_add(child, read_format, values); 3983 if (ret) 3984 goto unlock; 3985 } 3986 3987 mutex_unlock(&leader->child_mutex); 3988 3989 ret = event->read_size; 3990 if (copy_to_user(buf, values, event->read_size)) 3991 ret = -EFAULT; 3992 goto out; 3993 3994 unlock: 3995 mutex_unlock(&leader->child_mutex); 3996 out: 3997 kfree(values); 3998 return ret; 3999 } 4000 4001 static int perf_read_one(struct perf_event *event, 4002 u64 read_format, char __user *buf) 4003 { 4004 u64 enabled, running; 4005 u64 values[4]; 4006 int n = 0; 4007 4008 values[n++] = perf_event_read_value(event, &enabled, &running); 4009 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 4010 values[n++] = enabled; 4011 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 4012 values[n++] = running; 4013 if (read_format & PERF_FORMAT_ID) 4014 values[n++] = primary_event_id(event); 4015 4016 if (copy_to_user(buf, values, n * sizeof(u64))) 4017 return -EFAULT; 4018 4019 return n * sizeof(u64); 4020 } 4021 4022 static bool is_event_hup(struct perf_event *event) 4023 { 4024 bool no_children; 4025 4026 if (event->state != PERF_EVENT_STATE_EXIT) 4027 return false; 4028 4029 mutex_lock(&event->child_mutex); 4030 no_children = list_empty(&event->child_list); 4031 mutex_unlock(&event->child_mutex); 4032 return no_children; 4033 } 4034 4035 /* 4036 * Read the performance event - simple non blocking version for now 4037 */ 4038 static ssize_t 4039 __perf_read(struct perf_event *event, char __user *buf, size_t count) 4040 { 4041 u64 read_format = event->attr.read_format; 4042 int ret; 4043 4044 /* 4045 * Return end-of-file for a read on a event that is in 4046 * error state (i.e. because it was pinned but it couldn't be 4047 * scheduled on to the CPU at some point). 
4048 */ 4049 if (event->state == PERF_EVENT_STATE_ERROR) 4050 return 0; 4051 4052 if (count < event->read_size) 4053 return -ENOSPC; 4054 4055 WARN_ON_ONCE(event->ctx->parent_ctx); 4056 if (read_format & PERF_FORMAT_GROUP) 4057 ret = perf_read_group(event, read_format, buf); 4058 else 4059 ret = perf_read_one(event, read_format, buf); 4060 4061 return ret; 4062 } 4063 4064 static ssize_t 4065 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 4066 { 4067 struct perf_event *event = file->private_data; 4068 struct perf_event_context *ctx; 4069 int ret; 4070 4071 ctx = perf_event_ctx_lock(event); 4072 ret = __perf_read(event, buf, count); 4073 perf_event_ctx_unlock(event, ctx); 4074 4075 return ret; 4076 } 4077 4078 static unsigned int perf_poll(struct file *file, poll_table *wait) 4079 { 4080 struct perf_event *event = file->private_data; 4081 struct ring_buffer *rb; 4082 unsigned int events = POLLHUP; 4083 4084 poll_wait(file, &event->waitq, wait); 4085 4086 if (is_event_hup(event)) 4087 return events; 4088 4089 /* 4090 * Pin the event->rb by taking event->mmap_mutex; otherwise 4091 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 4092 */ 4093 mutex_lock(&event->mmap_mutex); 4094 rb = event->rb; 4095 if (rb) 4096 events = atomic_xchg(&rb->poll, 0); 4097 mutex_unlock(&event->mmap_mutex); 4098 return events; 4099 } 4100 4101 static void _perf_event_reset(struct perf_event *event) 4102 { 4103 (void)perf_event_read(event, false); 4104 local64_set(&event->count, 0); 4105 perf_event_update_userpage(event); 4106 } 4107 4108 /* 4109 * Holding the top-level event's child_mutex means that any 4110 * descendant process that has inherited this event will block 4111 * in sync_child_event if it goes to exit, thus satisfying the 4112 * task existence requirements of perf_event_enable/disable. 
4113 */ 4114 static void perf_event_for_each_child(struct perf_event *event, 4115 void (*func)(struct perf_event *)) 4116 { 4117 struct perf_event *child; 4118 4119 WARN_ON_ONCE(event->ctx->parent_ctx); 4120 4121 mutex_lock(&event->child_mutex); 4122 func(event); 4123 list_for_each_entry(child, &event->child_list, child_list) 4124 func(child); 4125 mutex_unlock(&event->child_mutex); 4126 } 4127 4128 static void perf_event_for_each(struct perf_event *event, 4129 void (*func)(struct perf_event *)) 4130 { 4131 struct perf_event_context *ctx = event->ctx; 4132 struct perf_event *sibling; 4133 4134 lockdep_assert_held(&ctx->mutex); 4135 4136 event = event->group_leader; 4137 4138 perf_event_for_each_child(event, func); 4139 list_for_each_entry(sibling, &event->sibling_list, group_entry) 4140 perf_event_for_each_child(sibling, func); 4141 } 4142 4143 struct period_event { 4144 struct perf_event *event; 4145 u64 value; 4146 }; 4147 4148 static int __perf_event_period(void *info) 4149 { 4150 struct period_event *pe = info; 4151 struct perf_event *event = pe->event; 4152 struct perf_event_context *ctx = event->ctx; 4153 u64 value = pe->value; 4154 bool active; 4155 4156 raw_spin_lock(&ctx->lock); 4157 if (event->attr.freq) { 4158 event->attr.sample_freq = value; 4159 } else { 4160 event->attr.sample_period = value; 4161 event->hw.sample_period = value; 4162 } 4163 4164 active = (event->state == PERF_EVENT_STATE_ACTIVE); 4165 if (active) { 4166 perf_pmu_disable(ctx->pmu); 4167 event->pmu->stop(event, PERF_EF_UPDATE); 4168 } 4169 4170 local64_set(&event->hw.period_left, 0); 4171 4172 if (active) { 4173 event->pmu->start(event, PERF_EF_RELOAD); 4174 perf_pmu_enable(ctx->pmu); 4175 } 4176 raw_spin_unlock(&ctx->lock); 4177 4178 return 0; 4179 } 4180 4181 static int perf_event_period(struct perf_event *event, u64 __user *arg) 4182 { 4183 struct period_event pe = { .event = event, }; 4184 struct perf_event_context *ctx = event->ctx; 4185 struct task_struct *task; 4186 u64 value; 4187 4188 if (!is_sampling_event(event)) 4189 return -EINVAL; 4190 4191 if (copy_from_user(&value, arg, sizeof(value))) 4192 return -EFAULT; 4193 4194 if (!value) 4195 return -EINVAL; 4196 4197 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4198 return -EINVAL; 4199 4200 task = ctx->task; 4201 pe.value = value; 4202 4203 if (!task) { 4204 cpu_function_call(event->cpu, __perf_event_period, &pe); 4205 return 0; 4206 } 4207 4208 retry: 4209 if (!task_function_call(task, __perf_event_period, &pe)) 4210 return 0; 4211 4212 raw_spin_lock_irq(&ctx->lock); 4213 if (ctx->is_active) { 4214 raw_spin_unlock_irq(&ctx->lock); 4215 task = ctx->task; 4216 goto retry; 4217 } 4218 4219 __perf_event_period(&pe); 4220 raw_spin_unlock_irq(&ctx->lock); 4221 4222 return 0; 4223 } 4224 4225 static const struct file_operations perf_fops; 4226 4227 static inline int perf_fget_light(int fd, struct fd *p) 4228 { 4229 struct fd f = fdget(fd); 4230 if (!f.file) 4231 return -EBADF; 4232 4233 if (f.file->f_op != &perf_fops) { 4234 fdput(f); 4235 return -EBADF; 4236 } 4237 *p = f; 4238 return 0; 4239 } 4240 4241 static int perf_event_set_output(struct perf_event *event, 4242 struct perf_event *output_event); 4243 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4244 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4245 4246 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4247 { 4248 void (*func)(struct perf_event *); 4249 u32 flags = arg; 4250 4251 switch 
(cmd) { 4252 case PERF_EVENT_IOC_ENABLE: 4253 func = _perf_event_enable; 4254 break; 4255 case PERF_EVENT_IOC_DISABLE: 4256 func = _perf_event_disable; 4257 break; 4258 case PERF_EVENT_IOC_RESET: 4259 func = _perf_event_reset; 4260 break; 4261 4262 case PERF_EVENT_IOC_REFRESH: 4263 return _perf_event_refresh(event, arg); 4264 4265 case PERF_EVENT_IOC_PERIOD: 4266 return perf_event_period(event, (u64 __user *)arg); 4267 4268 case PERF_EVENT_IOC_ID: 4269 { 4270 u64 id = primary_event_id(event); 4271 4272 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 4273 return -EFAULT; 4274 return 0; 4275 } 4276 4277 case PERF_EVENT_IOC_SET_OUTPUT: 4278 { 4279 int ret; 4280 if (arg != -1) { 4281 struct perf_event *output_event; 4282 struct fd output; 4283 ret = perf_fget_light(arg, &output); 4284 if (ret) 4285 return ret; 4286 output_event = output.file->private_data; 4287 ret = perf_event_set_output(event, output_event); 4288 fdput(output); 4289 } else { 4290 ret = perf_event_set_output(event, NULL); 4291 } 4292 return ret; 4293 } 4294 4295 case PERF_EVENT_IOC_SET_FILTER: 4296 return perf_event_set_filter(event, (void __user *)arg); 4297 4298 case PERF_EVENT_IOC_SET_BPF: 4299 return perf_event_set_bpf_prog(event, arg); 4300 4301 default: 4302 return -ENOTTY; 4303 } 4304 4305 if (flags & PERF_IOC_FLAG_GROUP) 4306 perf_event_for_each(event, func); 4307 else 4308 perf_event_for_each_child(event, func); 4309 4310 return 0; 4311 } 4312 4313 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4314 { 4315 struct perf_event *event = file->private_data; 4316 struct perf_event_context *ctx; 4317 long ret; 4318 4319 ctx = perf_event_ctx_lock(event); 4320 ret = _perf_ioctl(event, cmd, arg); 4321 perf_event_ctx_unlock(event, ctx); 4322 4323 return ret; 4324 } 4325 4326 #ifdef CONFIG_COMPAT 4327 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4328 unsigned long arg) 4329 { 4330 switch (_IOC_NR(cmd)) { 4331 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 4332 case _IOC_NR(PERF_EVENT_IOC_ID): 4333 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */ 4334 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 4335 cmd &= ~IOCSIZE_MASK; 4336 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 4337 } 4338 break; 4339 } 4340 return perf_ioctl(file, cmd, arg); 4341 } 4342 #else 4343 # define perf_compat_ioctl NULL 4344 #endif 4345 4346 int perf_event_task_enable(void) 4347 { 4348 struct perf_event_context *ctx; 4349 struct perf_event *event; 4350 4351 mutex_lock(&current->perf_event_mutex); 4352 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 4353 ctx = perf_event_ctx_lock(event); 4354 perf_event_for_each_child(event, _perf_event_enable); 4355 perf_event_ctx_unlock(event, ctx); 4356 } 4357 mutex_unlock(&current->perf_event_mutex); 4358 4359 return 0; 4360 } 4361 4362 int perf_event_task_disable(void) 4363 { 4364 struct perf_event_context *ctx; 4365 struct perf_event *event; 4366 4367 mutex_lock(&current->perf_event_mutex); 4368 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 4369 ctx = perf_event_ctx_lock(event); 4370 perf_event_for_each_child(event, _perf_event_disable); 4371 perf_event_ctx_unlock(event, ctx); 4372 } 4373 mutex_unlock(&current->perf_event_mutex); 4374 4375 return 0; 4376 } 4377 4378 static int perf_event_index(struct perf_event *event) 4379 { 4380 if (event->hw.state & PERF_HES_STOPPED) 4381 return 0; 4382 4383 if (event->state != PERF_EVENT_STATE_ACTIVE) 4384 return 0; 4385 4386 return event->pmu->event_idx(event); 4387 } 4388 4389 static void
calc_timer_values(struct perf_event *event, 4390 u64 *now, 4391 u64 *enabled, 4392 u64 *running) 4393 { 4394 u64 ctx_time; 4395 4396 *now = perf_clock(); 4397 ctx_time = event->shadow_ctx_time + *now; 4398 *enabled = ctx_time - event->tstamp_enabled; 4399 *running = ctx_time - event->tstamp_running; 4400 } 4401 4402 static void perf_event_init_userpage(struct perf_event *event) 4403 { 4404 struct perf_event_mmap_page *userpg; 4405 struct ring_buffer *rb; 4406 4407 rcu_read_lock(); 4408 rb = rcu_dereference(event->rb); 4409 if (!rb) 4410 goto unlock; 4411 4412 userpg = rb->user_page; 4413 4414 /* Allow new userspace to detect that bit 0 is deprecated */ 4415 userpg->cap_bit0_is_deprecated = 1; 4416 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 4417 userpg->data_offset = PAGE_SIZE; 4418 userpg->data_size = perf_data_size(rb); 4419 4420 unlock: 4421 rcu_read_unlock(); 4422 } 4423 4424 void __weak arch_perf_update_userpage( 4425 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) 4426 { 4427 } 4428 4429 /* 4430 * Callers need to ensure there can be no nesting of this function, otherwise 4431 * the seqlock logic goes bad. We cannot serialize this because the arch 4432 * code calls this from NMI context. 4433 */ 4434 void perf_event_update_userpage(struct perf_event *event) 4435 { 4436 struct perf_event_mmap_page *userpg; 4437 struct ring_buffer *rb; 4438 u64 enabled, running, now; 4439 4440 rcu_read_lock(); 4441 rb = rcu_dereference(event->rb); 4442 if (!rb) 4443 goto unlock; 4444 4445 /* 4446 * compute total_time_enabled, total_time_running 4447 * based on snapshot values taken when the event 4448 * was last scheduled in. 4449 * 4450 * we cannot simply call update_context_time() 4451 * because of locking issues, as we can be called in 4452 * NMI context 4453 */ 4454 calc_timer_values(event, &now, &enabled, &running); 4455 4456 userpg = rb->user_page; 4457 /* 4458 * Disable preemption so as to not let the corresponding user-space 4459 * spin too long if we get preempted.
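 *
 * For reference, a minimal sketch (not kernel code) of how a user-space
 * reader is expected to pair with this ->lock protocol; 'pc' is assumed
 * to be the reader's pointer to the mmap()ed perf_event_mmap_page and
 * rdpmc() a stand-in for whatever self-monitoring counter-read
 * primitive the architecture provides:
 *
 *	u32 seq, idx;
 *	u64 count, enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * The reader retries until ->lock reads back unchanged, which is why we
 * should not stay preempted for long between the two increments below.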
4460 */ 4461 preempt_disable(); 4462 ++userpg->lock; 4463 barrier(); 4464 userpg->index = perf_event_index(event); 4465 userpg->offset = perf_event_count(event); 4466 if (userpg->index) 4467 userpg->offset -= local64_read(&event->hw.prev_count); 4468 4469 userpg->time_enabled = enabled + 4470 atomic64_read(&event->child_total_time_enabled); 4471 4472 userpg->time_running = running + 4473 atomic64_read(&event->child_total_time_running); 4474 4475 arch_perf_update_userpage(event, userpg, now); 4476 4477 barrier(); 4478 ++userpg->lock; 4479 preempt_enable(); 4480 unlock: 4481 rcu_read_unlock(); 4482 } 4483 4484 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4485 { 4486 struct perf_event *event = vma->vm_file->private_data; 4487 struct ring_buffer *rb; 4488 int ret = VM_FAULT_SIGBUS; 4489 4490 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4491 if (vmf->pgoff == 0) 4492 ret = 0; 4493 return ret; 4494 } 4495 4496 rcu_read_lock(); 4497 rb = rcu_dereference(event->rb); 4498 if (!rb) 4499 goto unlock; 4500 4501 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4502 goto unlock; 4503 4504 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4505 if (!vmf->page) 4506 goto unlock; 4507 4508 get_page(vmf->page); 4509 vmf->page->mapping = vma->vm_file->f_mapping; 4510 vmf->page->index = vmf->pgoff; 4511 4512 ret = 0; 4513 unlock: 4514 rcu_read_unlock(); 4515 4516 return ret; 4517 } 4518 4519 static void ring_buffer_attach(struct perf_event *event, 4520 struct ring_buffer *rb) 4521 { 4522 struct ring_buffer *old_rb = NULL; 4523 unsigned long flags; 4524 4525 if (event->rb) { 4526 /* 4527 * Should be impossible, we set this when removing 4528 * event->rb_entry and wait/clear when adding event->rb_entry. 4529 */ 4530 WARN_ON_ONCE(event->rcu_pending); 4531 4532 old_rb = event->rb; 4533 spin_lock_irqsave(&old_rb->event_lock, flags); 4534 list_del_rcu(&event->rb_entry); 4535 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4536 4537 event->rcu_batches = get_state_synchronize_rcu(); 4538 event->rcu_pending = 1; 4539 } 4540 4541 if (rb) { 4542 if (event->rcu_pending) { 4543 cond_synchronize_rcu(event->rcu_batches); 4544 event->rcu_pending = 0; 4545 } 4546 4547 spin_lock_irqsave(&rb->event_lock, flags); 4548 list_add_rcu(&event->rb_entry, &rb->event_list); 4549 spin_unlock_irqrestore(&rb->event_lock, flags); 4550 } 4551 4552 rcu_assign_pointer(event->rb, rb); 4553 4554 if (old_rb) { 4555 ring_buffer_put(old_rb); 4556 /* 4557 * Since we detached before setting the new rb, so that we 4558 * could attach the new rb, we could have missed a wakeup. 4559 * Provide it now. 
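 *
 * (That wakeup eventually surfaces in user space as POLLIN on the event
 * fd. Purely for illustration, a consumer loop pairing with it could
 * look roughly like the sketch below, where 'pc' is the mmap()ed user
 * page, consume_records() a made-up helper that walks the records
 * between data_tail and data_head, and rmb()/mb() stand in for the
 * appropriate user-space barriers:
 *
 *	struct pollfd pfd = { .fd = event_fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		head = pc->data_head;
 *		rmb();
 *		consume_records(pc->data_tail, head);
 *		mb();
 *		pc->data_tail = head;
 *	})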
4560 */ 4561 wake_up_all(&event->waitq); 4562 } 4563 } 4564 4565 static void ring_buffer_wakeup(struct perf_event *event) 4566 { 4567 struct ring_buffer *rb; 4568 4569 rcu_read_lock(); 4570 rb = rcu_dereference(event->rb); 4571 if (rb) { 4572 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4573 wake_up_all(&event->waitq); 4574 } 4575 rcu_read_unlock(); 4576 } 4577 4578 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4579 { 4580 struct ring_buffer *rb; 4581 4582 rcu_read_lock(); 4583 rb = rcu_dereference(event->rb); 4584 if (rb) { 4585 if (!atomic_inc_not_zero(&rb->refcount)) 4586 rb = NULL; 4587 } 4588 rcu_read_unlock(); 4589 4590 return rb; 4591 } 4592 4593 void ring_buffer_put(struct ring_buffer *rb) 4594 { 4595 if (!atomic_dec_and_test(&rb->refcount)) 4596 return; 4597 4598 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4599 4600 call_rcu(&rb->rcu_head, rb_free_rcu); 4601 } 4602 4603 static void perf_mmap_open(struct vm_area_struct *vma) 4604 { 4605 struct perf_event *event = vma->vm_file->private_data; 4606 4607 atomic_inc(&event->mmap_count); 4608 atomic_inc(&event->rb->mmap_count); 4609 4610 if (vma->vm_pgoff) 4611 atomic_inc(&event->rb->aux_mmap_count); 4612 4613 if (event->pmu->event_mapped) 4614 event->pmu->event_mapped(event); 4615 } 4616 4617 /* 4618 * A buffer can be mmap()ed multiple times; either directly through the same 4619 * event, or through other events by use of perf_event_set_output(). 4620 * 4621 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4622 * the buffer here, where we still have a VM context. This means we need 4623 * to detach all events redirecting to us. 4624 */ 4625 static void perf_mmap_close(struct vm_area_struct *vma) 4626 { 4627 struct perf_event *event = vma->vm_file->private_data; 4628 4629 struct ring_buffer *rb = ring_buffer_get(event); 4630 struct user_struct *mmap_user = rb->mmap_user; 4631 int mmap_locked = rb->mmap_locked; 4632 unsigned long size = perf_data_size(rb); 4633 4634 if (event->pmu->event_unmapped) 4635 event->pmu->event_unmapped(event); 4636 4637 /* 4638 * rb->aux_mmap_count will always drop before rb->mmap_count and 4639 * event->mmap_count, so it is ok to use event->mmap_mutex to 4640 * serialize with perf_mmap here. 4641 */ 4642 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4643 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4644 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4645 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4646 4647 rb_free_aux(rb); 4648 mutex_unlock(&event->mmap_mutex); 4649 } 4650 4651 atomic_dec(&rb->mmap_count); 4652 4653 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4654 goto out_put; 4655 4656 ring_buffer_attach(event, NULL); 4657 mutex_unlock(&event->mmap_mutex); 4658 4659 /* If there's still other mmap()s of this buffer, we're done. */ 4660 if (atomic_read(&rb->mmap_count)) 4661 goto out_put; 4662 4663 /* 4664 * No other mmap()s, detach from all other events that might redirect 4665 * into the now unreachable buffer. Somewhat complicated by the 4666 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4667 */ 4668 again: 4669 rcu_read_lock(); 4670 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4671 if (!atomic_long_inc_not_zero(&event->refcount)) { 4672 /* 4673 * This event is en-route to free_event() which will 4674 * detach it and remove it from the list. 
4675 */ 4676 continue; 4677 } 4678 rcu_read_unlock(); 4679 4680 mutex_lock(&event->mmap_mutex); 4681 /* 4682 * Check we didn't race with perf_event_set_output() which can 4683 * swizzle the rb from under us while we were waiting to 4684 * acquire mmap_mutex. 4685 * 4686 * If we find a different rb, ignore this event; the next 4687 * iteration will no longer find it on the list. We still 4688 * have to restart the iteration to make sure we're not now 4689 * iterating the wrong list. 4690 */ 4691 if (event->rb == rb) 4692 ring_buffer_attach(event, NULL); 4693 4694 mutex_unlock(&event->mmap_mutex); 4695 put_event(event); 4696 4697 /* 4698 * Restart the iteration; either we're on the wrong list or 4699 * we destroyed its integrity by doing a deletion. 4700 */ 4701 goto again; 4702 } 4703 rcu_read_unlock(); 4704 4705 /* 4706 * It could be there are still a few 0-ref events on the list; they'll 4707 * get cleaned up by free_event() -- they'll also still have their 4708 * ref on the rb and will free it whenever they are done with it. 4709 * 4710 * Aside from that, this buffer is 'fully' detached and unmapped, 4711 * undo the VM accounting. 4712 */ 4713 4714 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4715 vma->vm_mm->pinned_vm -= mmap_locked; 4716 free_uid(mmap_user); 4717 4718 out_put: 4719 ring_buffer_put(rb); /* could be last */ 4720 } 4721 4722 static const struct vm_operations_struct perf_mmap_vmops = { 4723 .open = perf_mmap_open, 4724 .close = perf_mmap_close, /* non mergeable */ 4725 .fault = perf_mmap_fault, 4726 .page_mkwrite = perf_mmap_fault, 4727 }; 4728 4729 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4730 { 4731 struct perf_event *event = file->private_data; 4732 unsigned long user_locked, user_lock_limit; 4733 struct user_struct *user = current_user(); 4734 unsigned long locked, lock_limit; 4735 struct ring_buffer *rb = NULL; 4736 unsigned long vma_size; 4737 unsigned long nr_pages; 4738 long user_extra = 0, extra = 0; 4739 int ret = 0, flags = 0; 4740 4741 /* 4742 * Don't allow mmap() of inherited per-task counters. This would 4743 * create a performance issue due to all children writing to the 4744 * same rb. 4745 */ 4746 if (event->cpu == -1 && event->attr.inherit) 4747 return -EINVAL; 4748 4749 if (!(vma->vm_flags & VM_SHARED)) 4750 return -EINVAL; 4751 4752 vma_size = vma->vm_end - vma->vm_start; 4753 4754 if (vma->vm_pgoff == 0) { 4755 nr_pages = (vma_size / PAGE_SIZE) - 1; 4756 } else { 4757 /* 4758 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4759 * mapped; all subsequent mappings should have the same size 4760 * and offset. Must be above the normal perf buffer.
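 *
 * For illustration, the user-space half of setting up an AUX mapping
 * might look like this (a rough sketch, error handling omitted; 'fd' is
 * the perf_event_open() fd, 'page_size' the system page size, and
 * DATA_PAGES/AUX_PAGES are assumed to be powers of two):
 *
 *	pc = mmap(NULL, (1 + DATA_PAGES) * page_size,
 *		  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	pc->aux_offset = (1 + DATA_PAGES) * page_size;
 *	pc->aux_size   = AUX_PAGES * page_size;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, pc->aux_offset);
 *
 * i.e. the aux_offset/aux_size fields of the user page are filled in
 * first and must agree with the offset and length passed to mmap(),
 * which is exactly what the checks below enforce.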
4761 */ 4762 u64 aux_offset, aux_size; 4763 4764 if (!event->rb) 4765 return -EINVAL; 4766 4767 nr_pages = vma_size / PAGE_SIZE; 4768 4769 mutex_lock(&event->mmap_mutex); 4770 ret = -EINVAL; 4771 4772 rb = event->rb; 4773 if (!rb) 4774 goto aux_unlock; 4775 4776 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4777 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4778 4779 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4780 goto aux_unlock; 4781 4782 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4783 goto aux_unlock; 4784 4785 /* already mapped with a different offset */ 4786 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4787 goto aux_unlock; 4788 4789 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4790 goto aux_unlock; 4791 4792 /* already mapped with a different size */ 4793 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4794 goto aux_unlock; 4795 4796 if (!is_power_of_2(nr_pages)) 4797 goto aux_unlock; 4798 4799 if (!atomic_inc_not_zero(&rb->mmap_count)) 4800 goto aux_unlock; 4801 4802 if (rb_has_aux(rb)) { 4803 atomic_inc(&rb->aux_mmap_count); 4804 ret = 0; 4805 goto unlock; 4806 } 4807 4808 atomic_set(&rb->aux_mmap_count, 1); 4809 user_extra = nr_pages; 4810 4811 goto accounting; 4812 } 4813 4814 /* 4815 * If we have rb pages ensure they're a power-of-two number, so we 4816 * can do bitmasks instead of modulo. 4817 */ 4818 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4819 return -EINVAL; 4820 4821 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4822 return -EINVAL; 4823 4824 WARN_ON_ONCE(event->ctx->parent_ctx); 4825 again: 4826 mutex_lock(&event->mmap_mutex); 4827 if (event->rb) { 4828 if (event->rb->nr_pages != nr_pages) { 4829 ret = -EINVAL; 4830 goto unlock; 4831 } 4832 4833 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4834 /* 4835 * Raced against perf_mmap_close() through 4836 * perf_event_set_output(). Try again, hope for better 4837 * luck. 4838 */ 4839 mutex_unlock(&event->mmap_mutex); 4840 goto again; 4841 } 4842 4843 goto unlock; 4844 } 4845 4846 user_extra = nr_pages + 1; 4847 4848 accounting: 4849 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4850 4851 /* 4852 * Increase the limit linearly with more CPUs: 4853 */ 4854 user_lock_limit *= num_online_cpus(); 4855 4856 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4857 4858 if (user_locked > user_lock_limit) 4859 extra = user_locked - user_lock_limit; 4860 4861 lock_limit = rlimit(RLIMIT_MEMLOCK); 4862 lock_limit >>= PAGE_SHIFT; 4863 locked = vma->vm_mm->pinned_vm + extra; 4864 4865 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4866 !capable(CAP_IPC_LOCK)) { 4867 ret = -EPERM; 4868 goto unlock; 4869 } 4870 4871 WARN_ON(!rb && event->rb); 4872 4873 if (vma->vm_flags & VM_WRITE) 4874 flags |= RING_BUFFER_WRITABLE; 4875 4876 if (!rb) { 4877 rb = rb_alloc(nr_pages, 4878 event->attr.watermark ? 
event->attr.wakeup_watermark : 0, 4879 event->cpu, flags); 4880 4881 if (!rb) { 4882 ret = -ENOMEM; 4883 goto unlock; 4884 } 4885 4886 atomic_set(&rb->mmap_count, 1); 4887 rb->mmap_user = get_current_user(); 4888 rb->mmap_locked = extra; 4889 4890 ring_buffer_attach(event, rb); 4891 4892 perf_event_init_userpage(event); 4893 perf_event_update_userpage(event); 4894 } else { 4895 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4896 event->attr.aux_watermark, flags); 4897 if (!ret) 4898 rb->aux_mmap_locked = extra; 4899 } 4900 4901 unlock: 4902 if (!ret) { 4903 atomic_long_add(user_extra, &user->locked_vm); 4904 vma->vm_mm->pinned_vm += extra; 4905 4906 atomic_inc(&event->mmap_count); 4907 } else if (rb) { 4908 atomic_dec(&rb->mmap_count); 4909 } 4910 aux_unlock: 4911 mutex_unlock(&event->mmap_mutex); 4912 4913 /* 4914 * Since pinned accounting is per vm we cannot allow fork() to copy our 4915 * vma. 4916 */ 4917 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4918 vma->vm_ops = &perf_mmap_vmops; 4919 4920 if (event->pmu->event_mapped) 4921 event->pmu->event_mapped(event); 4922 4923 return ret; 4924 } 4925 4926 static int perf_fasync(int fd, struct file *filp, int on) 4927 { 4928 struct inode *inode = file_inode(filp); 4929 struct perf_event *event = filp->private_data; 4930 int retval; 4931 4932 mutex_lock(&inode->i_mutex); 4933 retval = fasync_helper(fd, filp, on, &event->fasync); 4934 mutex_unlock(&inode->i_mutex); 4935 4936 if (retval < 0) 4937 return retval; 4938 4939 return 0; 4940 } 4941 4942 static const struct file_operations perf_fops = { 4943 .llseek = no_llseek, 4944 .release = perf_release, 4945 .read = perf_read, 4946 .poll = perf_poll, 4947 .unlocked_ioctl = perf_ioctl, 4948 .compat_ioctl = perf_compat_ioctl, 4949 .mmap = perf_mmap, 4950 .fasync = perf_fasync, 4951 }; 4952 4953 /* 4954 * Perf event wakeup 4955 * 4956 * If there's data, ensure we set the poll() state and publish everything 4957 * to user-space before waking everybody up. 4958 */ 4959 4960 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) 4961 { 4962 /* only the parent has fasync state */ 4963 if (event->parent) 4964 event = event->parent; 4965 return &event->fasync; 4966 } 4967 4968 void perf_event_wakeup(struct perf_event *event) 4969 { 4970 ring_buffer_wakeup(event); 4971 4972 if (event->pending_kill) { 4973 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); 4974 event->pending_kill = 0; 4975 } 4976 } 4977 4978 static void perf_pending_event(struct irq_work *entry) 4979 { 4980 struct perf_event *event = container_of(entry, 4981 struct perf_event, pending); 4982 int rctx; 4983 4984 rctx = perf_swevent_get_recursion_context(); 4985 /* 4986 * If we 'fail' here, that's OK, it means recursion is already disabled 4987 * and we won't recurse 'further'. 4988 */ 4989 4990 if (event->pending_disable) { 4991 event->pending_disable = 0; 4992 __perf_event_disable(event); 4993 } 4994 4995 if (event->pending_wakeup) { 4996 event->pending_wakeup = 0; 4997 perf_event_wakeup(event); 4998 } 4999 5000 if (rctx >= 0) 5001 perf_swevent_put_recursion_context(rctx); 5002 } 5003 5004 /* 5005 * We assume there is only KVM supporting the callbacks. 5006 * Later on, we might change it to a list if there is 5007 * another virtualization implementation supporting the callbacks. 
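 *
 * A hypervisor registers itself along these lines (sketch only; the
 * xyz_* names are made up, the three methods are the ones struct
 * perf_guest_info_callbacks currently carries):
 *
 *	static struct perf_guest_info_callbacks xyz_guest_cbs = {
 *		.is_in_guest	= xyz_is_in_guest,
 *		.is_user_mode	= xyz_is_guest_user_mode,
 *		.get_guest_ip	= xyz_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&xyz_guest_cbs);
 *
 * and undoes it with perf_unregister_guest_info_callbacks() on unload.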
5008 */ 5009 struct perf_guest_info_callbacks *perf_guest_cbs; 5010 5011 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 5012 { 5013 perf_guest_cbs = cbs; 5014 return 0; 5015 } 5016 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 5017 5018 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 5019 { 5020 perf_guest_cbs = NULL; 5021 return 0; 5022 } 5023 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 5024 5025 static void 5026 perf_output_sample_regs(struct perf_output_handle *handle, 5027 struct pt_regs *regs, u64 mask) 5028 { 5029 int bit; 5030 5031 for_each_set_bit(bit, (const unsigned long *) &mask, 5032 sizeof(mask) * BITS_PER_BYTE) { 5033 u64 val; 5034 5035 val = perf_reg_value(regs, bit); 5036 perf_output_put(handle, val); 5037 } 5038 } 5039 5040 static void perf_sample_regs_user(struct perf_regs *regs_user, 5041 struct pt_regs *regs, 5042 struct pt_regs *regs_user_copy) 5043 { 5044 if (user_mode(regs)) { 5045 regs_user->abi = perf_reg_abi(current); 5046 regs_user->regs = regs; 5047 } else if (current->mm) { 5048 perf_get_regs_user(regs_user, regs, regs_user_copy); 5049 } else { 5050 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 5051 regs_user->regs = NULL; 5052 } 5053 } 5054 5055 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 5056 struct pt_regs *regs) 5057 { 5058 regs_intr->regs = regs; 5059 regs_intr->abi = perf_reg_abi(current); 5060 } 5061 5062 5063 /* 5064 * Get remaining task size from user stack pointer. 5065 * 5066 * It'd be better to take the stack vma map and limit this more 5067 * precisely, but there's no way to get it safely under interrupt, 5068 * so we use TASK_SIZE as the limit. 5069 */ 5070 static u64 perf_ustack_task_size(struct pt_regs *regs) 5071 { 5072 unsigned long addr = perf_user_stack_pointer(regs); 5073 5074 if (!addr || addr >= TASK_SIZE) 5075 return 0; 5076 5077 return TASK_SIZE - addr; 5078 } 5079 5080 static u16 5081 perf_sample_ustack_size(u16 stack_size, u16 header_size, 5082 struct pt_regs *regs) 5083 { 5084 u64 task_size; 5085 5086 /* No regs, no stack pointer, no dump. */ 5087 if (!regs) 5088 return 0; 5089 5090 /* 5091 * Check whether the requested stack size fits into: 5092 * - TASK_SIZE 5093 * If it doesn't, we limit the size to TASK_SIZE. 5094 * 5095 * - the remaining sample size 5096 * If it doesn't, we trim the stack size to 5097 * fit into the remaining sample size. 5098 */ 5099 5100 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 5101 stack_size = min(stack_size, (u16) task_size); 5102 5103 /* Current header size plus static size and dynamic size. */ 5104 header_size += 2 * sizeof(u64); 5105 5106 /* Do we fit in with the current stack dump size? */ 5107 if ((u16) (header_size + stack_size) < header_size) { 5108 /* 5109 * If we overflow the maximum size for the sample, 5110 * we customize the stack dump size to fit in.
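 *
 * Worked example (numbers made up): with header_size == 64000 and a
 * requested stack_size of 4096, the u16 sum wraps around to 2560,
 * which is smaller than header_size, so this branch fires and
 * stack_size becomes USHRT_MAX - 64000 - 8 = 1527, rounded up to
 * 1528 bytes.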
5111 */ 5112 stack_size = USHRT_MAX - header_size - sizeof(u64); 5113 stack_size = round_up(stack_size, sizeof(u64)); 5114 } 5115 5116 return stack_size; 5117 } 5118 5119 static void 5120 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 5121 struct pt_regs *regs) 5122 { 5123 /* Case of a kernel thread, nothing to dump */ 5124 if (!regs) { 5125 u64 size = 0; 5126 perf_output_put(handle, size); 5127 } else { 5128 unsigned long sp; 5129 unsigned int rem; 5130 u64 dyn_size; 5131 5132 /* 5133 * We dump: 5134 * static size 5135 * - the size requested by user or the best one we can fit 5136 * in to the sample max size 5137 * data 5138 * - user stack dump data 5139 * dynamic size 5140 * - the actual dumped size 5141 */ 5142 5143 /* Static size. */ 5144 perf_output_put(handle, dump_size); 5145 5146 /* Data. */ 5147 sp = perf_user_stack_pointer(regs); 5148 rem = __output_copy_user(handle, (void *) sp, dump_size); 5149 dyn_size = dump_size - rem; 5150 5151 perf_output_skip(handle, rem); 5152 5153 /* Dynamic size. */ 5154 perf_output_put(handle, dyn_size); 5155 } 5156 } 5157 5158 static void __perf_event_header__init_id(struct perf_event_header *header, 5159 struct perf_sample_data *data, 5160 struct perf_event *event) 5161 { 5162 u64 sample_type = event->attr.sample_type; 5163 5164 data->type = sample_type; 5165 header->size += event->id_header_size; 5166 5167 if (sample_type & PERF_SAMPLE_TID) { 5168 /* namespace issues */ 5169 data->tid_entry.pid = perf_event_pid(event, current); 5170 data->tid_entry.tid = perf_event_tid(event, current); 5171 } 5172 5173 if (sample_type & PERF_SAMPLE_TIME) 5174 data->time = perf_event_clock(event); 5175 5176 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 5177 data->id = primary_event_id(event); 5178 5179 if (sample_type & PERF_SAMPLE_STREAM_ID) 5180 data->stream_id = event->id; 5181 5182 if (sample_type & PERF_SAMPLE_CPU) { 5183 data->cpu_entry.cpu = raw_smp_processor_id(); 5184 data->cpu_entry.reserved = 0; 5185 } 5186 } 5187 5188 void perf_event_header__init_id(struct perf_event_header *header, 5189 struct perf_sample_data *data, 5190 struct perf_event *event) 5191 { 5192 if (event->attr.sample_id_all) 5193 __perf_event_header__init_id(header, data, event); 5194 } 5195 5196 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 5197 struct perf_sample_data *data) 5198 { 5199 u64 sample_type = data->type; 5200 5201 if (sample_type & PERF_SAMPLE_TID) 5202 perf_output_put(handle, data->tid_entry); 5203 5204 if (sample_type & PERF_SAMPLE_TIME) 5205 perf_output_put(handle, data->time); 5206 5207 if (sample_type & PERF_SAMPLE_ID) 5208 perf_output_put(handle, data->id); 5209 5210 if (sample_type & PERF_SAMPLE_STREAM_ID) 5211 perf_output_put(handle, data->stream_id); 5212 5213 if (sample_type & PERF_SAMPLE_CPU) 5214 perf_output_put(handle, data->cpu_entry); 5215 5216 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5217 perf_output_put(handle, data->id); 5218 } 5219 5220 void perf_event__output_id_sample(struct perf_event *event, 5221 struct perf_output_handle *handle, 5222 struct perf_sample_data *sample) 5223 { 5224 if (event->attr.sample_id_all) 5225 __perf_event__output_id_sample(handle, sample); 5226 } 5227 5228 static void perf_output_read_one(struct perf_output_handle *handle, 5229 struct perf_event *event, 5230 u64 enabled, u64 running) 5231 { 5232 u64 read_format = event->attr.read_format; 5233 u64 values[4]; 5234 int n = 0; 5235 5236 values[n++] = perf_event_count(event); 5237 if (read_format & 
PERF_FORMAT_TOTAL_TIME_ENABLED) { 5238 values[n++] = enabled + 5239 atomic64_read(&event->child_total_time_enabled); 5240 } 5241 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5242 values[n++] = running + 5243 atomic64_read(&event->child_total_time_running); 5244 } 5245 if (read_format & PERF_FORMAT_ID) 5246 values[n++] = primary_event_id(event); 5247 5248 __output_copy(handle, values, n * sizeof(u64)); 5249 } 5250 5251 /* 5252 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5253 */ 5254 static void perf_output_read_group(struct perf_output_handle *handle, 5255 struct perf_event *event, 5256 u64 enabled, u64 running) 5257 { 5258 struct perf_event *leader = event->group_leader, *sub; 5259 u64 read_format = event->attr.read_format; 5260 u64 values[5]; 5261 int n = 0; 5262 5263 values[n++] = 1 + leader->nr_siblings; 5264 5265 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5266 values[n++] = enabled; 5267 5268 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5269 values[n++] = running; 5270 5271 if (leader != event) 5272 leader->pmu->read(leader); 5273 5274 values[n++] = perf_event_count(leader); 5275 if (read_format & PERF_FORMAT_ID) 5276 values[n++] = primary_event_id(leader); 5277 5278 __output_copy(handle, values, n * sizeof(u64)); 5279 5280 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5281 n = 0; 5282 5283 if ((sub != event) && 5284 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5285 sub->pmu->read(sub); 5286 5287 values[n++] = perf_event_count(sub); 5288 if (read_format & PERF_FORMAT_ID) 5289 values[n++] = primary_event_id(sub); 5290 5291 __output_copy(handle, values, n * sizeof(u64)); 5292 } 5293 } 5294 5295 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5296 PERF_FORMAT_TOTAL_TIME_RUNNING) 5297 5298 static void perf_output_read(struct perf_output_handle *handle, 5299 struct perf_event *event) 5300 { 5301 u64 enabled = 0, running = 0, now; 5302 u64 read_format = event->attr.read_format; 5303 5304 /* 5305 * compute total_time_enabled, total_time_running 5306 * based on snapshot values taken when the event 5307 * was last scheduled in. 
5308 * 5309 * we cannot simply call update_context_time() 5310 * because of locking issues, as we are called in 5311 * NMI context 5312 */ 5313 if (read_format & PERF_FORMAT_TOTAL_TIMES) 5314 calc_timer_values(event, &now, &enabled, &running); 5315 5316 if (event->attr.read_format & PERF_FORMAT_GROUP) 5317 perf_output_read_group(handle, event, enabled, running); 5318 else 5319 perf_output_read_one(handle, event, enabled, running); 5320 } 5321 5322 void perf_output_sample(struct perf_output_handle *handle, 5323 struct perf_event_header *header, 5324 struct perf_sample_data *data, 5325 struct perf_event *event) 5326 { 5327 u64 sample_type = data->type; 5328 5329 perf_output_put(handle, *header); 5330 5331 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5332 perf_output_put(handle, data->id); 5333 5334 if (sample_type & PERF_SAMPLE_IP) 5335 perf_output_put(handle, data->ip); 5336 5337 if (sample_type & PERF_SAMPLE_TID) 5338 perf_output_put(handle, data->tid_entry); 5339 5340 if (sample_type & PERF_SAMPLE_TIME) 5341 perf_output_put(handle, data->time); 5342 5343 if (sample_type & PERF_SAMPLE_ADDR) 5344 perf_output_put(handle, data->addr); 5345 5346 if (sample_type & PERF_SAMPLE_ID) 5347 perf_output_put(handle, data->id); 5348 5349 if (sample_type & PERF_SAMPLE_STREAM_ID) 5350 perf_output_put(handle, data->stream_id); 5351 5352 if (sample_type & PERF_SAMPLE_CPU) 5353 perf_output_put(handle, data->cpu_entry); 5354 5355 if (sample_type & PERF_SAMPLE_PERIOD) 5356 perf_output_put(handle, data->period); 5357 5358 if (sample_type & PERF_SAMPLE_READ) 5359 perf_output_read(handle, event); 5360 5361 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5362 if (data->callchain) { 5363 int size = 1; 5364 5365 if (data->callchain) 5366 size += data->callchain->nr; 5367 5368 size *= sizeof(u64); 5369 5370 __output_copy(handle, data->callchain, size); 5371 } else { 5372 u64 nr = 0; 5373 perf_output_put(handle, nr); 5374 } 5375 } 5376 5377 if (sample_type & PERF_SAMPLE_RAW) { 5378 if (data->raw) { 5379 u32 raw_size = data->raw->size; 5380 u32 real_size = round_up(raw_size + sizeof(u32), 5381 sizeof(u64)) - sizeof(u32); 5382 u64 zero = 0; 5383 5384 perf_output_put(handle, real_size); 5385 __output_copy(handle, data->raw->data, raw_size); 5386 if (real_size - raw_size) 5387 __output_copy(handle, &zero, real_size - raw_size); 5388 } else { 5389 struct { 5390 u32 size; 5391 u32 data; 5392 } raw = { 5393 .size = sizeof(u32), 5394 .data = 0, 5395 }; 5396 perf_output_put(handle, raw); 5397 } 5398 } 5399 5400 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5401 if (data->br_stack) { 5402 size_t size; 5403 5404 size = data->br_stack->nr 5405 * sizeof(struct perf_branch_entry); 5406 5407 perf_output_put(handle, data->br_stack->nr); 5408 perf_output_copy(handle, data->br_stack->entries, size); 5409 } else { 5410 /* 5411 * we always store at least the value of nr 5412 */ 5413 u64 nr = 0; 5414 perf_output_put(handle, nr); 5415 } 5416 } 5417 5418 if (sample_type & PERF_SAMPLE_REGS_USER) { 5419 u64 abi = data->regs_user.abi; 5420 5421 /* 5422 * If there are no regs to dump, notice it through 5423 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
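 *
 * So a consumer parses this part of the record roughly as (reader-side
 * pseudo-C; 'attr' is the perf_event_attr the event was opened with and
 * 'p' a u64 cursor into the sample):
 *
 *	abi = *p++;
 *	if (abi != PERF_SAMPLE_REGS_ABI_NONE)
 *		for each bit set in attr.sample_regs_user, low to high:
 *			reg[bit] = *p++;
 *
 * i.e. one u64 per set mask bit, in ascending bit order, matching
 * perf_output_sample_regs() above.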
5424 */ 5425 perf_output_put(handle, abi); 5426 5427 if (abi) { 5428 u64 mask = event->attr.sample_regs_user; 5429 perf_output_sample_regs(handle, 5430 data->regs_user.regs, 5431 mask); 5432 } 5433 } 5434 5435 if (sample_type & PERF_SAMPLE_STACK_USER) { 5436 perf_output_sample_ustack(handle, 5437 data->stack_user_size, 5438 data->regs_user.regs); 5439 } 5440 5441 if (sample_type & PERF_SAMPLE_WEIGHT) 5442 perf_output_put(handle, data->weight); 5443 5444 if (sample_type & PERF_SAMPLE_DATA_SRC) 5445 perf_output_put(handle, data->data_src.val); 5446 5447 if (sample_type & PERF_SAMPLE_TRANSACTION) 5448 perf_output_put(handle, data->txn); 5449 5450 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5451 u64 abi = data->regs_intr.abi; 5452 /* 5453 * If there are no regs to dump, notice it through 5454 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5455 */ 5456 perf_output_put(handle, abi); 5457 5458 if (abi) { 5459 u64 mask = event->attr.sample_regs_intr; 5460 5461 perf_output_sample_regs(handle, 5462 data->regs_intr.regs, 5463 mask); 5464 } 5465 } 5466 5467 if (!event->attr.watermark) { 5468 int wakeup_events = event->attr.wakeup_events; 5469 5470 if (wakeup_events) { 5471 struct ring_buffer *rb = handle->rb; 5472 int events = local_inc_return(&rb->events); 5473 5474 if (events >= wakeup_events) { 5475 local_sub(wakeup_events, &rb->events); 5476 local_inc(&rb->wakeup); 5477 } 5478 } 5479 } 5480 } 5481 5482 void perf_prepare_sample(struct perf_event_header *header, 5483 struct perf_sample_data *data, 5484 struct perf_event *event, 5485 struct pt_regs *regs) 5486 { 5487 u64 sample_type = event->attr.sample_type; 5488 5489 header->type = PERF_RECORD_SAMPLE; 5490 header->size = sizeof(*header) + event->header_size; 5491 5492 header->misc = 0; 5493 header->misc |= perf_misc_flags(regs); 5494 5495 __perf_event_header__init_id(header, data, event); 5496 5497 if (sample_type & PERF_SAMPLE_IP) 5498 data->ip = perf_instruction_pointer(regs); 5499 5500 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5501 int size = 1; 5502 5503 data->callchain = perf_callchain(event, regs); 5504 5505 if (data->callchain) 5506 size += data->callchain->nr; 5507 5508 header->size += size * sizeof(u64); 5509 } 5510 5511 if (sample_type & PERF_SAMPLE_RAW) { 5512 int size = sizeof(u32); 5513 5514 if (data->raw) 5515 size += data->raw->size; 5516 else 5517 size += sizeof(u32); 5518 5519 header->size += round_up(size, sizeof(u64)); 5520 } 5521 5522 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5523 int size = sizeof(u64); /* nr */ 5524 if (data->br_stack) { 5525 size += data->br_stack->nr 5526 * sizeof(struct perf_branch_entry); 5527 } 5528 header->size += size; 5529 } 5530 5531 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 5532 perf_sample_regs_user(&data->regs_user, regs, 5533 &data->regs_user_copy); 5534 5535 if (sample_type & PERF_SAMPLE_REGS_USER) { 5536 /* regs dump ABI info */ 5537 int size = sizeof(u64); 5538 5539 if (data->regs_user.regs) { 5540 u64 mask = event->attr.sample_regs_user; 5541 size += hweight64(mask) * sizeof(u64); 5542 } 5543 5544 header->size += size; 5545 } 5546 5547 if (sample_type & PERF_SAMPLE_STACK_USER) { 5548 /* 5549 * Either we need the PERF_SAMPLE_STACK_USER bit to be always 5550 * processed as the last one, or have an additional check added 5551 * in case a new sample type is added, because we could eat 5552 * up the rest of the sample size.
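 *
 * For reference, the on-record layout this sizing produces (and which
 * perf_output_sample_ustack() above fills in) is, schematically:
 *
 *	u64	size;		dump area size computed here
 *	char	data[size];	the user stack dump itself
 *	u64	dyn_size;	bytes actually copied out
 *
 * with the trailing dyn_size word only present when size is non-zero.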
5553 */ 5554 u16 stack_size = event->attr.sample_stack_user; 5555 u16 size = sizeof(u64); 5556 5557 stack_size = perf_sample_ustack_size(stack_size, header->size, 5558 data->regs_user.regs); 5559 5560 /* 5561 * If there is something to dump, add space for the dump 5562 * itself and for the field that tells the dynamic size, 5563 * which is how many have been actually dumped. 5564 */ 5565 if (stack_size) 5566 size += sizeof(u64) + stack_size; 5567 5568 data->stack_user_size = stack_size; 5569 header->size += size; 5570 } 5571 5572 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5573 /* regs dump ABI info */ 5574 int size = sizeof(u64); 5575 5576 perf_sample_regs_intr(&data->regs_intr, regs); 5577 5578 if (data->regs_intr.regs) { 5579 u64 mask = event->attr.sample_regs_intr; 5580 5581 size += hweight64(mask) * sizeof(u64); 5582 } 5583 5584 header->size += size; 5585 } 5586 } 5587 5588 void perf_event_output(struct perf_event *event, 5589 struct perf_sample_data *data, 5590 struct pt_regs *regs) 5591 { 5592 struct perf_output_handle handle; 5593 struct perf_event_header header; 5594 5595 /* protect the callchain buffers */ 5596 rcu_read_lock(); 5597 5598 perf_prepare_sample(&header, data, event, regs); 5599 5600 if (perf_output_begin(&handle, event, header.size)) 5601 goto exit; 5602 5603 perf_output_sample(&handle, &header, data, event); 5604 5605 perf_output_end(&handle); 5606 5607 exit: 5608 rcu_read_unlock(); 5609 } 5610 5611 /* 5612 * read event_id 5613 */ 5614 5615 struct perf_read_event { 5616 struct perf_event_header header; 5617 5618 u32 pid; 5619 u32 tid; 5620 }; 5621 5622 static void 5623 perf_event_read_event(struct perf_event *event, 5624 struct task_struct *task) 5625 { 5626 struct perf_output_handle handle; 5627 struct perf_sample_data sample; 5628 struct perf_read_event read_event = { 5629 .header = { 5630 .type = PERF_RECORD_READ, 5631 .misc = 0, 5632 .size = sizeof(read_event) + event->read_size, 5633 }, 5634 .pid = perf_event_pid(event, task), 5635 .tid = perf_event_tid(event, task), 5636 }; 5637 int ret; 5638 5639 perf_event_header__init_id(&read_event.header, &sample, event); 5640 ret = perf_output_begin(&handle, event, read_event.header.size); 5641 if (ret) 5642 return; 5643 5644 perf_output_put(&handle, read_event); 5645 perf_output_read(&handle, event); 5646 perf_event__output_id_sample(event, &handle, &sample); 5647 5648 perf_output_end(&handle); 5649 } 5650 5651 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5652 5653 static void 5654 perf_event_aux_ctx(struct perf_event_context *ctx, 5655 perf_event_aux_output_cb output, 5656 void *data) 5657 { 5658 struct perf_event *event; 5659 5660 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5661 if (event->state < PERF_EVENT_STATE_INACTIVE) 5662 continue; 5663 if (!event_filter_match(event)) 5664 continue; 5665 output(event, data); 5666 } 5667 } 5668 5669 static void 5670 perf_event_aux(perf_event_aux_output_cb output, void *data, 5671 struct perf_event_context *task_ctx) 5672 { 5673 struct perf_cpu_context *cpuctx; 5674 struct perf_event_context *ctx; 5675 struct pmu *pmu; 5676 int ctxn; 5677 5678 rcu_read_lock(); 5679 list_for_each_entry_rcu(pmu, &pmus, entry) { 5680 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5681 if (cpuctx->unique_pmu != pmu) 5682 goto next; 5683 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5684 if (task_ctx) 5685 goto next; 5686 ctxn = pmu->task_ctx_nr; 5687 if (ctxn < 0) 5688 goto next; 5689 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5690 if 
(ctx) 5691 perf_event_aux_ctx(ctx, output, data); 5692 next: 5693 put_cpu_ptr(pmu->pmu_cpu_context); 5694 } 5695 5696 if (task_ctx) { 5697 preempt_disable(); 5698 perf_event_aux_ctx(task_ctx, output, data); 5699 preempt_enable(); 5700 } 5701 rcu_read_unlock(); 5702 } 5703 5704 /* 5705 * task tracking -- fork/exit 5706 * 5707 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5708 */ 5709 5710 struct perf_task_event { 5711 struct task_struct *task; 5712 struct perf_event_context *task_ctx; 5713 5714 struct { 5715 struct perf_event_header header; 5716 5717 u32 pid; 5718 u32 ppid; 5719 u32 tid; 5720 u32 ptid; 5721 u64 time; 5722 } event_id; 5723 }; 5724 5725 static int perf_event_task_match(struct perf_event *event) 5726 { 5727 return event->attr.comm || event->attr.mmap || 5728 event->attr.mmap2 || event->attr.mmap_data || 5729 event->attr.task; 5730 } 5731 5732 static void perf_event_task_output(struct perf_event *event, 5733 void *data) 5734 { 5735 struct perf_task_event *task_event = data; 5736 struct perf_output_handle handle; 5737 struct perf_sample_data sample; 5738 struct task_struct *task = task_event->task; 5739 int ret, size = task_event->event_id.header.size; 5740 5741 if (!perf_event_task_match(event)) 5742 return; 5743 5744 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5745 5746 ret = perf_output_begin(&handle, event, 5747 task_event->event_id.header.size); 5748 if (ret) 5749 goto out; 5750 5751 task_event->event_id.pid = perf_event_pid(event, task); 5752 task_event->event_id.ppid = perf_event_pid(event, current); 5753 5754 task_event->event_id.tid = perf_event_tid(event, task); 5755 task_event->event_id.ptid = perf_event_tid(event, current); 5756 5757 task_event->event_id.time = perf_event_clock(event); 5758 5759 perf_output_put(&handle, task_event->event_id); 5760 5761 perf_event__output_id_sample(event, &handle, &sample); 5762 5763 perf_output_end(&handle); 5764 out: 5765 task_event->event_id.header.size = size; 5766 } 5767 5768 static void perf_event_task(struct task_struct *task, 5769 struct perf_event_context *task_ctx, 5770 int new) 5771 { 5772 struct perf_task_event task_event; 5773 5774 if (!atomic_read(&nr_comm_events) && 5775 !atomic_read(&nr_mmap_events) && 5776 !atomic_read(&nr_task_events)) 5777 return; 5778 5779 task_event = (struct perf_task_event){ 5780 .task = task, 5781 .task_ctx = task_ctx, 5782 .event_id = { 5783 .header = { 5784 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5785 .misc = 0, 5786 .size = sizeof(task_event.event_id), 5787 }, 5788 /* .pid */ 5789 /* .ppid */ 5790 /* .tid */ 5791 /* .ptid */ 5792 /* .time */ 5793 }, 5794 }; 5795 5796 perf_event_aux(perf_event_task_output, 5797 &task_event, 5798 task_ctx); 5799 } 5800 5801 void perf_event_fork(struct task_struct *task) 5802 { 5803 perf_event_task(task, NULL, 1); 5804 } 5805 5806 /* 5807 * comm tracking 5808 */ 5809 5810 struct perf_comm_event { 5811 struct task_struct *task; 5812 char *comm; 5813 int comm_size; 5814 5815 struct { 5816 struct perf_event_header header; 5817 5818 u32 pid; 5819 u32 tid; 5820 } event_id; 5821 }; 5822 5823 static int perf_event_comm_match(struct perf_event *event) 5824 { 5825 return event->attr.comm; 5826 } 5827 5828 static void perf_event_comm_output(struct perf_event *event, 5829 void *data) 5830 { 5831 struct perf_comm_event *comm_event = data; 5832 struct perf_output_handle handle; 5833 struct perf_sample_data sample; 5834 int size = comm_event->event_id.header.size; 5835 int ret; 5836 5837 if (!perf_event_comm_match(event)) 5838 return; 5839 5840 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5841 ret = perf_output_begin(&handle, event, 5842 comm_event->event_id.header.size); 5843 5844 if (ret) 5845 goto out; 5846 5847 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5848 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5849 5850 perf_output_put(&handle, comm_event->event_id); 5851 __output_copy(&handle, comm_event->comm, 5852 comm_event->comm_size); 5853 5854 perf_event__output_id_sample(event, &handle, &sample); 5855 5856 perf_output_end(&handle); 5857 out: 5858 comm_event->event_id.header.size = size; 5859 } 5860 5861 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5862 { 5863 char comm[TASK_COMM_LEN]; 5864 unsigned int size; 5865 5866 memset(comm, 0, sizeof(comm)); 5867 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5868 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5869 5870 comm_event->comm = comm; 5871 comm_event->comm_size = size; 5872 5873 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5874 5875 perf_event_aux(perf_event_comm_output, 5876 comm_event, 5877 NULL); 5878 } 5879 5880 void perf_event_comm(struct task_struct *task, bool exec) 5881 { 5882 struct perf_comm_event comm_event; 5883 5884 if (!atomic_read(&nr_comm_events)) 5885 return; 5886 5887 comm_event = (struct perf_comm_event){ 5888 .task = task, 5889 /* .comm */ 5890 /* .comm_size */ 5891 .event_id = { 5892 .header = { 5893 .type = PERF_RECORD_COMM, 5894 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 5895 /* .size */ 5896 }, 5897 /* .pid */ 5898 /* .tid */ 5899 }, 5900 }; 5901 5902 perf_event_comm_event(&comm_event); 5903 } 5904 5905 /* 5906 * mmap tracking 5907 */ 5908 5909 struct perf_mmap_event { 5910 struct vm_area_struct *vma; 5911 5912 const char *file_name; 5913 int file_size; 5914 int maj, min; 5915 u64 ino; 5916 u64 ino_generation; 5917 u32 prot, flags; 5918 5919 struct { 5920 struct perf_event_header header; 5921 5922 u32 pid; 5923 u32 tid; 5924 u64 start; 5925 u64 len; 5926 u64 pgoff; 5927 } event_id; 5928 }; 5929 5930 static int perf_event_mmap_match(struct perf_event *event, 5931 void *data) 5932 { 5933 struct perf_mmap_event *mmap_event = data; 5934 struct vm_area_struct *vma = mmap_event->vma; 5935 int executable = vma->vm_flags & VM_EXEC; 5936 5937 return (!executable && event->attr.mmap_data) || 5938 (executable && (event->attr.mmap || event->attr.mmap2)); 5939 } 5940 5941 static void perf_event_mmap_output(struct perf_event *event, 5942 void *data) 5943 { 5944 struct perf_mmap_event *mmap_event = data; 5945 struct perf_output_handle handle; 5946 struct perf_sample_data sample; 5947 int size = mmap_event->event_id.header.size; 5948 int ret; 5949 5950 if (!perf_event_mmap_match(event, data)) 5951 return; 5952 5953 if (event->attr.mmap2) { 5954 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5955 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5956 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5957 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5958 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5959 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5960 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5961 } 5962 5963 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5964 ret = perf_output_begin(&handle, event, 5965 mmap_event->event_id.header.size); 5966 if (ret) 5967 goto out; 5968 5969 mmap_event->event_id.pid = perf_event_pid(event, current); 5970 mmap_event->event_id.tid = perf_event_tid(event, current); 5971 5972 perf_output_put(&handle, mmap_event->event_id); 5973 5974 if (event->attr.mmap2) { 5975 perf_output_put(&handle, mmap_event->maj); 5976 perf_output_put(&handle, mmap_event->min); 5977 perf_output_put(&handle, mmap_event->ino); 5978 perf_output_put(&handle, mmap_event->ino_generation); 5979 perf_output_put(&handle, mmap_event->prot); 5980 perf_output_put(&handle, mmap_event->flags); 5981 } 5982 5983 __output_copy(&handle, mmap_event->file_name, 5984 mmap_event->file_size); 5985 5986 perf_event__output_id_sample(event, &handle, &sample); 5987 5988 perf_output_end(&handle); 5989 out: 5990 mmap_event->event_id.header.size = size; 5991 } 5992 5993 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5994 { 5995 struct vm_area_struct *vma = mmap_event->vma; 5996 struct file *file = vma->vm_file; 5997 int maj = 0, min = 0; 5998 u64 ino = 0, gen = 0; 5999 u32 prot = 0, flags = 0; 6000 unsigned int size; 6001 char tmp[16]; 6002 char *buf = NULL; 6003 char *name; 6004 6005 if (file) { 6006 struct inode *inode; 6007 dev_t dev; 6008 6009 buf = kmalloc(PATH_MAX, GFP_KERNEL); 6010 if (!buf) { 6011 name = "//enomem"; 6012 goto cpy_name; 6013 } 6014 /* 6015 * d_path() works from the end of the rb backwards, so we 6016 * need to add enough zero bytes after the string to handle 6017 * the 64bit alignment we do later. 
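 *
 * (Example: a 9-character path plus its NUL terminator occupies 10
 * bytes and gets padded out to 16 at the IS_ALIGNED() loop after
 * got_name below; passing PATH_MAX - sizeof(u64) here guarantees that
 * those trailing pad bytes still lie inside 'buf'.)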
6018 */ 6019 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 6020 if (IS_ERR(name)) { 6021 name = "//toolong"; 6022 goto cpy_name; 6023 } 6024 inode = file_inode(vma->vm_file); 6025 dev = inode->i_sb->s_dev; 6026 ino = inode->i_ino; 6027 gen = inode->i_generation; 6028 maj = MAJOR(dev); 6029 min = MINOR(dev); 6030 6031 if (vma->vm_flags & VM_READ) 6032 prot |= PROT_READ; 6033 if (vma->vm_flags & VM_WRITE) 6034 prot |= PROT_WRITE; 6035 if (vma->vm_flags & VM_EXEC) 6036 prot |= PROT_EXEC; 6037 6038 if (vma->vm_flags & VM_MAYSHARE) 6039 flags = MAP_SHARED; 6040 else 6041 flags = MAP_PRIVATE; 6042 6043 if (vma->vm_flags & VM_DENYWRITE) 6044 flags |= MAP_DENYWRITE; 6045 if (vma->vm_flags & VM_MAYEXEC) 6046 flags |= MAP_EXECUTABLE; 6047 if (vma->vm_flags & VM_LOCKED) 6048 flags |= MAP_LOCKED; 6049 if (vma->vm_flags & VM_HUGETLB) 6050 flags |= MAP_HUGETLB; 6051 6052 goto got_name; 6053 } else { 6054 if (vma->vm_ops && vma->vm_ops->name) { 6055 name = (char *) vma->vm_ops->name(vma); 6056 if (name) 6057 goto cpy_name; 6058 } 6059 6060 name = (char *)arch_vma_name(vma); 6061 if (name) 6062 goto cpy_name; 6063 6064 if (vma->vm_start <= vma->vm_mm->start_brk && 6065 vma->vm_end >= vma->vm_mm->brk) { 6066 name = "[heap]"; 6067 goto cpy_name; 6068 } 6069 if (vma->vm_start <= vma->vm_mm->start_stack && 6070 vma->vm_end >= vma->vm_mm->start_stack) { 6071 name = "[stack]"; 6072 goto cpy_name; 6073 } 6074 6075 name = "//anon"; 6076 goto cpy_name; 6077 } 6078 6079 cpy_name: 6080 strlcpy(tmp, name, sizeof(tmp)); 6081 name = tmp; 6082 got_name: 6083 /* 6084 * Since our buffer works in 8 byte units we need to align our string 6085 * size to a multiple of 8. However, we must guarantee the tail end is 6086 * zero'd out to avoid leaking random bits to userspace. 6087 */ 6088 size = strlen(name)+1; 6089 while (!IS_ALIGNED(size, sizeof(u64))) 6090 name[size++] = '\0'; 6091 6092 mmap_event->file_name = name; 6093 mmap_event->file_size = size; 6094 mmap_event->maj = maj; 6095 mmap_event->min = min; 6096 mmap_event->ino = ino; 6097 mmap_event->ino_generation = gen; 6098 mmap_event->prot = prot; 6099 mmap_event->flags = flags; 6100 6101 if (!(vma->vm_flags & VM_EXEC)) 6102 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 6103 6104 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 6105 6106 perf_event_aux(perf_event_mmap_output, 6107 mmap_event, 6108 NULL); 6109 6110 kfree(buf); 6111 } 6112 6113 void perf_event_mmap(struct vm_area_struct *vma) 6114 { 6115 struct perf_mmap_event mmap_event; 6116 6117 if (!atomic_read(&nr_mmap_events)) 6118 return; 6119 6120 mmap_event = (struct perf_mmap_event){ 6121 .vma = vma, 6122 /* .file_name */ 6123 /* .file_size */ 6124 .event_id = { 6125 .header = { 6126 .type = PERF_RECORD_MMAP, 6127 .misc = PERF_RECORD_MISC_USER, 6128 /* .size */ 6129 }, 6130 /* .pid */ 6131 /* .tid */ 6132 .start = vma->vm_start, 6133 .len = vma->vm_end - vma->vm_start, 6134 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 6135 }, 6136 /* .maj (attr_mmap2 only) */ 6137 /* .min (attr_mmap2 only) */ 6138 /* .ino (attr_mmap2 only) */ 6139 /* .ino_generation (attr_mmap2 only) */ 6140 /* .prot (attr_mmap2 only) */ 6141 /* .flags (attr_mmap2 only) */ 6142 }; 6143 6144 perf_event_mmap_event(&mmap_event); 6145 } 6146 6147 void perf_event_aux_event(struct perf_event *event, unsigned long head, 6148 unsigned long size, u64 flags) 6149 { 6150 struct perf_output_handle handle; 6151 struct perf_sample_data sample; 6152 struct perf_aux_event { 6153 struct perf_event_header header; 6154 
u64 offset; 6155 u64 size; 6156 u64 flags; 6157 } rec = { 6158 .header = { 6159 .type = PERF_RECORD_AUX, 6160 .misc = 0, 6161 .size = sizeof(rec), 6162 }, 6163 .offset = head, 6164 .size = size, 6165 .flags = flags, 6166 }; 6167 int ret; 6168 6169 perf_event_header__init_id(&rec.header, &sample, event); 6170 ret = perf_output_begin(&handle, event, rec.header.size); 6171 6172 if (ret) 6173 return; 6174 6175 perf_output_put(&handle, rec); 6176 perf_event__output_id_sample(event, &handle, &sample); 6177 6178 perf_output_end(&handle); 6179 } 6180 6181 /* 6182 * Lost/dropped samples logging 6183 */ 6184 void perf_log_lost_samples(struct perf_event *event, u64 lost) 6185 { 6186 struct perf_output_handle handle; 6187 struct perf_sample_data sample; 6188 int ret; 6189 6190 struct { 6191 struct perf_event_header header; 6192 u64 lost; 6193 } lost_samples_event = { 6194 .header = { 6195 .type = PERF_RECORD_LOST_SAMPLES, 6196 .misc = 0, 6197 .size = sizeof(lost_samples_event), 6198 }, 6199 .lost = lost, 6200 }; 6201 6202 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 6203 6204 ret = perf_output_begin(&handle, event, 6205 lost_samples_event.header.size); 6206 if (ret) 6207 return; 6208 6209 perf_output_put(&handle, lost_samples_event); 6210 perf_event__output_id_sample(event, &handle, &sample); 6211 perf_output_end(&handle); 6212 } 6213 6214 /* 6215 * context_switch tracking 6216 */ 6217 6218 struct perf_switch_event { 6219 struct task_struct *task; 6220 struct task_struct *next_prev; 6221 6222 struct { 6223 struct perf_event_header header; 6224 u32 next_prev_pid; 6225 u32 next_prev_tid; 6226 } event_id; 6227 }; 6228 6229 static int perf_event_switch_match(struct perf_event *event) 6230 { 6231 return event->attr.context_switch; 6232 } 6233 6234 static void perf_event_switch_output(struct perf_event *event, void *data) 6235 { 6236 struct perf_switch_event *se = data; 6237 struct perf_output_handle handle; 6238 struct perf_sample_data sample; 6239 int ret; 6240 6241 if (!perf_event_switch_match(event)) 6242 return; 6243 6244 /* Only CPU-wide events are allowed to see next/prev pid/tid */ 6245 if (event->ctx->task) { 6246 se->event_id.header.type = PERF_RECORD_SWITCH; 6247 se->event_id.header.size = sizeof(se->event_id.header); 6248 } else { 6249 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; 6250 se->event_id.header.size = sizeof(se->event_id); 6251 se->event_id.next_prev_pid = 6252 perf_event_pid(event, se->next_prev); 6253 se->event_id.next_prev_tid = 6254 perf_event_tid(event, se->next_prev); 6255 } 6256 6257 perf_event_header__init_id(&se->event_id.header, &sample, event); 6258 6259 ret = perf_output_begin(&handle, event, se->event_id.header.size); 6260 if (ret) 6261 return; 6262 6263 if (event->ctx->task) 6264 perf_output_put(&handle, se->event_id.header); 6265 else 6266 perf_output_put(&handle, se->event_id); 6267 6268 perf_event__output_id_sample(event, &handle, &sample); 6269 6270 perf_output_end(&handle); 6271 } 6272 6273 static void perf_event_switch(struct task_struct *task, 6274 struct task_struct *next_prev, bool sched_in) 6275 { 6276 struct perf_switch_event switch_event; 6277 6278 /* N.B. caller checks nr_switch_events != 0 */ 6279 6280 switch_event = (struct perf_switch_event){ 6281 .task = task, 6282 .next_prev = next_prev, 6283 .event_id = { 6284 .header = { 6285 /* .type */ 6286 .misc = sched_in ? 
0 : PERF_RECORD_MISC_SWITCH_OUT, 6287 /* .size */ 6288 }, 6289 /* .next_prev_pid */ 6290 /* .next_prev_tid */ 6291 }, 6292 }; 6293 6294 perf_event_aux(perf_event_switch_output, 6295 &switch_event, 6296 NULL); 6297 } 6298 6299 /* 6300 * IRQ throttle logging 6301 */ 6302 6303 static void perf_log_throttle(struct perf_event *event, int enable) 6304 { 6305 struct perf_output_handle handle; 6306 struct perf_sample_data sample; 6307 int ret; 6308 6309 struct { 6310 struct perf_event_header header; 6311 u64 time; 6312 u64 id; 6313 u64 stream_id; 6314 } throttle_event = { 6315 .header = { 6316 .type = PERF_RECORD_THROTTLE, 6317 .misc = 0, 6318 .size = sizeof(throttle_event), 6319 }, 6320 .time = perf_event_clock(event), 6321 .id = primary_event_id(event), 6322 .stream_id = event->id, 6323 }; 6324 6325 if (enable) 6326 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6327 6328 perf_event_header__init_id(&throttle_event.header, &sample, event); 6329 6330 ret = perf_output_begin(&handle, event, 6331 throttle_event.header.size); 6332 if (ret) 6333 return; 6334 6335 perf_output_put(&handle, throttle_event); 6336 perf_event__output_id_sample(event, &handle, &sample); 6337 perf_output_end(&handle); 6338 } 6339 6340 static void perf_log_itrace_start(struct perf_event *event) 6341 { 6342 struct perf_output_handle handle; 6343 struct perf_sample_data sample; 6344 struct perf_aux_event { 6345 struct perf_event_header header; 6346 u32 pid; 6347 u32 tid; 6348 } rec; 6349 int ret; 6350 6351 if (event->parent) 6352 event = event->parent; 6353 6354 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6355 event->hw.itrace_started) 6356 return; 6357 6358 rec.header.type = PERF_RECORD_ITRACE_START; 6359 rec.header.misc = 0; 6360 rec.header.size = sizeof(rec); 6361 rec.pid = perf_event_pid(event, current); 6362 rec.tid = perf_event_tid(event, current); 6363 6364 perf_event_header__init_id(&rec.header, &sample, event); 6365 ret = perf_output_begin(&handle, event, rec.header.size); 6366 6367 if (ret) 6368 return; 6369 6370 perf_output_put(&handle, rec); 6371 perf_event__output_id_sample(event, &handle, &sample); 6372 6373 perf_output_end(&handle); 6374 } 6375 6376 /* 6377 * Generic event overflow handling, sampling. 6378 */ 6379 6380 static int __perf_event_overflow(struct perf_event *event, 6381 int throttle, struct perf_sample_data *data, 6382 struct pt_regs *regs) 6383 { 6384 int events = atomic_read(&event->event_limit); 6385 struct hw_perf_event *hwc = &event->hw; 6386 u64 seq; 6387 int ret = 0; 6388 6389 /* 6390 * Non-sampling counters might still use the PMI to fold short 6391 * hardware counters, ignore those. 
6392 */ 6393 if (unlikely(!is_sampling_event(event))) 6394 return 0; 6395 6396 seq = __this_cpu_read(perf_throttled_seq); 6397 if (seq != hwc->interrupts_seq) { 6398 hwc->interrupts_seq = seq; 6399 hwc->interrupts = 1; 6400 } else { 6401 hwc->interrupts++; 6402 if (unlikely(throttle 6403 && hwc->interrupts >= max_samples_per_tick)) { 6404 __this_cpu_inc(perf_throttled_count); 6405 hwc->interrupts = MAX_INTERRUPTS; 6406 perf_log_throttle(event, 0); 6407 tick_nohz_full_kick(); 6408 ret = 1; 6409 } 6410 } 6411 6412 if (event->attr.freq) { 6413 u64 now = perf_clock(); 6414 s64 delta = now - hwc->freq_time_stamp; 6415 6416 hwc->freq_time_stamp = now; 6417 6418 if (delta > 0 && delta < 2*TICK_NSEC) 6419 perf_adjust_period(event, delta, hwc->last_period, true); 6420 } 6421 6422 /* 6423 * XXX event_limit might not quite work as expected on inherited 6424 * events 6425 */ 6426 6427 event->pending_kill = POLL_IN; 6428 if (events && atomic_dec_and_test(&event->event_limit)) { 6429 ret = 1; 6430 event->pending_kill = POLL_HUP; 6431 event->pending_disable = 1; 6432 irq_work_queue(&event->pending); 6433 } 6434 6435 if (event->overflow_handler) 6436 event->overflow_handler(event, data, regs); 6437 else 6438 perf_event_output(event, data, regs); 6439 6440 if (*perf_event_fasync(event) && event->pending_kill) { 6441 event->pending_wakeup = 1; 6442 irq_work_queue(&event->pending); 6443 } 6444 6445 return ret; 6446 } 6447 6448 int perf_event_overflow(struct perf_event *event, 6449 struct perf_sample_data *data, 6450 struct pt_regs *regs) 6451 { 6452 return __perf_event_overflow(event, 1, data, regs); 6453 } 6454 6455 /* 6456 * Generic software event infrastructure 6457 */ 6458 6459 struct swevent_htable { 6460 struct swevent_hlist *swevent_hlist; 6461 struct mutex hlist_mutex; 6462 int hlist_refcount; 6463 6464 /* Recursion avoidance in each contexts */ 6465 int recursion[PERF_NR_CONTEXTS]; 6466 6467 /* Keeps track of cpu being initialized/exited */ 6468 bool online; 6469 }; 6470 6471 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6472 6473 /* 6474 * We directly increment event->count and keep a second value in 6475 * event->hw.period_left to count intervals. This period event 6476 * is kept in the range [-sample_period, 0] so that we can use the 6477 * sign as trigger. 6478 */ 6479 6480 u64 perf_swevent_set_period(struct perf_event *event) 6481 { 6482 struct hw_perf_event *hwc = &event->hw; 6483 u64 period = hwc->last_period; 6484 u64 nr, offset; 6485 s64 old, val; 6486 6487 hwc->last_period = hwc->sample_period; 6488 6489 again: 6490 old = val = local64_read(&hwc->period_left); 6491 if (val < 0) 6492 return 0; 6493 6494 nr = div64_u64(period + val, period); 6495 offset = nr * period; 6496 val -= offset; 6497 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6498 goto again; 6499 6500 return nr; 6501 } 6502 6503 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6504 struct perf_sample_data *data, 6505 struct pt_regs *regs) 6506 { 6507 struct hw_perf_event *hwc = &event->hw; 6508 int throttle = 0; 6509 6510 if (!overflow) 6511 overflow = perf_swevent_set_period(event); 6512 6513 if (hwc->interrupts == MAX_INTERRUPTS) 6514 return; 6515 6516 for (; overflow; overflow--) { 6517 if (__perf_event_overflow(event, throttle, 6518 data, regs)) { 6519 /* 6520 * We inhibit the overflow from happening when 6521 * hwc->interrupts == MAX_INTERRUPTS. 
6522 */ 6523 break; 6524 } 6525 throttle = 1; 6526 } 6527 } 6528 6529 static void perf_swevent_event(struct perf_event *event, u64 nr, 6530 struct perf_sample_data *data, 6531 struct pt_regs *regs) 6532 { 6533 struct hw_perf_event *hwc = &event->hw; 6534 6535 local64_add(nr, &event->count); 6536 6537 if (!regs) 6538 return; 6539 6540 if (!is_sampling_event(event)) 6541 return; 6542 6543 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6544 data->period = nr; 6545 return perf_swevent_overflow(event, 1, data, regs); 6546 } else 6547 data->period = event->hw.last_period; 6548 6549 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6550 return perf_swevent_overflow(event, 1, data, regs); 6551 6552 if (local64_add_negative(nr, &hwc->period_left)) 6553 return; 6554 6555 perf_swevent_overflow(event, 0, data, regs); 6556 } 6557 6558 static int perf_exclude_event(struct perf_event *event, 6559 struct pt_regs *regs) 6560 { 6561 if (event->hw.state & PERF_HES_STOPPED) 6562 return 1; 6563 6564 if (regs) { 6565 if (event->attr.exclude_user && user_mode(regs)) 6566 return 1; 6567 6568 if (event->attr.exclude_kernel && !user_mode(regs)) 6569 return 1; 6570 } 6571 6572 return 0; 6573 } 6574 6575 static int perf_swevent_match(struct perf_event *event, 6576 enum perf_type_id type, 6577 u32 event_id, 6578 struct perf_sample_data *data, 6579 struct pt_regs *regs) 6580 { 6581 if (event->attr.type != type) 6582 return 0; 6583 6584 if (event->attr.config != event_id) 6585 return 0; 6586 6587 if (perf_exclude_event(event, regs)) 6588 return 0; 6589 6590 return 1; 6591 } 6592 6593 static inline u64 swevent_hash(u64 type, u32 event_id) 6594 { 6595 u64 val = event_id | (type << 32); 6596 6597 return hash_64(val, SWEVENT_HLIST_BITS); 6598 } 6599 6600 static inline struct hlist_head * 6601 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6602 { 6603 u64 hash = swevent_hash(type, event_id); 6604 6605 return &hlist->heads[hash]; 6606 } 6607 6608 /* For the read side: events when they trigger */ 6609 static inline struct hlist_head * 6610 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6611 { 6612 struct swevent_hlist *hlist; 6613 6614 hlist = rcu_dereference(swhash->swevent_hlist); 6615 if (!hlist) 6616 return NULL; 6617 6618 return __find_swevent_head(hlist, type, event_id); 6619 } 6620 6621 /* For the event head insertion and removal in the hlist */ 6622 static inline struct hlist_head * 6623 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6624 { 6625 struct swevent_hlist *hlist; 6626 u32 event_id = event->attr.config; 6627 u64 type = event->attr.type; 6628 6629 /* 6630 * Event scheduling is always serialized against hlist allocation 6631 * and release. Which makes the protected version suitable here. 6632 * The context lock guarantees that. 
6633 */ 6634 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6635 lockdep_is_held(&event->ctx->lock)); 6636 if (!hlist) 6637 return NULL; 6638 6639 return __find_swevent_head(hlist, type, event_id); 6640 } 6641 6642 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6643 u64 nr, 6644 struct perf_sample_data *data, 6645 struct pt_regs *regs) 6646 { 6647 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6648 struct perf_event *event; 6649 struct hlist_head *head; 6650 6651 rcu_read_lock(); 6652 head = find_swevent_head_rcu(swhash, type, event_id); 6653 if (!head) 6654 goto end; 6655 6656 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6657 if (perf_swevent_match(event, type, event_id, data, regs)) 6658 perf_swevent_event(event, nr, data, regs); 6659 } 6660 end: 6661 rcu_read_unlock(); 6662 } 6663 6664 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6665 6666 int perf_swevent_get_recursion_context(void) 6667 { 6668 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6669 6670 return get_recursion_context(swhash->recursion); 6671 } 6672 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6673 6674 inline void perf_swevent_put_recursion_context(int rctx) 6675 { 6676 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6677 6678 put_recursion_context(swhash->recursion, rctx); 6679 } 6680 6681 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6682 { 6683 struct perf_sample_data data; 6684 6685 if (WARN_ON_ONCE(!regs)) 6686 return; 6687 6688 perf_sample_data_init(&data, addr, 0); 6689 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6690 } 6691 6692 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6693 { 6694 int rctx; 6695 6696 preempt_disable_notrace(); 6697 rctx = perf_swevent_get_recursion_context(); 6698 if (unlikely(rctx < 0)) 6699 goto fail; 6700 6701 ___perf_sw_event(event_id, nr, regs, addr); 6702 6703 perf_swevent_put_recursion_context(rctx); 6704 fail: 6705 preempt_enable_notrace(); 6706 } 6707 6708 static void perf_swevent_read(struct perf_event *event) 6709 { 6710 } 6711 6712 static int perf_swevent_add(struct perf_event *event, int flags) 6713 { 6714 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6715 struct hw_perf_event *hwc = &event->hw; 6716 struct hlist_head *head; 6717 6718 if (is_sampling_event(event)) { 6719 hwc->last_period = hwc->sample_period; 6720 perf_swevent_set_period(event); 6721 } 6722 6723 hwc->state = !(flags & PERF_EF_START); 6724 6725 head = find_swevent_head(swhash, event); 6726 if (!head) { 6727 /* 6728 * We can race with cpu hotplug code. Do not 6729 * WARN if the cpu just got unplugged. 
6730 */ 6731 WARN_ON_ONCE(swhash->online); 6732 return -EINVAL; 6733 } 6734 6735 hlist_add_head_rcu(&event->hlist_entry, head); 6736 perf_event_update_userpage(event); 6737 6738 return 0; 6739 } 6740 6741 static void perf_swevent_del(struct perf_event *event, int flags) 6742 { 6743 hlist_del_rcu(&event->hlist_entry); 6744 } 6745 6746 static void perf_swevent_start(struct perf_event *event, int flags) 6747 { 6748 event->hw.state = 0; 6749 } 6750 6751 static void perf_swevent_stop(struct perf_event *event, int flags) 6752 { 6753 event->hw.state = PERF_HES_STOPPED; 6754 } 6755 6756 /* Deref the hlist from the update side */ 6757 static inline struct swevent_hlist * 6758 swevent_hlist_deref(struct swevent_htable *swhash) 6759 { 6760 return rcu_dereference_protected(swhash->swevent_hlist, 6761 lockdep_is_held(&swhash->hlist_mutex)); 6762 } 6763 6764 static void swevent_hlist_release(struct swevent_htable *swhash) 6765 { 6766 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6767 6768 if (!hlist) 6769 return; 6770 6771 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6772 kfree_rcu(hlist, rcu_head); 6773 } 6774 6775 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6776 { 6777 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6778 6779 mutex_lock(&swhash->hlist_mutex); 6780 6781 if (!--swhash->hlist_refcount) 6782 swevent_hlist_release(swhash); 6783 6784 mutex_unlock(&swhash->hlist_mutex); 6785 } 6786 6787 static void swevent_hlist_put(struct perf_event *event) 6788 { 6789 int cpu; 6790 6791 for_each_possible_cpu(cpu) 6792 swevent_hlist_put_cpu(event, cpu); 6793 } 6794 6795 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6796 { 6797 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6798 int err = 0; 6799 6800 mutex_lock(&swhash->hlist_mutex); 6801 6802 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6803 struct swevent_hlist *hlist; 6804 6805 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6806 if (!hlist) { 6807 err = -ENOMEM; 6808 goto exit; 6809 } 6810 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6811 } 6812 swhash->hlist_refcount++; 6813 exit: 6814 mutex_unlock(&swhash->hlist_mutex); 6815 6816 return err; 6817 } 6818 6819 static int swevent_hlist_get(struct perf_event *event) 6820 { 6821 int err; 6822 int cpu, failed_cpu; 6823 6824 get_online_cpus(); 6825 for_each_possible_cpu(cpu) { 6826 err = swevent_hlist_get_cpu(event, cpu); 6827 if (err) { 6828 failed_cpu = cpu; 6829 goto fail; 6830 } 6831 } 6832 put_online_cpus(); 6833 6834 return 0; 6835 fail: 6836 for_each_possible_cpu(cpu) { 6837 if (cpu == failed_cpu) 6838 break; 6839 swevent_hlist_put_cpu(event, cpu); 6840 } 6841 6842 put_online_cpus(); 6843 return err; 6844 } 6845 6846 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6847 6848 static void sw_perf_event_destroy(struct perf_event *event) 6849 { 6850 u64 event_id = event->attr.config; 6851 6852 WARN_ON(event->parent); 6853 6854 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6855 swevent_hlist_put(event); 6856 } 6857 6858 static int perf_swevent_init(struct perf_event *event) 6859 { 6860 u64 event_id = event->attr.config; 6861 6862 if (event->attr.type != PERF_TYPE_SOFTWARE) 6863 return -ENOENT; 6864 6865 /* 6866 * no branch sampling for software events 6867 */ 6868 if (has_branch_stack(event)) 6869 return -EOPNOTSUPP; 6870 6871 switch (event_id) { 6872 case PERF_COUNT_SW_CPU_CLOCK: 6873 case PERF_COUNT_SW_TASK_CLOCK: 6874 return -ENOENT; 6875 6876 default: 6877 break; 6878 } 6879 6880 
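	/*
	 * Note: cpu-clock and task-clock are rejected with -ENOENT above
	 * because they are serviced by the dedicated perf_cpu_clock and
	 * perf_task_clock pmus further down in this file; perf_init_event()
	 * keeps walking the pmu list until one of those accepts the event.
	 * All other software event ids index perf_swevent_enabled[] below.
	 * An illustrative userspace request for such an event (example
	 * only, not part of this file) might look like:
	 *
	 *	struct perf_event_attr attr = {
	 *		.type	= PERF_TYPE_SOFTWARE,
	 *		.config	= PERF_COUNT_SW_PAGE_FAULTS,
	 *		.size	= sizeof(attr),
	 *	};
	 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	 */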
if (event_id >= PERF_COUNT_SW_MAX) 6881 return -ENOENT; 6882 6883 if (!event->parent) { 6884 int err; 6885 6886 err = swevent_hlist_get(event); 6887 if (err) 6888 return err; 6889 6890 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6891 event->destroy = sw_perf_event_destroy; 6892 } 6893 6894 return 0; 6895 } 6896 6897 static struct pmu perf_swevent = { 6898 .task_ctx_nr = perf_sw_context, 6899 6900 .capabilities = PERF_PMU_CAP_NO_NMI, 6901 6902 .event_init = perf_swevent_init, 6903 .add = perf_swevent_add, 6904 .del = perf_swevent_del, 6905 .start = perf_swevent_start, 6906 .stop = perf_swevent_stop, 6907 .read = perf_swevent_read, 6908 }; 6909 6910 #ifdef CONFIG_EVENT_TRACING 6911 6912 static int perf_tp_filter_match(struct perf_event *event, 6913 struct perf_sample_data *data) 6914 { 6915 void *record = data->raw->data; 6916 6917 /* only top level events have filters set */ 6918 if (event->parent) 6919 event = event->parent; 6920 6921 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6922 return 1; 6923 return 0; 6924 } 6925 6926 static int perf_tp_event_match(struct perf_event *event, 6927 struct perf_sample_data *data, 6928 struct pt_regs *regs) 6929 { 6930 if (event->hw.state & PERF_HES_STOPPED) 6931 return 0; 6932 /* 6933 * All tracepoints are from kernel-space. 6934 */ 6935 if (event->attr.exclude_kernel) 6936 return 0; 6937 6938 if (!perf_tp_filter_match(event, data)) 6939 return 0; 6940 6941 return 1; 6942 } 6943 6944 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6945 struct pt_regs *regs, struct hlist_head *head, int rctx, 6946 struct task_struct *task) 6947 { 6948 struct perf_sample_data data; 6949 struct perf_event *event; 6950 6951 struct perf_raw_record raw = { 6952 .size = entry_size, 6953 .data = record, 6954 }; 6955 6956 perf_sample_data_init(&data, addr, 0); 6957 data.raw = &raw; 6958 6959 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6960 if (perf_tp_event_match(event, &data, regs)) 6961 perf_swevent_event(event, count, &data, regs); 6962 } 6963 6964 /* 6965 * If we got specified a target task, also iterate its context and 6966 * deliver this event there too. 
6967 */ 6968 if (task && task != current) { 6969 struct perf_event_context *ctx; 6970 struct trace_entry *entry = record; 6971 6972 rcu_read_lock(); 6973 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6974 if (!ctx) 6975 goto unlock; 6976 6977 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6978 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6979 continue; 6980 if (event->attr.config != entry->type) 6981 continue; 6982 if (perf_tp_event_match(event, &data, regs)) 6983 perf_swevent_event(event, count, &data, regs); 6984 } 6985 unlock: 6986 rcu_read_unlock(); 6987 } 6988 6989 perf_swevent_put_recursion_context(rctx); 6990 } 6991 EXPORT_SYMBOL_GPL(perf_tp_event); 6992 6993 static void tp_perf_event_destroy(struct perf_event *event) 6994 { 6995 perf_trace_destroy(event); 6996 } 6997 6998 static int perf_tp_event_init(struct perf_event *event) 6999 { 7000 int err; 7001 7002 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7003 return -ENOENT; 7004 7005 /* 7006 * no branch sampling for tracepoint events 7007 */ 7008 if (has_branch_stack(event)) 7009 return -EOPNOTSUPP; 7010 7011 err = perf_trace_init(event); 7012 if (err) 7013 return err; 7014 7015 event->destroy = tp_perf_event_destroy; 7016 7017 return 0; 7018 } 7019 7020 static struct pmu perf_tracepoint = { 7021 .task_ctx_nr = perf_sw_context, 7022 7023 .event_init = perf_tp_event_init, 7024 .add = perf_trace_add, 7025 .del = perf_trace_del, 7026 .start = perf_swevent_start, 7027 .stop = perf_swevent_stop, 7028 .read = perf_swevent_read, 7029 }; 7030 7031 static inline void perf_tp_register(void) 7032 { 7033 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 7034 } 7035 7036 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 7037 { 7038 char *filter_str; 7039 int ret; 7040 7041 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7042 return -EINVAL; 7043 7044 filter_str = strndup_user(arg, PAGE_SIZE); 7045 if (IS_ERR(filter_str)) 7046 return PTR_ERR(filter_str); 7047 7048 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 7049 7050 kfree(filter_str); 7051 return ret; 7052 } 7053 7054 static void perf_event_free_filter(struct perf_event *event) 7055 { 7056 ftrace_profile_free_filter(event); 7057 } 7058 7059 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 7060 { 7061 struct bpf_prog *prog; 7062 7063 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7064 return -EINVAL; 7065 7066 if (event->tp_event->prog) 7067 return -EEXIST; 7068 7069 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) 7070 /* bpf programs can only be attached to u/kprobes */ 7071 return -EINVAL; 7072 7073 prog = bpf_prog_get(prog_fd); 7074 if (IS_ERR(prog)) 7075 return PTR_ERR(prog); 7076 7077 if (prog->type != BPF_PROG_TYPE_KPROBE) { 7078 /* valid fd, but invalid bpf program type */ 7079 bpf_prog_put(prog); 7080 return -EINVAL; 7081 } 7082 7083 event->tp_event->prog = prog; 7084 7085 return 0; 7086 } 7087 7088 static void perf_event_free_bpf_prog(struct perf_event *event) 7089 { 7090 struct bpf_prog *prog; 7091 7092 if (!event->tp_event) 7093 return; 7094 7095 prog = event->tp_event->prog; 7096 if (prog) { 7097 event->tp_event->prog = NULL; 7098 bpf_prog_put(prog); 7099 } 7100 } 7101 7102 #else 7103 7104 static inline void perf_tp_register(void) 7105 { 7106 } 7107 7108 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 7109 { 7110 return -ENOENT; 7111 } 7112 7113 static void perf_event_free_filter(struct perf_event *event) 7114 { 
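	/* CONFIG_EVENT_TRACING is off: no filter was ever attached. */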
7115 } 7116 7117 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 7118 { 7119 return -ENOENT; 7120 } 7121 7122 static void perf_event_free_bpf_prog(struct perf_event *event) 7123 { 7124 } 7125 #endif /* CONFIG_EVENT_TRACING */ 7126 7127 #ifdef CONFIG_HAVE_HW_BREAKPOINT 7128 void perf_bp_event(struct perf_event *bp, void *data) 7129 { 7130 struct perf_sample_data sample; 7131 struct pt_regs *regs = data; 7132 7133 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 7134 7135 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 7136 perf_swevent_event(bp, 1, &sample, regs); 7137 } 7138 #endif 7139 7140 /* 7141 * hrtimer based swevent callback 7142 */ 7143 7144 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 7145 { 7146 enum hrtimer_restart ret = HRTIMER_RESTART; 7147 struct perf_sample_data data; 7148 struct pt_regs *regs; 7149 struct perf_event *event; 7150 u64 period; 7151 7152 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 7153 7154 if (event->state != PERF_EVENT_STATE_ACTIVE) 7155 return HRTIMER_NORESTART; 7156 7157 event->pmu->read(event); 7158 7159 perf_sample_data_init(&data, 0, event->hw.last_period); 7160 regs = get_irq_regs(); 7161 7162 if (regs && !perf_exclude_event(event, regs)) { 7163 if (!(event->attr.exclude_idle && is_idle_task(current))) 7164 if (__perf_event_overflow(event, 1, &data, regs)) 7165 ret = HRTIMER_NORESTART; 7166 } 7167 7168 period = max_t(u64, 10000, event->hw.sample_period); 7169 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 7170 7171 return ret; 7172 } 7173 7174 static void perf_swevent_start_hrtimer(struct perf_event *event) 7175 { 7176 struct hw_perf_event *hwc = &event->hw; 7177 s64 period; 7178 7179 if (!is_sampling_event(event)) 7180 return; 7181 7182 period = local64_read(&hwc->period_left); 7183 if (period) { 7184 if (period < 0) 7185 period = 10000; 7186 7187 local64_set(&hwc->period_left, 0); 7188 } else { 7189 period = max_t(u64, 10000, hwc->sample_period); 7190 } 7191 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 7192 HRTIMER_MODE_REL_PINNED); 7193 } 7194 7195 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 7196 { 7197 struct hw_perf_event *hwc = &event->hw; 7198 7199 if (is_sampling_event(event)) { 7200 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 7201 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 7202 7203 hrtimer_cancel(&hwc->hrtimer); 7204 } 7205 } 7206 7207 static void perf_swevent_init_hrtimer(struct perf_event *event) 7208 { 7209 struct hw_perf_event *hwc = &event->hw; 7210 7211 if (!is_sampling_event(event)) 7212 return; 7213 7214 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7215 hwc->hrtimer.function = perf_swevent_hrtimer; 7216 7217 /* 7218 * Since hrtimers have a fixed rate, we can do a static freq->period 7219 * mapping and avoid the whole period adjust feedback stuff. 
7220 */ 7221 if (event->attr.freq) { 7222 long freq = event->attr.sample_freq; 7223 7224 event->attr.sample_period = NSEC_PER_SEC / freq; 7225 hwc->sample_period = event->attr.sample_period; 7226 local64_set(&hwc->period_left, hwc->sample_period); 7227 hwc->last_period = hwc->sample_period; 7228 event->attr.freq = 0; 7229 } 7230 } 7231 7232 /* 7233 * Software event: cpu wall time clock 7234 */ 7235 7236 static void cpu_clock_event_update(struct perf_event *event) 7237 { 7238 s64 prev; 7239 u64 now; 7240 7241 now = local_clock(); 7242 prev = local64_xchg(&event->hw.prev_count, now); 7243 local64_add(now - prev, &event->count); 7244 } 7245 7246 static void cpu_clock_event_start(struct perf_event *event, int flags) 7247 { 7248 local64_set(&event->hw.prev_count, local_clock()); 7249 perf_swevent_start_hrtimer(event); 7250 } 7251 7252 static void cpu_clock_event_stop(struct perf_event *event, int flags) 7253 { 7254 perf_swevent_cancel_hrtimer(event); 7255 cpu_clock_event_update(event); 7256 } 7257 7258 static int cpu_clock_event_add(struct perf_event *event, int flags) 7259 { 7260 if (flags & PERF_EF_START) 7261 cpu_clock_event_start(event, flags); 7262 perf_event_update_userpage(event); 7263 7264 return 0; 7265 } 7266 7267 static void cpu_clock_event_del(struct perf_event *event, int flags) 7268 { 7269 cpu_clock_event_stop(event, flags); 7270 } 7271 7272 static void cpu_clock_event_read(struct perf_event *event) 7273 { 7274 cpu_clock_event_update(event); 7275 } 7276 7277 static int cpu_clock_event_init(struct perf_event *event) 7278 { 7279 if (event->attr.type != PERF_TYPE_SOFTWARE) 7280 return -ENOENT; 7281 7282 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 7283 return -ENOENT; 7284 7285 /* 7286 * no branch sampling for software events 7287 */ 7288 if (has_branch_stack(event)) 7289 return -EOPNOTSUPP; 7290 7291 perf_swevent_init_hrtimer(event); 7292 7293 return 0; 7294 } 7295 7296 static struct pmu perf_cpu_clock = { 7297 .task_ctx_nr = perf_sw_context, 7298 7299 .capabilities = PERF_PMU_CAP_NO_NMI, 7300 7301 .event_init = cpu_clock_event_init, 7302 .add = cpu_clock_event_add, 7303 .del = cpu_clock_event_del, 7304 .start = cpu_clock_event_start, 7305 .stop = cpu_clock_event_stop, 7306 .read = cpu_clock_event_read, 7307 }; 7308 7309 /* 7310 * Software event: task time clock 7311 */ 7312 7313 static void task_clock_event_update(struct perf_event *event, u64 now) 7314 { 7315 u64 prev; 7316 s64 delta; 7317 7318 prev = local64_xchg(&event->hw.prev_count, now); 7319 delta = now - prev; 7320 local64_add(delta, &event->count); 7321 } 7322 7323 static void task_clock_event_start(struct perf_event *event, int flags) 7324 { 7325 local64_set(&event->hw.prev_count, event->ctx->time); 7326 perf_swevent_start_hrtimer(event); 7327 } 7328 7329 static void task_clock_event_stop(struct perf_event *event, int flags) 7330 { 7331 perf_swevent_cancel_hrtimer(event); 7332 task_clock_event_update(event, event->ctx->time); 7333 } 7334 7335 static int task_clock_event_add(struct perf_event *event, int flags) 7336 { 7337 if (flags & PERF_EF_START) 7338 task_clock_event_start(event, flags); 7339 perf_event_update_userpage(event); 7340 7341 return 0; 7342 } 7343 7344 static void task_clock_event_del(struct perf_event *event, int flags) 7345 { 7346 task_clock_event_stop(event, PERF_EF_UPDATE); 7347 } 7348 7349 static void task_clock_event_read(struct perf_event *event) 7350 { 7351 u64 now = perf_clock(); 7352 u64 delta = now - event->ctx->timestamp; 7353 u64 time = event->ctx->time + delta; 7354 7355 
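	/*
	 * ctx->time is only accumulated up to ctx->timestamp, so
	 * extrapolate it by the wall-clock delta since then to get a
	 * current reading without updating the context here.  E.g. if
	 * ctx->time held 5ms when ctx->timestamp was last set and
	 * perf_clock() has advanced another 2ms, the event reads as
	 * 7ms of task time.
	 */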
task_clock_event_update(event, time); 7356 } 7357 7358 static int task_clock_event_init(struct perf_event *event) 7359 { 7360 if (event->attr.type != PERF_TYPE_SOFTWARE) 7361 return -ENOENT; 7362 7363 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7364 return -ENOENT; 7365 7366 /* 7367 * no branch sampling for software events 7368 */ 7369 if (has_branch_stack(event)) 7370 return -EOPNOTSUPP; 7371 7372 perf_swevent_init_hrtimer(event); 7373 7374 return 0; 7375 } 7376 7377 static struct pmu perf_task_clock = { 7378 .task_ctx_nr = perf_sw_context, 7379 7380 .capabilities = PERF_PMU_CAP_NO_NMI, 7381 7382 .event_init = task_clock_event_init, 7383 .add = task_clock_event_add, 7384 .del = task_clock_event_del, 7385 .start = task_clock_event_start, 7386 .stop = task_clock_event_stop, 7387 .read = task_clock_event_read, 7388 }; 7389 7390 static void perf_pmu_nop_void(struct pmu *pmu) 7391 { 7392 } 7393 7394 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) 7395 { 7396 } 7397 7398 static int perf_pmu_nop_int(struct pmu *pmu) 7399 { 7400 return 0; 7401 } 7402 7403 static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 7404 7405 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 7406 { 7407 __this_cpu_write(nop_txn_flags, flags); 7408 7409 if (flags & ~PERF_PMU_TXN_ADD) 7410 return; 7411 7412 perf_pmu_disable(pmu); 7413 } 7414 7415 static int perf_pmu_commit_txn(struct pmu *pmu) 7416 { 7417 unsigned int flags = __this_cpu_read(nop_txn_flags); 7418 7419 __this_cpu_write(nop_txn_flags, 0); 7420 7421 if (flags & ~PERF_PMU_TXN_ADD) 7422 return 0; 7423 7424 perf_pmu_enable(pmu); 7425 return 0; 7426 } 7427 7428 static void perf_pmu_cancel_txn(struct pmu *pmu) 7429 { 7430 unsigned int flags = __this_cpu_read(nop_txn_flags); 7431 7432 __this_cpu_write(nop_txn_flags, 0); 7433 7434 if (flags & ~PERF_PMU_TXN_ADD) 7435 return; 7436 7437 perf_pmu_enable(pmu); 7438 } 7439 7440 static int perf_event_idx_default(struct perf_event *event) 7441 { 7442 return 0; 7443 } 7444 7445 /* 7446 * Ensures all contexts with the same task_ctx_nr have the same 7447 * pmu_cpu_context too. 7448 */ 7449 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7450 { 7451 struct pmu *pmu; 7452 7453 if (ctxn < 0) 7454 return NULL; 7455 7456 list_for_each_entry(pmu, &pmus, entry) { 7457 if (pmu->task_ctx_nr == ctxn) 7458 return pmu->pmu_cpu_context; 7459 } 7460 7461 return NULL; 7462 } 7463 7464 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7465 { 7466 int cpu; 7467 7468 for_each_possible_cpu(cpu) { 7469 struct perf_cpu_context *cpuctx; 7470 7471 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7472 7473 if (cpuctx->unique_pmu == old_pmu) 7474 cpuctx->unique_pmu = pmu; 7475 } 7476 } 7477 7478 static void free_pmu_context(struct pmu *pmu) 7479 { 7480 struct pmu *i; 7481 7482 mutex_lock(&pmus_lock); 7483 /* 7484 * Like a real lame refcount. 
7485 */ 7486 list_for_each_entry(i, &pmus, entry) { 7487 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7488 update_pmu_context(i, pmu); 7489 goto out; 7490 } 7491 } 7492 7493 free_percpu(pmu->pmu_cpu_context); 7494 out: 7495 mutex_unlock(&pmus_lock); 7496 } 7497 static struct idr pmu_idr; 7498 7499 static ssize_t 7500 type_show(struct device *dev, struct device_attribute *attr, char *page) 7501 { 7502 struct pmu *pmu = dev_get_drvdata(dev); 7503 7504 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7505 } 7506 static DEVICE_ATTR_RO(type); 7507 7508 static ssize_t 7509 perf_event_mux_interval_ms_show(struct device *dev, 7510 struct device_attribute *attr, 7511 char *page) 7512 { 7513 struct pmu *pmu = dev_get_drvdata(dev); 7514 7515 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7516 } 7517 7518 static DEFINE_MUTEX(mux_interval_mutex); 7519 7520 static ssize_t 7521 perf_event_mux_interval_ms_store(struct device *dev, 7522 struct device_attribute *attr, 7523 const char *buf, size_t count) 7524 { 7525 struct pmu *pmu = dev_get_drvdata(dev); 7526 int timer, cpu, ret; 7527 7528 ret = kstrtoint(buf, 0, &timer); 7529 if (ret) 7530 return ret; 7531 7532 if (timer < 1) 7533 return -EINVAL; 7534 7535 /* same value, noting to do */ 7536 if (timer == pmu->hrtimer_interval_ms) 7537 return count; 7538 7539 mutex_lock(&mux_interval_mutex); 7540 pmu->hrtimer_interval_ms = timer; 7541 7542 /* update all cpuctx for this PMU */ 7543 get_online_cpus(); 7544 for_each_online_cpu(cpu) { 7545 struct perf_cpu_context *cpuctx; 7546 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7547 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7548 7549 cpu_function_call(cpu, 7550 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7551 } 7552 put_online_cpus(); 7553 mutex_unlock(&mux_interval_mutex); 7554 7555 return count; 7556 } 7557 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7558 7559 static struct attribute *pmu_dev_attrs[] = { 7560 &dev_attr_type.attr, 7561 &dev_attr_perf_event_mux_interval_ms.attr, 7562 NULL, 7563 }; 7564 ATTRIBUTE_GROUPS(pmu_dev); 7565 7566 static int pmu_bus_running; 7567 static struct bus_type pmu_bus = { 7568 .name = "event_source", 7569 .dev_groups = pmu_dev_groups, 7570 }; 7571 7572 static void pmu_dev_release(struct device *dev) 7573 { 7574 kfree(dev); 7575 } 7576 7577 static int pmu_dev_alloc(struct pmu *pmu) 7578 { 7579 int ret = -ENOMEM; 7580 7581 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7582 if (!pmu->dev) 7583 goto out; 7584 7585 pmu->dev->groups = pmu->attr_groups; 7586 device_initialize(pmu->dev); 7587 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7588 if (ret) 7589 goto free_dev; 7590 7591 dev_set_drvdata(pmu->dev, pmu); 7592 pmu->dev->bus = &pmu_bus; 7593 pmu->dev->release = pmu_dev_release; 7594 ret = device_add(pmu->dev); 7595 if (ret) 7596 goto free_dev; 7597 7598 out: 7599 return ret; 7600 7601 free_dev: 7602 put_device(pmu->dev); 7603 goto out; 7604 } 7605 7606 static struct lock_class_key cpuctx_mutex; 7607 static struct lock_class_key cpuctx_lock; 7608 7609 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7610 { 7611 int cpu, ret; 7612 7613 mutex_lock(&pmus_lock); 7614 ret = -ENOMEM; 7615 pmu->pmu_disable_count = alloc_percpu(int); 7616 if (!pmu->pmu_disable_count) 7617 goto unlock; 7618 7619 pmu->type = -1; 7620 if (!name) 7621 goto skip_type; 7622 pmu->name = name; 7623 7624 if (type < 0) { 7625 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7626 if (type < 0) { 7627 ret = 
type; 7628 goto free_pdc; 7629 } 7630 } 7631 pmu->type = type; 7632 7633 if (pmu_bus_running) { 7634 ret = pmu_dev_alloc(pmu); 7635 if (ret) 7636 goto free_idr; 7637 } 7638 7639 skip_type: 7640 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7641 if (pmu->pmu_cpu_context) 7642 goto got_cpu_context; 7643 7644 ret = -ENOMEM; 7645 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7646 if (!pmu->pmu_cpu_context) 7647 goto free_dev; 7648 7649 for_each_possible_cpu(cpu) { 7650 struct perf_cpu_context *cpuctx; 7651 7652 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7653 __perf_event_init_context(&cpuctx->ctx); 7654 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7655 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7656 cpuctx->ctx.pmu = pmu; 7657 7658 __perf_mux_hrtimer_init(cpuctx, cpu); 7659 7660 cpuctx->unique_pmu = pmu; 7661 } 7662 7663 got_cpu_context: 7664 if (!pmu->start_txn) { 7665 if (pmu->pmu_enable) { 7666 /* 7667 * If we have pmu_enable/pmu_disable calls, install 7668 * transaction stubs that use that to try and batch 7669 * hardware accesses. 7670 */ 7671 pmu->start_txn = perf_pmu_start_txn; 7672 pmu->commit_txn = perf_pmu_commit_txn; 7673 pmu->cancel_txn = perf_pmu_cancel_txn; 7674 } else { 7675 pmu->start_txn = perf_pmu_nop_txn; 7676 pmu->commit_txn = perf_pmu_nop_int; 7677 pmu->cancel_txn = perf_pmu_nop_void; 7678 } 7679 } 7680 7681 if (!pmu->pmu_enable) { 7682 pmu->pmu_enable = perf_pmu_nop_void; 7683 pmu->pmu_disable = perf_pmu_nop_void; 7684 } 7685 7686 if (!pmu->event_idx) 7687 pmu->event_idx = perf_event_idx_default; 7688 7689 list_add_rcu(&pmu->entry, &pmus); 7690 atomic_set(&pmu->exclusive_cnt, 0); 7691 ret = 0; 7692 unlock: 7693 mutex_unlock(&pmus_lock); 7694 7695 return ret; 7696 7697 free_dev: 7698 device_del(pmu->dev); 7699 put_device(pmu->dev); 7700 7701 free_idr: 7702 if (pmu->type >= PERF_TYPE_MAX) 7703 idr_remove(&pmu_idr, pmu->type); 7704 7705 free_pdc: 7706 free_percpu(pmu->pmu_disable_count); 7707 goto unlock; 7708 } 7709 EXPORT_SYMBOL_GPL(perf_pmu_register); 7710 7711 void perf_pmu_unregister(struct pmu *pmu) 7712 { 7713 mutex_lock(&pmus_lock); 7714 list_del_rcu(&pmu->entry); 7715 mutex_unlock(&pmus_lock); 7716 7717 /* 7718 * We dereference the pmu list under both SRCU and regular RCU, so 7719 * synchronize against both of those. 7720 */ 7721 synchronize_srcu(&pmus_srcu); 7722 synchronize_rcu(); 7723 7724 free_percpu(pmu->pmu_disable_count); 7725 if (pmu->type >= PERF_TYPE_MAX) 7726 idr_remove(&pmu_idr, pmu->type); 7727 device_del(pmu->dev); 7728 put_device(pmu->dev); 7729 free_pmu_context(pmu); 7730 } 7731 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7732 7733 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7734 { 7735 struct perf_event_context *ctx = NULL; 7736 int ret; 7737 7738 if (!try_module_get(pmu->module)) 7739 return -ENODEV; 7740 7741 if (event->group_leader != event) { 7742 /* 7743 * This ctx->mutex can nest when we're called through 7744 * inheritance. See the perf_event_ctx_lock_nested() comment. 
7745 */ 7746 ctx = perf_event_ctx_lock_nested(event->group_leader, 7747 SINGLE_DEPTH_NESTING); 7748 BUG_ON(!ctx); 7749 } 7750 7751 event->pmu = pmu; 7752 ret = pmu->event_init(event); 7753 7754 if (ctx) 7755 perf_event_ctx_unlock(event->group_leader, ctx); 7756 7757 if (ret) 7758 module_put(pmu->module); 7759 7760 return ret; 7761 } 7762 7763 static struct pmu *perf_init_event(struct perf_event *event) 7764 { 7765 struct pmu *pmu = NULL; 7766 int idx; 7767 int ret; 7768 7769 idx = srcu_read_lock(&pmus_srcu); 7770 7771 rcu_read_lock(); 7772 pmu = idr_find(&pmu_idr, event->attr.type); 7773 rcu_read_unlock(); 7774 if (pmu) { 7775 ret = perf_try_init_event(pmu, event); 7776 if (ret) 7777 pmu = ERR_PTR(ret); 7778 goto unlock; 7779 } 7780 7781 list_for_each_entry_rcu(pmu, &pmus, entry) { 7782 ret = perf_try_init_event(pmu, event); 7783 if (!ret) 7784 goto unlock; 7785 7786 if (ret != -ENOENT) { 7787 pmu = ERR_PTR(ret); 7788 goto unlock; 7789 } 7790 } 7791 pmu = ERR_PTR(-ENOENT); 7792 unlock: 7793 srcu_read_unlock(&pmus_srcu, idx); 7794 7795 return pmu; 7796 } 7797 7798 static void account_event_cpu(struct perf_event *event, int cpu) 7799 { 7800 if (event->parent) 7801 return; 7802 7803 if (is_cgroup_event(event)) 7804 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7805 } 7806 7807 static void account_event(struct perf_event *event) 7808 { 7809 if (event->parent) 7810 return; 7811 7812 if (event->attach_state & PERF_ATTACH_TASK) 7813 static_key_slow_inc(&perf_sched_events.key); 7814 if (event->attr.mmap || event->attr.mmap_data) 7815 atomic_inc(&nr_mmap_events); 7816 if (event->attr.comm) 7817 atomic_inc(&nr_comm_events); 7818 if (event->attr.task) 7819 atomic_inc(&nr_task_events); 7820 if (event->attr.freq) { 7821 if (atomic_inc_return(&nr_freq_events) == 1) 7822 tick_nohz_full_kick_all(); 7823 } 7824 if (event->attr.context_switch) { 7825 atomic_inc(&nr_switch_events); 7826 static_key_slow_inc(&perf_sched_events.key); 7827 } 7828 if (has_branch_stack(event)) 7829 static_key_slow_inc(&perf_sched_events.key); 7830 if (is_cgroup_event(event)) 7831 static_key_slow_inc(&perf_sched_events.key); 7832 7833 account_event_cpu(event, event->cpu); 7834 } 7835 7836 /* 7837 * Allocate and initialize a event structure 7838 */ 7839 static struct perf_event * 7840 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7841 struct task_struct *task, 7842 struct perf_event *group_leader, 7843 struct perf_event *parent_event, 7844 perf_overflow_handler_t overflow_handler, 7845 void *context, int cgroup_fd) 7846 { 7847 struct pmu *pmu; 7848 struct perf_event *event; 7849 struct hw_perf_event *hwc; 7850 long err = -EINVAL; 7851 7852 if ((unsigned)cpu >= nr_cpu_ids) { 7853 if (!task || cpu != -1) 7854 return ERR_PTR(-EINVAL); 7855 } 7856 7857 event = kzalloc(sizeof(*event), GFP_KERNEL); 7858 if (!event) 7859 return ERR_PTR(-ENOMEM); 7860 7861 /* 7862 * Single events are their own group leaders, with an 7863 * empty sibling list: 7864 */ 7865 if (!group_leader) 7866 group_leader = event; 7867 7868 mutex_init(&event->child_mutex); 7869 INIT_LIST_HEAD(&event->child_list); 7870 7871 INIT_LIST_HEAD(&event->group_entry); 7872 INIT_LIST_HEAD(&event->event_entry); 7873 INIT_LIST_HEAD(&event->sibling_list); 7874 INIT_LIST_HEAD(&event->rb_entry); 7875 INIT_LIST_HEAD(&event->active_entry); 7876 INIT_HLIST_NODE(&event->hlist_entry); 7877 7878 7879 init_waitqueue_head(&event->waitq); 7880 init_irq_work(&event->pending, perf_pending_event); 7881 7882 mutex_init(&event->mmap_mutex); 7883 7884 atomic_long_set(&event->refcount, 1); 
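	/*
	 * The reference installed here is the one the event's file (or,
	 * for kernel counters, the kernel owner) ends up holding; it is
	 * dropped via put_event()/free_event() on release.
	 */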
7885 event->cpu = cpu; 7886 event->attr = *attr; 7887 event->group_leader = group_leader; 7888 event->pmu = NULL; 7889 event->oncpu = -1; 7890 7891 event->parent = parent_event; 7892 7893 event->ns = get_pid_ns(task_active_pid_ns(current)); 7894 event->id = atomic64_inc_return(&perf_event_id); 7895 7896 event->state = PERF_EVENT_STATE_INACTIVE; 7897 7898 if (task) { 7899 event->attach_state = PERF_ATTACH_TASK; 7900 /* 7901 * XXX pmu::event_init needs to know what task to account to 7902 * and we cannot use the ctx information because we need the 7903 * pmu before we get a ctx. 7904 */ 7905 event->hw.target = task; 7906 } 7907 7908 event->clock = &local_clock; 7909 if (parent_event) 7910 event->clock = parent_event->clock; 7911 7912 if (!overflow_handler && parent_event) { 7913 overflow_handler = parent_event->overflow_handler; 7914 context = parent_event->overflow_handler_context; 7915 } 7916 7917 event->overflow_handler = overflow_handler; 7918 event->overflow_handler_context = context; 7919 7920 perf_event__state_init(event); 7921 7922 pmu = NULL; 7923 7924 hwc = &event->hw; 7925 hwc->sample_period = attr->sample_period; 7926 if (attr->freq && attr->sample_freq) 7927 hwc->sample_period = 1; 7928 hwc->last_period = hwc->sample_period; 7929 7930 local64_set(&hwc->period_left, hwc->sample_period); 7931 7932 /* 7933 * we currently do not support PERF_FORMAT_GROUP on inherited events 7934 */ 7935 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7936 goto err_ns; 7937 7938 if (!has_branch_stack(event)) 7939 event->attr.branch_sample_type = 0; 7940 7941 if (cgroup_fd != -1) { 7942 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7943 if (err) 7944 goto err_ns; 7945 } 7946 7947 pmu = perf_init_event(event); 7948 if (!pmu) 7949 goto err_ns; 7950 else if (IS_ERR(pmu)) { 7951 err = PTR_ERR(pmu); 7952 goto err_ns; 7953 } 7954 7955 err = exclusive_event_init(event); 7956 if (err) 7957 goto err_pmu; 7958 7959 if (!event->parent) { 7960 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7961 err = get_callchain_buffers(); 7962 if (err) 7963 goto err_per_task; 7964 } 7965 } 7966 7967 return event; 7968 7969 err_per_task: 7970 exclusive_event_destroy(event); 7971 7972 err_pmu: 7973 if (event->destroy) 7974 event->destroy(event); 7975 module_put(pmu->module); 7976 err_ns: 7977 if (is_cgroup_event(event)) 7978 perf_detach_cgroup(event); 7979 if (event->ns) 7980 put_pid_ns(event->ns); 7981 kfree(event); 7982 7983 return ERR_PTR(err); 7984 } 7985 7986 static int perf_copy_attr(struct perf_event_attr __user *uattr, 7987 struct perf_event_attr *attr) 7988 { 7989 u32 size; 7990 int ret; 7991 7992 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 7993 return -EFAULT; 7994 7995 /* 7996 * zero the full structure, so that a short copy will be nice. 7997 */ 7998 memset(attr, 0, sizeof(*attr)); 7999 8000 ret = get_user(size, &uattr->size); 8001 if (ret) 8002 return ret; 8003 8004 if (size > PAGE_SIZE) /* silly large */ 8005 goto err_size; 8006 8007 if (!size) /* abi compat */ 8008 size = PERF_ATTR_SIZE_VER0; 8009 8010 if (size < PERF_ATTR_SIZE_VER0) 8011 goto err_size; 8012 8013 /* 8014 * If we're handed a bigger struct than we know of, 8015 * ensure all the unknown bits are 0 - i.e. new 8016 * user-space does not rely on any kernel feature 8017 * extensions we dont know about yet. 
8018 */ 8019 if (size > sizeof(*attr)) { 8020 unsigned char __user *addr; 8021 unsigned char __user *end; 8022 unsigned char val; 8023 8024 addr = (void __user *)uattr + sizeof(*attr); 8025 end = (void __user *)uattr + size; 8026 8027 for (; addr < end; addr++) { 8028 ret = get_user(val, addr); 8029 if (ret) 8030 return ret; 8031 if (val) 8032 goto err_size; 8033 } 8034 size = sizeof(*attr); 8035 } 8036 8037 ret = copy_from_user(attr, uattr, size); 8038 if (ret) 8039 return -EFAULT; 8040 8041 if (attr->__reserved_1) 8042 return -EINVAL; 8043 8044 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 8045 return -EINVAL; 8046 8047 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 8048 return -EINVAL; 8049 8050 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 8051 u64 mask = attr->branch_sample_type; 8052 8053 /* only using defined bits */ 8054 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 8055 return -EINVAL; 8056 8057 /* at least one branch bit must be set */ 8058 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 8059 return -EINVAL; 8060 8061 /* propagate priv level, when not set for branch */ 8062 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 8063 8064 /* exclude_kernel checked on syscall entry */ 8065 if (!attr->exclude_kernel) 8066 mask |= PERF_SAMPLE_BRANCH_KERNEL; 8067 8068 if (!attr->exclude_user) 8069 mask |= PERF_SAMPLE_BRANCH_USER; 8070 8071 if (!attr->exclude_hv) 8072 mask |= PERF_SAMPLE_BRANCH_HV; 8073 /* 8074 * adjust user setting (for HW filter setup) 8075 */ 8076 attr->branch_sample_type = mask; 8077 } 8078 /* privileged levels capture (kernel, hv): check permissions */ 8079 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 8080 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8081 return -EACCES; 8082 } 8083 8084 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 8085 ret = perf_reg_validate(attr->sample_regs_user); 8086 if (ret) 8087 return ret; 8088 } 8089 8090 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 8091 if (!arch_perf_have_user_stack_dump()) 8092 return -ENOSYS; 8093 8094 /* 8095 * We have __u32 type for the size, but so far 8096 * we can only use __u16 as maximum due to the 8097 * __u16 sample size limit. 8098 */ 8099 if (attr->sample_stack_user >= USHRT_MAX) 8100 ret = -EINVAL; 8101 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 8102 ret = -EINVAL; 8103 } 8104 8105 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 8106 ret = perf_reg_validate(attr->sample_regs_intr); 8107 out: 8108 return ret; 8109 8110 err_size: 8111 put_user(sizeof(*attr), &uattr->size); 8112 ret = -E2BIG; 8113 goto out; 8114 } 8115 8116 static int 8117 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 8118 { 8119 struct ring_buffer *rb = NULL; 8120 int ret = -EINVAL; 8121 8122 if (!output_event) 8123 goto set; 8124 8125 /* don't allow circular references */ 8126 if (event == output_event) 8127 goto out; 8128 8129 /* 8130 * Don't allow cross-cpu buffers 8131 */ 8132 if (output_event->cpu != event->cpu) 8133 goto out; 8134 8135 /* 8136 * If its not a per-cpu rb, it must be the same task. 8137 */ 8138 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 8139 goto out; 8140 8141 /* 8142 * Mixing clocks in the same buffer is trouble you don't need. 
8143 */ 8144 if (output_event->clock != event->clock) 8145 goto out; 8146 8147 /* 8148 * If both events generate aux data, they must be on the same PMU 8149 */ 8150 if (has_aux(event) && has_aux(output_event) && 8151 event->pmu != output_event->pmu) 8152 goto out; 8153 8154 set: 8155 mutex_lock(&event->mmap_mutex); 8156 /* Can't redirect output if we've got an active mmap() */ 8157 if (atomic_read(&event->mmap_count)) 8158 goto unlock; 8159 8160 if (output_event) { 8161 /* get the rb we want to redirect to */ 8162 rb = ring_buffer_get(output_event); 8163 if (!rb) 8164 goto unlock; 8165 } 8166 8167 ring_buffer_attach(event, rb); 8168 8169 ret = 0; 8170 unlock: 8171 mutex_unlock(&event->mmap_mutex); 8172 8173 out: 8174 return ret; 8175 } 8176 8177 static void mutex_lock_double(struct mutex *a, struct mutex *b) 8178 { 8179 if (b < a) 8180 swap(a, b); 8181 8182 mutex_lock(a); 8183 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 8184 } 8185 8186 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 8187 { 8188 bool nmi_safe = false; 8189 8190 switch (clk_id) { 8191 case CLOCK_MONOTONIC: 8192 event->clock = &ktime_get_mono_fast_ns; 8193 nmi_safe = true; 8194 break; 8195 8196 case CLOCK_MONOTONIC_RAW: 8197 event->clock = &ktime_get_raw_fast_ns; 8198 nmi_safe = true; 8199 break; 8200 8201 case CLOCK_REALTIME: 8202 event->clock = &ktime_get_real_ns; 8203 break; 8204 8205 case CLOCK_BOOTTIME: 8206 event->clock = &ktime_get_boot_ns; 8207 break; 8208 8209 case CLOCK_TAI: 8210 event->clock = &ktime_get_tai_ns; 8211 break; 8212 8213 default: 8214 return -EINVAL; 8215 } 8216 8217 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 8218 return -EINVAL; 8219 8220 return 0; 8221 } 8222 8223 /** 8224 * sys_perf_event_open - open a performance event, associate it to a task/cpu 8225 * 8226 * @attr_uptr: event_id type attributes for monitoring/sampling 8227 * @pid: target pid 8228 * @cpu: target cpu 8229 * @group_fd: group leader event fd 8230 */ 8231 SYSCALL_DEFINE5(perf_event_open, 8232 struct perf_event_attr __user *, attr_uptr, 8233 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 8234 { 8235 struct perf_event *group_leader = NULL, *output_event = NULL; 8236 struct perf_event *event, *sibling; 8237 struct perf_event_attr attr; 8238 struct perf_event_context *ctx, *uninitialized_var(gctx); 8239 struct file *event_file = NULL; 8240 struct fd group = {NULL, 0}; 8241 struct task_struct *task = NULL; 8242 struct pmu *pmu; 8243 int event_fd; 8244 int move_group = 0; 8245 int err; 8246 int f_flags = O_RDWR; 8247 int cgroup_fd = -1; 8248 8249 /* for future expandability... */ 8250 if (flags & ~PERF_FLAG_ALL) 8251 return -EINVAL; 8252 8253 err = perf_copy_attr(attr_uptr, &attr); 8254 if (err) 8255 return err; 8256 8257 if (!attr.exclude_kernel) { 8258 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8259 return -EACCES; 8260 } 8261 8262 if (attr.freq) { 8263 if (attr.sample_freq > sysctl_perf_event_sample_rate) 8264 return -EINVAL; 8265 } else { 8266 if (attr.sample_period & (1ULL << 63)) 8267 return -EINVAL; 8268 } 8269 8270 /* 8271 * In cgroup mode, the pid argument is used to pass the fd 8272 * opened to the cgroup directory in cgroupfs. The cpu argument 8273 * designates the cpu on which to monitor threads from that 8274 * cgroup. 
8275 */ 8276 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 8277 return -EINVAL; 8278 8279 if (flags & PERF_FLAG_FD_CLOEXEC) 8280 f_flags |= O_CLOEXEC; 8281 8282 event_fd = get_unused_fd_flags(f_flags); 8283 if (event_fd < 0) 8284 return event_fd; 8285 8286 if (group_fd != -1) { 8287 err = perf_fget_light(group_fd, &group); 8288 if (err) 8289 goto err_fd; 8290 group_leader = group.file->private_data; 8291 if (flags & PERF_FLAG_FD_OUTPUT) 8292 output_event = group_leader; 8293 if (flags & PERF_FLAG_FD_NO_GROUP) 8294 group_leader = NULL; 8295 } 8296 8297 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 8298 task = find_lively_task_by_vpid(pid); 8299 if (IS_ERR(task)) { 8300 err = PTR_ERR(task); 8301 goto err_group_fd; 8302 } 8303 } 8304 8305 if (task && group_leader && 8306 group_leader->attr.inherit != attr.inherit) { 8307 err = -EINVAL; 8308 goto err_task; 8309 } 8310 8311 get_online_cpus(); 8312 8313 if (flags & PERF_FLAG_PID_CGROUP) 8314 cgroup_fd = pid; 8315 8316 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 8317 NULL, NULL, cgroup_fd); 8318 if (IS_ERR(event)) { 8319 err = PTR_ERR(event); 8320 goto err_cpus; 8321 } 8322 8323 if (is_sampling_event(event)) { 8324 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 8325 err = -ENOTSUPP; 8326 goto err_alloc; 8327 } 8328 } 8329 8330 account_event(event); 8331 8332 /* 8333 * Special case software events and allow them to be part of 8334 * any hardware group. 8335 */ 8336 pmu = event->pmu; 8337 8338 if (attr.use_clockid) { 8339 err = perf_event_set_clock(event, attr.clockid); 8340 if (err) 8341 goto err_alloc; 8342 } 8343 8344 if (group_leader && 8345 (is_software_event(event) != is_software_event(group_leader))) { 8346 if (is_software_event(event)) { 8347 /* 8348 * If event and group_leader are not both a software 8349 * event, and event is, then group leader is not. 8350 * 8351 * Allow the addition of software events to !software 8352 * groups, this is safe because software events never 8353 * fail to schedule. 8354 */ 8355 pmu = group_leader->pmu; 8356 } else if (is_software_event(group_leader) && 8357 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8358 /* 8359 * In case the group is a pure software group, and we 8360 * try to add a hardware event, move the whole group to 8361 * the hardware context. 8362 */ 8363 move_group = 1; 8364 } 8365 } 8366 8367 /* 8368 * Get the target context (task or percpu): 8369 */ 8370 ctx = find_get_context(pmu, task, event); 8371 if (IS_ERR(ctx)) { 8372 err = PTR_ERR(ctx); 8373 goto err_alloc; 8374 } 8375 8376 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8377 err = -EBUSY; 8378 goto err_context; 8379 } 8380 8381 if (task) { 8382 put_task_struct(task); 8383 task = NULL; 8384 } 8385 8386 /* 8387 * Look up the group leader (we will attach this event to it): 8388 */ 8389 if (group_leader) { 8390 err = -EINVAL; 8391 8392 /* 8393 * Do not allow a recursive hierarchy (this new sibling 8394 * becoming part of another group-sibling): 8395 */ 8396 if (group_leader->group_leader != group_leader) 8397 goto err_context; 8398 8399 /* All events in a group should have the same clock */ 8400 if (group_leader->clock != event->clock) 8401 goto err_context; 8402 8403 /* 8404 * Do not allow to attach to a group in a different 8405 * task or CPU context: 8406 */ 8407 if (move_group) { 8408 /* 8409 * Make sure we're both on the same task, or both 8410 * per-cpu events. 
8411 */ 8412 if (group_leader->ctx->task != ctx->task) 8413 goto err_context; 8414 8415 /* 8416 * Make sure we're both events for the same CPU; 8417 * grouping events for different CPUs is broken; since 8418 * you can never concurrently schedule them anyhow. 8419 */ 8420 if (group_leader->cpu != event->cpu) 8421 goto err_context; 8422 } else { 8423 if (group_leader->ctx != ctx) 8424 goto err_context; 8425 } 8426 8427 /* 8428 * Only a group leader can be exclusive or pinned 8429 */ 8430 if (attr.exclusive || attr.pinned) 8431 goto err_context; 8432 } 8433 8434 if (output_event) { 8435 err = perf_event_set_output(event, output_event); 8436 if (err) 8437 goto err_context; 8438 } 8439 8440 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 8441 f_flags); 8442 if (IS_ERR(event_file)) { 8443 err = PTR_ERR(event_file); 8444 goto err_context; 8445 } 8446 8447 if (move_group) { 8448 gctx = group_leader->ctx; 8449 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8450 } else { 8451 mutex_lock(&ctx->mutex); 8452 } 8453 8454 if (!perf_event_validate_size(event)) { 8455 err = -E2BIG; 8456 goto err_locked; 8457 } 8458 8459 /* 8460 * Must be under the same ctx::mutex as perf_install_in_context(), 8461 * because we need to serialize with concurrent event creation. 8462 */ 8463 if (!exclusive_event_installable(event, ctx)) { 8464 /* exclusive and group stuff are assumed mutually exclusive */ 8465 WARN_ON_ONCE(move_group); 8466 8467 err = -EBUSY; 8468 goto err_locked; 8469 } 8470 8471 WARN_ON_ONCE(ctx->parent_ctx); 8472 8473 if (move_group) { 8474 /* 8475 * See perf_event_ctx_lock() for comments on the details 8476 * of swizzling perf_event::ctx. 8477 */ 8478 perf_remove_from_context(group_leader, false); 8479 8480 list_for_each_entry(sibling, &group_leader->sibling_list, 8481 group_entry) { 8482 perf_remove_from_context(sibling, false); 8483 put_ctx(gctx); 8484 } 8485 8486 /* 8487 * Wait for everybody to stop referencing the events through 8488 * the old lists, before installing it on new lists. 8489 */ 8490 synchronize_rcu(); 8491 8492 /* 8493 * Install the group siblings before the group leader. 8494 * 8495 * Because a group leader will try and install the entire group 8496 * (through the sibling list, which is still in-tact), we can 8497 * end up with siblings installed in the wrong context. 8498 * 8499 * By installing siblings first we NO-OP because they're not 8500 * reachable through the group lists. 8501 */ 8502 list_for_each_entry(sibling, &group_leader->sibling_list, 8503 group_entry) { 8504 perf_event__state_init(sibling); 8505 perf_install_in_context(ctx, sibling, sibling->cpu); 8506 get_ctx(ctx); 8507 } 8508 8509 /* 8510 * Removing from the context ends up with disabled 8511 * event. What we want here is event in the initial 8512 * startup state, ready to be add into new context. 8513 */ 8514 perf_event__state_init(group_leader); 8515 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8516 get_ctx(ctx); 8517 8518 /* 8519 * Now that all events are installed in @ctx, nothing 8520 * references @gctx anymore, so drop the last reference we have 8521 * on it. 8522 */ 8523 put_ctx(gctx); 8524 } 8525 8526 /* 8527 * Precalculate sample_data sizes; do while holding ctx::mutex such 8528 * that we're serialized against further additions and before 8529 * perf_install_in_context() which is the point the event is active and 8530 * can use these values. 
8531 */ 8532 perf_event__header_size(event); 8533 perf_event__id_header_size(event); 8534 8535 perf_install_in_context(ctx, event, event->cpu); 8536 perf_unpin_context(ctx); 8537 8538 if (move_group) 8539 mutex_unlock(&gctx->mutex); 8540 mutex_unlock(&ctx->mutex); 8541 8542 put_online_cpus(); 8543 8544 event->owner = current; 8545 8546 mutex_lock(¤t->perf_event_mutex); 8547 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 8548 mutex_unlock(¤t->perf_event_mutex); 8549 8550 /* 8551 * Drop the reference on the group_event after placing the 8552 * new event on the sibling_list. This ensures destruction 8553 * of the group leader will find the pointer to itself in 8554 * perf_group_detach(). 8555 */ 8556 fdput(group); 8557 fd_install(event_fd, event_file); 8558 return event_fd; 8559 8560 err_locked: 8561 if (move_group) 8562 mutex_unlock(&gctx->mutex); 8563 mutex_unlock(&ctx->mutex); 8564 /* err_file: */ 8565 fput(event_file); 8566 err_context: 8567 perf_unpin_context(ctx); 8568 put_ctx(ctx); 8569 err_alloc: 8570 free_event(event); 8571 err_cpus: 8572 put_online_cpus(); 8573 err_task: 8574 if (task) 8575 put_task_struct(task); 8576 err_group_fd: 8577 fdput(group); 8578 err_fd: 8579 put_unused_fd(event_fd); 8580 return err; 8581 } 8582 8583 /** 8584 * perf_event_create_kernel_counter 8585 * 8586 * @attr: attributes of the counter to create 8587 * @cpu: cpu in which the counter is bound 8588 * @task: task to profile (NULL for percpu) 8589 */ 8590 struct perf_event * 8591 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8592 struct task_struct *task, 8593 perf_overflow_handler_t overflow_handler, 8594 void *context) 8595 { 8596 struct perf_event_context *ctx; 8597 struct perf_event *event; 8598 int err; 8599 8600 /* 8601 * Get the target context (task or percpu): 8602 */ 8603 8604 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8605 overflow_handler, context, -1); 8606 if (IS_ERR(event)) { 8607 err = PTR_ERR(event); 8608 goto err; 8609 } 8610 8611 /* Mark owner so we could distinguish it from user events. */ 8612 event->owner = EVENT_OWNER_KERNEL; 8613 8614 account_event(event); 8615 8616 ctx = find_get_context(event->pmu, task, event); 8617 if (IS_ERR(ctx)) { 8618 err = PTR_ERR(ctx); 8619 goto err_free; 8620 } 8621 8622 WARN_ON_ONCE(ctx->parent_ctx); 8623 mutex_lock(&ctx->mutex); 8624 if (!exclusive_event_installable(event, ctx)) { 8625 mutex_unlock(&ctx->mutex); 8626 perf_unpin_context(ctx); 8627 put_ctx(ctx); 8628 err = -EBUSY; 8629 goto err_free; 8630 } 8631 8632 perf_install_in_context(ctx, event, cpu); 8633 perf_unpin_context(ctx); 8634 mutex_unlock(&ctx->mutex); 8635 8636 return event; 8637 8638 err_free: 8639 free_event(event); 8640 err: 8641 return ERR_PTR(err); 8642 } 8643 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8644 8645 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8646 { 8647 struct perf_event_context *src_ctx; 8648 struct perf_event_context *dst_ctx; 8649 struct perf_event *event, *tmp; 8650 LIST_HEAD(events); 8651 8652 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8653 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8654 8655 /* 8656 * See perf_event_ctx_lock() for comments on the details 8657 * of swizzling perf_event::ctx. 
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	/*
	 * See perf_event_ctx_lock() for comments on the details
	 * of swizzling perf_event::ctx.
	 */
	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
		unaccount_event_cpu(event, src_cpu);
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}

	/*
	 * Wait for the events to quiesce before re-instating them.
	 */
	synchronize_rcu();

	/*
	 * Re-instate the events in two passes.
	 *
	 * Skip over group leaders and only install siblings on this first
	 * pass; siblings will not get enabled without a leader, but a
	 * leader will enable its siblings, even if those are still on the
	 * old context.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		if (event->group_leader == event)
			continue;

		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}

	/*
	 * Once all the siblings are set up properly, install the group
	 * leaders to make it go.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
	mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
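/*
 * Usage sketch (illustrative only): perf_pmu_migrate_context() is meant
 * for PMUs whose events must all live on one designated CPU (e.g. the x86
 * uncore PMUs).  When that CPU goes offline, the driver hands its events
 * to another CPU from its hotplug callback; the names my_pmu and
 * my_pmu_cpu_offline below are hypothetical:
 *
 *	static void my_pmu_cpu_offline(int cpu)
 *	{
 *		int target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 *
 *		if (target >= nr_cpu_ids)
 *			return;
 *
 *		perf_pmu_migrate_context(&my_pmu, cpu, target);
 *	}
 */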
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Make sure user/parent gets notified that we just
	 * lost one event.
	 */
	perf_event_wakeup(parent_event);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	/*
	 * Do not destroy the 'original' grouping; because of the context
	 * switch optimization the original events could've ended up in a
	 * random child task.
	 *
	 * If we were to destroy the original group, all group related
	 * operations would cease to function properly after this random
	 * child dies.
	 *
	 * Do destroy all inherited groups; we don't care about those,
	 * and being thorough is better.
	 */
	perf_remove_from_context(child_event, !!child_event->parent);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	} else {
		child_event->state = PERF_EVENT_STATE_EXIT;
		perf_event_wakeup(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *next;
	struct perf_event_context *child_ctx, *clone_ctx = NULL;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;

	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	clone_ctx = unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	if (clone_ctx)
		put_ctx(clone_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       put_event()
	 *         mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner; this closes a race against perf_release(),
		 * where we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 *
 * Not all locks are strictly required, but take them anyway to be nice and
 * help out with the lockdep assertions.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

struct perf_event *perf_event_get(unsigned int fd)
{
	int err;
	struct fd f;
	struct perf_event *event;

	err = perf_fget_light(fd, &f);
	if (err)
		return ERR_PTR(err);

	event = f.file->private_data;
	atomic_long_inc(&event->refcount);
	fdput(f);

	return event;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	if (!event)
		return ERR_PTR(-EINVAL);

	return &event->attr;
}
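/*
 * Note on the two helpers above (added for clarity): perf_event_get()
 * resolves a perf event file descriptor to its struct perf_event and takes
 * an extra reference on it, which the in-kernel caller must drop again when
 * it is done with the event; the BPF perf-event array map is one such
 * caller.  perf_event_attrs() simply exposes the event's attributes so a
 * caller can sanity-check type/config before using the event.
 */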
/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}
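/*
 * Summary of the copy-on-fork path below (descriptive comment, added for
 * clarity): for each context number the parent owns,
 * perf_event_init_context() first inherits the pinned groups and then, with
 * rotation disabled, the flexible groups, using inherit_task_group() above
 * for every group leader.  Only if every group was inheritable does the
 * child context become a clone of the parent (sharing parent_ctx and the
 * parent's generation), which is what later allows the context-switch
 * optimization to swap whole contexts instead of rescheduling individual
 * events.
 */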
/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
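/*
 * Usage sketch (illustrative only): PMU drivers export their named events
 * through perf_event_sysfs_show() above by embedding it in a
 * struct perf_pmu_events_attr; the names below are hypothetical:
 *
 *	static struct perf_pmu_events_attr my_cycles_attr = {
 *		.attr		= __ATTR(cycles, 0444, perf_event_sysfs_show, NULL),
 *		.event_str	= "event=0x3c",
 *	};
 *
 * &my_cycles_attr.attr.attr is then listed in the "events" attribute group
 * referenced from the driver's struct pmu::attr_groups.
 */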
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
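/*
 * Note (added for context): the CONFIG_CGROUP_PERF block above backs the
 * "perf_event" cgroup controller.  Userspace reaches it by passing
 * PERF_FLAG_PID_CGROUP to perf_event_open() with a file descriptor of the
 * cgroup directory in place of a pid, which is what tools such as
 * "perf stat -G <cgroup>" do.
 */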