1 /* 2 * Performance events core code: 3 * 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 8 * 9 * For licensing details see kernel-base/COPYING 10 */ 11 12 #include <linux/fs.h> 13 #include <linux/mm.h> 14 #include <linux/cpu.h> 15 #include <linux/smp.h> 16 #include <linux/idr.h> 17 #include <linux/file.h> 18 #include <linux/poll.h> 19 #include <linux/slab.h> 20 #include <linux/hash.h> 21 #include <linux/tick.h> 22 #include <linux/sysfs.h> 23 #include <linux/dcache.h> 24 #include <linux/percpu.h> 25 #include <linux/ptrace.h> 26 #include <linux/reboot.h> 27 #include <linux/vmstat.h> 28 #include <linux/device.h> 29 #include <linux/export.h> 30 #include <linux/vmalloc.h> 31 #include <linux/hardirq.h> 32 #include <linux/rculist.h> 33 #include <linux/uaccess.h> 34 #include <linux/syscalls.h> 35 #include <linux/anon_inodes.h> 36 #include <linux/kernel_stat.h> 37 #include <linux/cgroup.h> 38 #include <linux/perf_event.h> 39 #include <linux/trace_events.h> 40 #include <linux/hw_breakpoint.h> 41 #include <linux/mm_types.h> 42 #include <linux/module.h> 43 #include <linux/mman.h> 44 #include <linux/compat.h> 45 #include <linux/bpf.h> 46 #include <linux/filter.h> 47 48 #include "internal.h" 49 50 #include <asm/irq_regs.h> 51 52 static struct workqueue_struct *perf_wq; 53 54 typedef int (*remote_function_f)(void *); 55 56 struct remote_function_call { 57 struct task_struct *p; 58 remote_function_f func; 59 void *info; 60 int ret; 61 }; 62 63 static void remote_function(void *data) 64 { 65 struct remote_function_call *tfc = data; 66 struct task_struct *p = tfc->p; 67 68 if (p) { 69 tfc->ret = -EAGAIN; 70 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 71 return; 72 } 73 74 tfc->ret = tfc->func(tfc->info); 75 } 76 77 /** 78 * task_function_call - call a function on the cpu on which a task runs 79 * @p: the task to evaluate 80 * @func: the function to be called 81 * @info: the function call argument 82 * 83 * Calls the function @func when the task is currently running. This might 84 * be on the current CPU, which just calls the function directly 85 * 86 * returns: @func return value, or 87 * -ESRCH - when the process isn't running 88 * -EAGAIN - when the process moved away 89 */ 90 static int 91 task_function_call(struct task_struct *p, remote_function_f func, void *info) 92 { 93 struct remote_function_call data = { 94 .p = p, 95 .func = func, 96 .info = info, 97 .ret = -ESRCH, /* No such (running) process */ 98 }; 99 100 if (task_curr(p)) 101 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 102 103 return data.ret; 104 } 105 106 /** 107 * cpu_function_call - call a function on the cpu 108 * @func: the function to be called 109 * @info: the function call argument 110 * 111 * Calls the function @func on the remote cpu. 
112 * 113 * returns: @func return value or -ENXIO when the cpu is offline 114 */ 115 static int cpu_function_call(int cpu, remote_function_f func, void *info) 116 { 117 struct remote_function_call data = { 118 .p = NULL, 119 .func = func, 120 .info = info, 121 .ret = -ENXIO, /* No such CPU */ 122 }; 123 124 smp_call_function_single(cpu, remote_function, &data, 1); 125 126 return data.ret; 127 } 128 129 #define EVENT_OWNER_KERNEL ((void *) -1) 130 131 static bool is_kernel_event(struct perf_event *event) 132 { 133 return event->owner == EVENT_OWNER_KERNEL; 134 } 135 136 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ 137 PERF_FLAG_FD_OUTPUT |\ 138 PERF_FLAG_PID_CGROUP |\ 139 PERF_FLAG_FD_CLOEXEC) 140 141 /* 142 * branch priv levels that need permission checks 143 */ 144 #define PERF_SAMPLE_BRANCH_PERM_PLM \ 145 (PERF_SAMPLE_BRANCH_KERNEL |\ 146 PERF_SAMPLE_BRANCH_HV) 147 148 enum event_type_t { 149 EVENT_FLEXIBLE = 0x1, 150 EVENT_PINNED = 0x2, 151 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 152 }; 153 154 /* 155 * perf_sched_events : >0 events exist 156 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 157 */ 158 struct static_key_deferred perf_sched_events __read_mostly; 159 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 160 static DEFINE_PER_CPU(int, perf_sched_cb_usages); 161 162 static atomic_t nr_mmap_events __read_mostly; 163 static atomic_t nr_comm_events __read_mostly; 164 static atomic_t nr_task_events __read_mostly; 165 static atomic_t nr_freq_events __read_mostly; 166 static atomic_t nr_switch_events __read_mostly; 167 168 static LIST_HEAD(pmus); 169 static DEFINE_MUTEX(pmus_lock); 170 static struct srcu_struct pmus_srcu; 171 172 /* 173 * perf event paranoia level: 174 * -1 - not paranoid at all 175 * 0 - disallow raw tracepoint access for unpriv 176 * 1 - disallow cpu events for unpriv 177 * 2 - disallow kernel profiling for unpriv 178 */ 179 int sysctl_perf_event_paranoid __read_mostly = 1; 180 181 /* Minimum for 512 kiB + 1 user control page */ 182 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ 183 184 /* 185 * max perf event sample rate 186 */ 187 #define DEFAULT_MAX_SAMPLE_RATE 100000 188 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) 189 #define DEFAULT_CPU_TIME_MAX_PERCENT 25 190 191 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; 192 193 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); 194 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; 195 196 static int perf_sample_allowed_ns __read_mostly = 197 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; 198 199 void update_perf_cpu_limits(void) 200 { 201 u64 tmp = perf_sample_period_ns; 202 203 tmp *= sysctl_perf_cpu_time_max_percent; 204 do_div(tmp, 100); 205 ACCESS_ONCE(perf_sample_allowed_ns) = tmp; 206 } 207 208 static int perf_rotate_context(struct perf_cpu_context *cpuctx); 209 210 int perf_proc_update_handler(struct ctl_table *table, int write, 211 void __user *buffer, size_t *lenp, 212 loff_t *ppos) 213 { 214 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 215 216 if (ret || !write) 217 return ret; 218 219 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 220 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 221 update_perf_cpu_limits(); 222 223 return 0; 224 } 225 226 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; 227 228 
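/*
 * Illustrative sketch (not part of the original file): the two sysctls
 * above (sample rate and max CPU percent) combine as in
 * update_perf_cpu_limits(). The helper below is a hypothetical,
 * stand-alone version of that arithmetic; with the defaults
 * (100000 samples/sec, 25% CPU) each sample gets a 10000 ns period and
 * perf_sample_allowed_ns ends up at 2500 ns.
 */
static inline u64 perf_sample_budget_ns(int sample_rate, int cpu_max_percent)
{
	u64 budget = NSEC_PER_SEC / sample_rate;	/* ns available per sample */

	budget *= cpu_max_percent;			/* fraction perf may consume */
	do_div(budget, 100);
	return budget;					/* 2500 ns with the defaults */
}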
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
288 */ 289 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 290 291 if (avg_local_sample_len <= allowed_ns) 292 return; 293 294 if (max_samples_per_tick <= 1) 295 return; 296 297 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2); 298 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; 299 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 300 301 update_perf_cpu_limits(); 302 303 if (!irq_work_queue(&perf_duration_work)) { 304 early_printk("perf interrupt took too long (%lld > %lld), lowering " 305 "kernel.perf_event_max_sample_rate to %d\n", 306 avg_local_sample_len, allowed_ns >> 1, 307 sysctl_perf_event_sample_rate); 308 } 309 } 310 311 static atomic64_t perf_event_id; 312 313 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 314 enum event_type_t event_type); 315 316 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 317 enum event_type_t event_type, 318 struct task_struct *task); 319 320 static void update_context_time(struct perf_event_context *ctx); 321 static u64 perf_event_time(struct perf_event *event); 322 323 void __weak perf_event_print_debug(void) { } 324 325 extern __weak const char *perf_pmu_name(void) 326 { 327 return "pmu"; 328 } 329 330 static inline u64 perf_clock(void) 331 { 332 return local_clock(); 333 } 334 335 static inline u64 perf_event_clock(struct perf_event *event) 336 { 337 return event->clock(); 338 } 339 340 static inline struct perf_cpu_context * 341 __get_cpu_context(struct perf_event_context *ctx) 342 { 343 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); 344 } 345 346 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, 347 struct perf_event_context *ctx) 348 { 349 raw_spin_lock(&cpuctx->ctx.lock); 350 if (ctx) 351 raw_spin_lock(&ctx->lock); 352 } 353 354 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, 355 struct perf_event_context *ctx) 356 { 357 if (ctx) 358 raw_spin_unlock(&ctx->lock); 359 raw_spin_unlock(&cpuctx->ctx.lock); 360 } 361 362 #ifdef CONFIG_CGROUP_PERF 363 364 static inline bool 365 perf_cgroup_match(struct perf_event *event) 366 { 367 struct perf_event_context *ctx = event->ctx; 368 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 369 370 /* @event doesn't care about cgroup */ 371 if (!event->cgrp) 372 return true; 373 374 /* wants specific cgroup scope but @cpuctx isn't associated with any */ 375 if (!cpuctx->cgrp) 376 return false; 377 378 /* 379 * Cgroup scoping is recursive. An event enabled for a cgroup is 380 * also enabled for all its descendant cgroups. If @cpuctx's 381 * cgroup is a descendant of @event's (the test covers identity 382 * case), it's a match. 
383 */ 384 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, 385 event->cgrp->css.cgroup); 386 } 387 388 static inline void perf_detach_cgroup(struct perf_event *event) 389 { 390 css_put(&event->cgrp->css); 391 event->cgrp = NULL; 392 } 393 394 static inline int is_cgroup_event(struct perf_event *event) 395 { 396 return event->cgrp != NULL; 397 } 398 399 static inline u64 perf_cgroup_event_time(struct perf_event *event) 400 { 401 struct perf_cgroup_info *t; 402 403 t = per_cpu_ptr(event->cgrp->info, event->cpu); 404 return t->time; 405 } 406 407 static inline void __update_cgrp_time(struct perf_cgroup *cgrp) 408 { 409 struct perf_cgroup_info *info; 410 u64 now; 411 412 now = perf_clock(); 413 414 info = this_cpu_ptr(cgrp->info); 415 416 info->time += now - info->timestamp; 417 info->timestamp = now; 418 } 419 420 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 421 { 422 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 423 if (cgrp_out) 424 __update_cgrp_time(cgrp_out); 425 } 426 427 static inline void update_cgrp_time_from_event(struct perf_event *event) 428 { 429 struct perf_cgroup *cgrp; 430 431 /* 432 * ensure we access cgroup data only when needed and 433 * when we know the cgroup is pinned (css_get) 434 */ 435 if (!is_cgroup_event(event)) 436 return; 437 438 cgrp = perf_cgroup_from_task(current); 439 /* 440 * Do not update time when cgroup is not active 441 */ 442 if (cgrp == event->cgrp) 443 __update_cgrp_time(event->cgrp); 444 } 445 446 static inline void 447 perf_cgroup_set_timestamp(struct task_struct *task, 448 struct perf_event_context *ctx) 449 { 450 struct perf_cgroup *cgrp; 451 struct perf_cgroup_info *info; 452 453 /* 454 * ctx->lock held by caller 455 * ensure we do not access cgroup data 456 * unless we have the cgroup pinned (css_get) 457 */ 458 if (!task || !ctx->nr_cgroups) 459 return; 460 461 cgrp = perf_cgroup_from_task(task); 462 info = this_cpu_ptr(cgrp->info); 463 info->timestamp = ctx->timestamp; 464 } 465 466 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ 467 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ 468 469 /* 470 * reschedule events based on the cgroup constraint of task. 471 * 472 * mode SWOUT : schedule out everything 473 * mode SWIN : schedule in based on cgroup for next 474 */ 475 void perf_cgroup_switch(struct task_struct *task, int mode) 476 { 477 struct perf_cpu_context *cpuctx; 478 struct pmu *pmu; 479 unsigned long flags; 480 481 /* 482 * disable interrupts to avoid geting nr_cgroup 483 * changes via __perf_event_disable(). Also 484 * avoids preemption. 485 */ 486 local_irq_save(flags); 487 488 /* 489 * we reschedule only in the presence of cgroup 490 * constrained events. 491 */ 492 rcu_read_lock(); 493 494 list_for_each_entry_rcu(pmu, &pmus, entry) { 495 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 496 if (cpuctx->unique_pmu != pmu) 497 continue; /* ensure we process each cpuctx once */ 498 499 /* 500 * perf_cgroup_events says at least one 501 * context on this CPU has cgroup events. 502 * 503 * ctx->nr_cgroups reports the number of cgroup 504 * events for a context. 
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out at ctxsw out if that was not the case.
584 */ 585 if (cgrp1 != cgrp2) 586 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 587 } 588 589 static inline int perf_cgroup_connect(int fd, struct perf_event *event, 590 struct perf_event_attr *attr, 591 struct perf_event *group_leader) 592 { 593 struct perf_cgroup *cgrp; 594 struct cgroup_subsys_state *css; 595 struct fd f = fdget(fd); 596 int ret = 0; 597 598 if (!f.file) 599 return -EBADF; 600 601 css = css_tryget_online_from_dir(f.file->f_path.dentry, 602 &perf_event_cgrp_subsys); 603 if (IS_ERR(css)) { 604 ret = PTR_ERR(css); 605 goto out; 606 } 607 608 cgrp = container_of(css, struct perf_cgroup, css); 609 event->cgrp = cgrp; 610 611 /* 612 * all events in a group must monitor 613 * the same cgroup because a task belongs 614 * to only one perf cgroup at a time 615 */ 616 if (group_leader && group_leader->cgrp != cgrp) { 617 perf_detach_cgroup(event); 618 ret = -EINVAL; 619 } 620 out: 621 fdput(f); 622 return ret; 623 } 624 625 static inline void 626 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 627 { 628 struct perf_cgroup_info *t; 629 t = per_cpu_ptr(event->cgrp->info, event->cpu); 630 event->shadow_ctx_time = now - t->timestamp; 631 } 632 633 static inline void 634 perf_cgroup_defer_enabled(struct perf_event *event) 635 { 636 /* 637 * when the current task's perf cgroup does not match 638 * the event's, we need to remember to call the 639 * perf_mark_enable() function the first time a task with 640 * a matching perf cgroup is scheduled in. 641 */ 642 if (is_cgroup_event(event) && !perf_cgroup_match(event)) 643 event->cgrp_defer_enabled = 1; 644 } 645 646 static inline void 647 perf_cgroup_mark_enabled(struct perf_event *event, 648 struct perf_event_context *ctx) 649 { 650 struct perf_event *sub; 651 u64 tstamp = perf_event_time(event); 652 653 if (!event->cgrp_defer_enabled) 654 return; 655 656 event->cgrp_defer_enabled = 0; 657 658 event->tstamp_enabled = tstamp - event->total_time_enabled; 659 list_for_each_entry(sub, &event->sibling_list, group_entry) { 660 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { 661 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 662 sub->cgrp_defer_enabled = 0; 663 } 664 } 665 } 666 #else /* !CONFIG_CGROUP_PERF */ 667 668 static inline bool 669 perf_cgroup_match(struct perf_event *event) 670 { 671 return true; 672 } 673 674 static inline void perf_detach_cgroup(struct perf_event *event) 675 {} 676 677 static inline int is_cgroup_event(struct perf_event *event) 678 { 679 return 0; 680 } 681 682 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) 683 { 684 return 0; 685 } 686 687 static inline void update_cgrp_time_from_event(struct perf_event *event) 688 { 689 } 690 691 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 692 { 693 } 694 695 static inline void perf_cgroup_sched_out(struct task_struct *task, 696 struct task_struct *next) 697 { 698 } 699 700 static inline void perf_cgroup_sched_in(struct task_struct *prev, 701 struct task_struct *task) 702 { 703 } 704 705 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, 706 struct perf_event_attr *attr, 707 struct perf_event *group_leader) 708 { 709 return -EINVAL; 710 } 711 712 static inline void 713 perf_cgroup_set_timestamp(struct task_struct *task, 714 struct perf_event_context *ctx) 715 { 716 } 717 718 void 719 perf_cgroup_switch(struct task_struct *task, struct task_struct *next) 720 { 721 } 722 723 static inline void 724 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 725 { 726 } 727 728 
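/*
 * Illustrative sketch (not part of the original file): __update_cgrp_time()
 * above and update_context_time() later in this file both follow the same
 * "fold in the delta on every read" pattern. The struct and helper below
 * are hypothetical stand-ins that show that bookkeeping in isolation.
 */
struct perf_time_acc {
	u64	time;		/* accumulated time so far */
	u64	timestamp;	/* clock value at the last fold */
};

static inline void perf_time_acc_advance(struct perf_time_acc *acc, u64 now)
{
	/* credit the time elapsed since the last update, then move the mark */
	acc->time += now - acc->timestamp;
	acc->timestamp = now;
}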
static inline u64 perf_cgroup_event_time(struct perf_event *event) 729 { 730 return 0; 731 } 732 733 static inline void 734 perf_cgroup_defer_enabled(struct perf_event *event) 735 { 736 } 737 738 static inline void 739 perf_cgroup_mark_enabled(struct perf_event *event, 740 struct perf_event_context *ctx) 741 { 742 } 743 #endif 744 745 /* 746 * set default to be dependent on timer tick just 747 * like original code 748 */ 749 #define PERF_CPU_HRTIMER (1000 / HZ) 750 /* 751 * function must be called with interrupts disbled 752 */ 753 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) 754 { 755 struct perf_cpu_context *cpuctx; 756 int rotations = 0; 757 758 WARN_ON(!irqs_disabled()); 759 760 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); 761 rotations = perf_rotate_context(cpuctx); 762 763 raw_spin_lock(&cpuctx->hrtimer_lock); 764 if (rotations) 765 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); 766 else 767 cpuctx->hrtimer_active = 0; 768 raw_spin_unlock(&cpuctx->hrtimer_lock); 769 770 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; 771 } 772 773 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) 774 { 775 struct hrtimer *timer = &cpuctx->hrtimer; 776 struct pmu *pmu = cpuctx->ctx.pmu; 777 u64 interval; 778 779 /* no multiplexing needed for SW PMU */ 780 if (pmu->task_ctx_nr == perf_sw_context) 781 return; 782 783 /* 784 * check default is sane, if not set then force to 785 * default interval (1/tick) 786 */ 787 interval = pmu->hrtimer_interval_ms; 788 if (interval < 1) 789 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; 790 791 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); 792 793 raw_spin_lock_init(&cpuctx->hrtimer_lock); 794 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 795 timer->function = perf_mux_hrtimer_handler; 796 } 797 798 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) 799 { 800 struct hrtimer *timer = &cpuctx->hrtimer; 801 struct pmu *pmu = cpuctx->ctx.pmu; 802 unsigned long flags; 803 804 /* not for SW PMU */ 805 if (pmu->task_ctx_nr == perf_sw_context) 806 return 0; 807 808 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); 809 if (!cpuctx->hrtimer_active) { 810 cpuctx->hrtimer_active = 1; 811 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); 812 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 813 } 814 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); 815 816 return 0; 817 } 818 819 void perf_pmu_disable(struct pmu *pmu) 820 { 821 int *count = this_cpu_ptr(pmu->pmu_disable_count); 822 if (!(*count)++) 823 pmu->pmu_disable(pmu); 824 } 825 826 void perf_pmu_enable(struct pmu *pmu) 827 { 828 int *count = this_cpu_ptr(pmu->pmu_disable_count); 829 if (!--(*count)) 830 pmu->pmu_enable(pmu); 831 } 832 833 static DEFINE_PER_CPU(struct list_head, active_ctx_list); 834 835 /* 836 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and 837 * perf_event_task_tick() are fully serialized because they're strictly cpu 838 * affine and perf_event_ctx{activate,deactivate} are called with IRQs 839 * disabled, while perf_event_task_tick is called from IRQ context. 
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      __perf_event_exit_task()
 *        sync_child_event()
 *          put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However, because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
938 * 939 * Lock order: 940 * task_struct::perf_event_mutex 941 * perf_event_context::mutex 942 * perf_event_context::lock 943 * perf_event::child_mutex; 944 * perf_event::mmap_mutex 945 * mmap_sem 946 */ 947 static struct perf_event_context * 948 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) 949 { 950 struct perf_event_context *ctx; 951 952 again: 953 rcu_read_lock(); 954 ctx = ACCESS_ONCE(event->ctx); 955 if (!atomic_inc_not_zero(&ctx->refcount)) { 956 rcu_read_unlock(); 957 goto again; 958 } 959 rcu_read_unlock(); 960 961 mutex_lock_nested(&ctx->mutex, nesting); 962 if (event->ctx != ctx) { 963 mutex_unlock(&ctx->mutex); 964 put_ctx(ctx); 965 goto again; 966 } 967 968 return ctx; 969 } 970 971 static inline struct perf_event_context * 972 perf_event_ctx_lock(struct perf_event *event) 973 { 974 return perf_event_ctx_lock_nested(event, 0); 975 } 976 977 static void perf_event_ctx_unlock(struct perf_event *event, 978 struct perf_event_context *ctx) 979 { 980 mutex_unlock(&ctx->mutex); 981 put_ctx(ctx); 982 } 983 984 /* 985 * This must be done under the ctx->lock, such as to serialize against 986 * context_equiv(), therefore we cannot call put_ctx() since that might end up 987 * calling scheduler related locks and ctx->lock nests inside those. 988 */ 989 static __must_check struct perf_event_context * 990 unclone_ctx(struct perf_event_context *ctx) 991 { 992 struct perf_event_context *parent_ctx = ctx->parent_ctx; 993 994 lockdep_assert_held(&ctx->lock); 995 996 if (parent_ctx) 997 ctx->parent_ctx = NULL; 998 ctx->generation++; 999 1000 return parent_ctx; 1001 } 1002 1003 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) 1004 { 1005 /* 1006 * only top level events have the pid namespace they were created in 1007 */ 1008 if (event->parent) 1009 event = event->parent; 1010 1011 return task_tgid_nr_ns(p, event->ns); 1012 } 1013 1014 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) 1015 { 1016 /* 1017 * only top level events have the pid namespace they were created in 1018 */ 1019 if (event->parent) 1020 event = event->parent; 1021 1022 return task_pid_nr_ns(p, event->ns); 1023 } 1024 1025 /* 1026 * If we inherit events we want to return the parent event id 1027 * to userspace. 1028 */ 1029 static u64 primary_event_id(struct perf_event *event) 1030 { 1031 u64 id = event->id; 1032 1033 if (event->parent) 1034 id = event->parent->id; 1035 1036 return id; 1037 } 1038 1039 /* 1040 * Get the perf_event_context for a task and lock it. 1041 * This has to cope with with the fact that until it is locked, 1042 * the context could get moved to another task. 1043 */ 1044 static struct perf_event_context * 1045 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) 1046 { 1047 struct perf_event_context *ctx; 1048 1049 retry: 1050 /* 1051 * One of the few rules of preemptible RCU is that one cannot do 1052 * rcu_read_unlock() while holding a scheduler (or nested) lock when 1053 * part of the read side critical section was preemptible -- see 1054 * rcu_read_unlock_special(). 1055 * 1056 * Since ctx->lock nests under rq->lock we must ensure the entire read 1057 * side critical section is non-preemptible. 
1058 */ 1059 preempt_disable(); 1060 rcu_read_lock(); 1061 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); 1062 if (ctx) { 1063 /* 1064 * If this context is a clone of another, it might 1065 * get swapped for another underneath us by 1066 * perf_event_task_sched_out, though the 1067 * rcu_read_lock() protects us from any context 1068 * getting freed. Lock the context and check if it 1069 * got swapped before we could get the lock, and retry 1070 * if so. If we locked the right context, then it 1071 * can't get swapped on us any more. 1072 */ 1073 raw_spin_lock_irqsave(&ctx->lock, *flags); 1074 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { 1075 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1076 rcu_read_unlock(); 1077 preempt_enable(); 1078 goto retry; 1079 } 1080 1081 if (!atomic_inc_not_zero(&ctx->refcount)) { 1082 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1083 ctx = NULL; 1084 } 1085 } 1086 rcu_read_unlock(); 1087 preempt_enable(); 1088 return ctx; 1089 } 1090 1091 /* 1092 * Get the context for a task and increment its pin_count so it 1093 * can't get swapped to another task. This also increments its 1094 * reference count so that the context can't get freed. 1095 */ 1096 static struct perf_event_context * 1097 perf_pin_task_context(struct task_struct *task, int ctxn) 1098 { 1099 struct perf_event_context *ctx; 1100 unsigned long flags; 1101 1102 ctx = perf_lock_task_context(task, ctxn, &flags); 1103 if (ctx) { 1104 ++ctx->pin_count; 1105 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1106 } 1107 return ctx; 1108 } 1109 1110 static void perf_unpin_context(struct perf_event_context *ctx) 1111 { 1112 unsigned long flags; 1113 1114 raw_spin_lock_irqsave(&ctx->lock, flags); 1115 --ctx->pin_count; 1116 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1117 } 1118 1119 /* 1120 * Update the record of the current time in a context. 1121 */ 1122 static void update_context_time(struct perf_event_context *ctx) 1123 { 1124 u64 now = perf_clock(); 1125 1126 ctx->time += now - ctx->timestamp; 1127 ctx->timestamp = now; 1128 } 1129 1130 static u64 perf_event_time(struct perf_event *event) 1131 { 1132 struct perf_event_context *ctx = event->ctx; 1133 1134 if (is_cgroup_event(event)) 1135 return perf_cgroup_event_time(event); 1136 1137 return ctx ? ctx->time : 0; 1138 } 1139 1140 /* 1141 * Update the total_time_enabled and total_time_running fields for a event. 1142 * The caller of this function needs to hold the ctx->lock. 1143 */ 1144 static void update_event_times(struct perf_event *event) 1145 { 1146 struct perf_event_context *ctx = event->ctx; 1147 u64 run_end; 1148 1149 if (event->state < PERF_EVENT_STATE_INACTIVE || 1150 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 1151 return; 1152 /* 1153 * in cgroup mode, time_enabled represents 1154 * the time the event was enabled AND active 1155 * tasks were in the monitored cgroup. This is 1156 * independent of the activity of the context as 1157 * there may be a mix of cgroup and non-cgroup events. 1158 * 1159 * That is why we treat cgroup events differently 1160 * here. 
1161 */ 1162 if (is_cgroup_event(event)) 1163 run_end = perf_cgroup_event_time(event); 1164 else if (ctx->is_active) 1165 run_end = ctx->time; 1166 else 1167 run_end = event->tstamp_stopped; 1168 1169 event->total_time_enabled = run_end - event->tstamp_enabled; 1170 1171 if (event->state == PERF_EVENT_STATE_INACTIVE) 1172 run_end = event->tstamp_stopped; 1173 else 1174 run_end = perf_event_time(event); 1175 1176 event->total_time_running = run_end - event->tstamp_running; 1177 1178 } 1179 1180 /* 1181 * Update total_time_enabled and total_time_running for all events in a group. 1182 */ 1183 static void update_group_times(struct perf_event *leader) 1184 { 1185 struct perf_event *event; 1186 1187 update_event_times(leader); 1188 list_for_each_entry(event, &leader->sibling_list, group_entry) 1189 update_event_times(event); 1190 } 1191 1192 static struct list_head * 1193 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) 1194 { 1195 if (event->attr.pinned) 1196 return &ctx->pinned_groups; 1197 else 1198 return &ctx->flexible_groups; 1199 } 1200 1201 /* 1202 * Add a event from the lists for its context. 1203 * Must be called with ctx->mutex and ctx->lock held. 1204 */ 1205 static void 1206 list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1207 { 1208 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1209 event->attach_state |= PERF_ATTACH_CONTEXT; 1210 1211 /* 1212 * If we're a stand alone event or group leader, we go to the context 1213 * list, group events are kept attached to the group so that 1214 * perf_group_detach can, at all times, locate all siblings. 1215 */ 1216 if (event->group_leader == event) { 1217 struct list_head *list; 1218 1219 if (is_software_event(event)) 1220 event->group_flags |= PERF_GROUP_SOFTWARE; 1221 1222 list = ctx_group_list(event, ctx); 1223 list_add_tail(&event->group_entry, list); 1224 } 1225 1226 if (is_cgroup_event(event)) 1227 ctx->nr_cgroups++; 1228 1229 list_add_rcu(&event->event_entry, &ctx->event_list); 1230 ctx->nr_events++; 1231 if (event->attr.inherit_stat) 1232 ctx->nr_stat++; 1233 1234 ctx->generation++; 1235 } 1236 1237 /* 1238 * Initialize event state based on the perf_event_attr::disabled. 1239 */ 1240 static inline void perf_event__state_init(struct perf_event *event) 1241 { 1242 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : 1243 PERF_EVENT_STATE_INACTIVE; 1244 } 1245 1246 /* 1247 * Called at perf_event creation and when events are attached/detached from a 1248 * group. 
1249 */ 1250 static void perf_event__read_size(struct perf_event *event) 1251 { 1252 int entry = sizeof(u64); /* value */ 1253 int size = 0; 1254 int nr = 1; 1255 1256 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1257 size += sizeof(u64); 1258 1259 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1260 size += sizeof(u64); 1261 1262 if (event->attr.read_format & PERF_FORMAT_ID) 1263 entry += sizeof(u64); 1264 1265 if (event->attr.read_format & PERF_FORMAT_GROUP) { 1266 nr += event->group_leader->nr_siblings; 1267 size += sizeof(u64); 1268 } 1269 1270 size += entry * nr; 1271 event->read_size = size; 1272 } 1273 1274 static void perf_event__header_size(struct perf_event *event) 1275 { 1276 struct perf_sample_data *data; 1277 u64 sample_type = event->attr.sample_type; 1278 u16 size = 0; 1279 1280 perf_event__read_size(event); 1281 1282 if (sample_type & PERF_SAMPLE_IP) 1283 size += sizeof(data->ip); 1284 1285 if (sample_type & PERF_SAMPLE_ADDR) 1286 size += sizeof(data->addr); 1287 1288 if (sample_type & PERF_SAMPLE_PERIOD) 1289 size += sizeof(data->period); 1290 1291 if (sample_type & PERF_SAMPLE_WEIGHT) 1292 size += sizeof(data->weight); 1293 1294 if (sample_type & PERF_SAMPLE_READ) 1295 size += event->read_size; 1296 1297 if (sample_type & PERF_SAMPLE_DATA_SRC) 1298 size += sizeof(data->data_src.val); 1299 1300 if (sample_type & PERF_SAMPLE_TRANSACTION) 1301 size += sizeof(data->txn); 1302 1303 event->header_size = size; 1304 } 1305 1306 static void perf_event__id_header_size(struct perf_event *event) 1307 { 1308 struct perf_sample_data *data; 1309 u64 sample_type = event->attr.sample_type; 1310 u16 size = 0; 1311 1312 if (sample_type & PERF_SAMPLE_TID) 1313 size += sizeof(data->tid_entry); 1314 1315 if (sample_type & PERF_SAMPLE_TIME) 1316 size += sizeof(data->time); 1317 1318 if (sample_type & PERF_SAMPLE_IDENTIFIER) 1319 size += sizeof(data->id); 1320 1321 if (sample_type & PERF_SAMPLE_ID) 1322 size += sizeof(data->id); 1323 1324 if (sample_type & PERF_SAMPLE_STREAM_ID) 1325 size += sizeof(data->stream_id); 1326 1327 if (sample_type & PERF_SAMPLE_CPU) 1328 size += sizeof(data->cpu_entry); 1329 1330 event->id_header_size = size; 1331 } 1332 1333 static void perf_group_attach(struct perf_event *event) 1334 { 1335 struct perf_event *group_leader = event->group_leader, *pos; 1336 1337 /* 1338 * We can have double attach due to group movement in perf_event_open. 1339 */ 1340 if (event->attach_state & PERF_ATTACH_GROUP) 1341 return; 1342 1343 event->attach_state |= PERF_ATTACH_GROUP; 1344 1345 if (group_leader == event) 1346 return; 1347 1348 WARN_ON_ONCE(group_leader->ctx != event->ctx); 1349 1350 if (group_leader->group_flags & PERF_GROUP_SOFTWARE && 1351 !is_software_event(event)) 1352 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; 1353 1354 list_add_tail(&event->group_entry, &group_leader->sibling_list); 1355 group_leader->nr_siblings++; 1356 1357 perf_event__header_size(group_leader); 1358 1359 list_for_each_entry(pos, &group_leader->sibling_list, group_entry) 1360 perf_event__header_size(pos); 1361 } 1362 1363 /* 1364 * Remove a event from the lists for its context. 1365 * Must be called with ctx->mutex and ctx->lock held. 1366 */ 1367 static void 1368 list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1369 { 1370 struct perf_cpu_context *cpuctx; 1371 1372 WARN_ON_ONCE(event->ctx != ctx); 1373 lockdep_assert_held(&ctx->lock); 1374 1375 /* 1376 * We can have double detach due to exit/hot-unplug + close. 
1377 */ 1378 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) 1379 return; 1380 1381 event->attach_state &= ~PERF_ATTACH_CONTEXT; 1382 1383 if (is_cgroup_event(event)) { 1384 ctx->nr_cgroups--; 1385 cpuctx = __get_cpu_context(ctx); 1386 /* 1387 * if there are no more cgroup events 1388 * then cler cgrp to avoid stale pointer 1389 * in update_cgrp_time_from_cpuctx() 1390 */ 1391 if (!ctx->nr_cgroups) 1392 cpuctx->cgrp = NULL; 1393 } 1394 1395 ctx->nr_events--; 1396 if (event->attr.inherit_stat) 1397 ctx->nr_stat--; 1398 1399 list_del_rcu(&event->event_entry); 1400 1401 if (event->group_leader == event) 1402 list_del_init(&event->group_entry); 1403 1404 update_group_times(event); 1405 1406 /* 1407 * If event was in error state, then keep it 1408 * that way, otherwise bogus counts will be 1409 * returned on read(). The only way to get out 1410 * of error state is by explicit re-enabling 1411 * of the event 1412 */ 1413 if (event->state > PERF_EVENT_STATE_OFF) 1414 event->state = PERF_EVENT_STATE_OFF; 1415 1416 ctx->generation++; 1417 } 1418 1419 static void perf_group_detach(struct perf_event *event) 1420 { 1421 struct perf_event *sibling, *tmp; 1422 struct list_head *list = NULL; 1423 1424 /* 1425 * We can have double detach due to exit/hot-unplug + close. 1426 */ 1427 if (!(event->attach_state & PERF_ATTACH_GROUP)) 1428 return; 1429 1430 event->attach_state &= ~PERF_ATTACH_GROUP; 1431 1432 /* 1433 * If this is a sibling, remove it from its group. 1434 */ 1435 if (event->group_leader != event) { 1436 list_del_init(&event->group_entry); 1437 event->group_leader->nr_siblings--; 1438 goto out; 1439 } 1440 1441 if (!list_empty(&event->group_entry)) 1442 list = &event->group_entry; 1443 1444 /* 1445 * If this was a group event with sibling events then 1446 * upgrade the siblings to singleton events by adding them 1447 * to whatever list we are on. 1448 */ 1449 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { 1450 if (list) 1451 list_move_tail(&sibling->group_entry, list); 1452 sibling->group_leader = sibling; 1453 1454 /* Inherit group flags from the previous leader */ 1455 sibling->group_flags = event->group_flags; 1456 1457 WARN_ON_ONCE(sibling->ctx != event->ctx); 1458 } 1459 1460 out: 1461 perf_event__header_size(event->group_leader); 1462 1463 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) 1464 perf_event__header_size(tmp); 1465 } 1466 1467 /* 1468 * User event without the task. 1469 */ 1470 static bool is_orphaned_event(struct perf_event *event) 1471 { 1472 return event && !is_kernel_event(event) && !event->owner; 1473 } 1474 1475 /* 1476 * Event has a parent but parent's task finished and it's 1477 * alive only because of children holding refference. 1478 */ 1479 static bool is_orphaned_child(struct perf_event *event) 1480 { 1481 return is_orphaned_event(event->parent); 1482 } 1483 1484 static void orphans_remove_work(struct work_struct *work); 1485 1486 static void schedule_orphans_remove(struct perf_event_context *ctx) 1487 { 1488 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) 1489 return; 1490 1491 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { 1492 get_ctx(ctx); 1493 ctx->orphans_remove_sched = true; 1494 } 1495 } 1496 1497 static int __init perf_workqueue_init(void) 1498 { 1499 perf_wq = create_singlethread_workqueue("perf"); 1500 WARN(!perf_wq, "failed to create perf workqueue\n"); 1501 return perf_wq ? 
0 : -1; 1502 } 1503 1504 core_initcall(perf_workqueue_init); 1505 1506 static inline int pmu_filter_match(struct perf_event *event) 1507 { 1508 struct pmu *pmu = event->pmu; 1509 return pmu->filter_match ? pmu->filter_match(event) : 1; 1510 } 1511 1512 static inline int 1513 event_filter_match(struct perf_event *event) 1514 { 1515 return (event->cpu == -1 || event->cpu == smp_processor_id()) 1516 && perf_cgroup_match(event) && pmu_filter_match(event); 1517 } 1518 1519 static void 1520 event_sched_out(struct perf_event *event, 1521 struct perf_cpu_context *cpuctx, 1522 struct perf_event_context *ctx) 1523 { 1524 u64 tstamp = perf_event_time(event); 1525 u64 delta; 1526 1527 WARN_ON_ONCE(event->ctx != ctx); 1528 lockdep_assert_held(&ctx->lock); 1529 1530 /* 1531 * An event which could not be activated because of 1532 * filter mismatch still needs to have its timings 1533 * maintained, otherwise bogus information is return 1534 * via read() for time_enabled, time_running: 1535 */ 1536 if (event->state == PERF_EVENT_STATE_INACTIVE 1537 && !event_filter_match(event)) { 1538 delta = tstamp - event->tstamp_stopped; 1539 event->tstamp_running += delta; 1540 event->tstamp_stopped = tstamp; 1541 } 1542 1543 if (event->state != PERF_EVENT_STATE_ACTIVE) 1544 return; 1545 1546 perf_pmu_disable(event->pmu); 1547 1548 event->state = PERF_EVENT_STATE_INACTIVE; 1549 if (event->pending_disable) { 1550 event->pending_disable = 0; 1551 event->state = PERF_EVENT_STATE_OFF; 1552 } 1553 event->tstamp_stopped = tstamp; 1554 event->pmu->del(event, 0); 1555 event->oncpu = -1; 1556 1557 if (!is_software_event(event)) 1558 cpuctx->active_oncpu--; 1559 if (!--ctx->nr_active) 1560 perf_event_ctx_deactivate(ctx); 1561 if (event->attr.freq && event->attr.sample_freq) 1562 ctx->nr_freq--; 1563 if (event->attr.exclusive || !cpuctx->active_oncpu) 1564 cpuctx->exclusive = 0; 1565 1566 if (is_orphaned_child(event)) 1567 schedule_orphans_remove(ctx); 1568 1569 perf_pmu_enable(event->pmu); 1570 } 1571 1572 static void 1573 group_sched_out(struct perf_event *group_event, 1574 struct perf_cpu_context *cpuctx, 1575 struct perf_event_context *ctx) 1576 { 1577 struct perf_event *event; 1578 int state = group_event->state; 1579 1580 event_sched_out(group_event, cpuctx, ctx); 1581 1582 /* 1583 * Schedule out siblings (if any): 1584 */ 1585 list_for_each_entry(event, &group_event->sibling_list, group_entry) 1586 event_sched_out(event, cpuctx, ctx); 1587 1588 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) 1589 cpuctx->exclusive = 0; 1590 } 1591 1592 struct remove_event { 1593 struct perf_event *event; 1594 bool detach_group; 1595 }; 1596 1597 /* 1598 * Cross CPU call to remove a performance event 1599 * 1600 * We disable the event on the hardware level first. After that we 1601 * remove it from the context list. 1602 */ 1603 static int __perf_remove_from_context(void *info) 1604 { 1605 struct remove_event *re = info; 1606 struct perf_event *event = re->event; 1607 struct perf_event_context *ctx = event->ctx; 1608 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1609 1610 raw_spin_lock(&ctx->lock); 1611 event_sched_out(event, cpuctx, ctx); 1612 if (re->detach_group) 1613 perf_group_detach(event); 1614 list_del_event(event, ctx); 1615 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1616 ctx->is_active = 0; 1617 cpuctx->task_ctx = NULL; 1618 } 1619 raw_spin_unlock(&ctx->lock); 1620 1621 return 0; 1622 } 1623 1624 1625 /* 1626 * Remove the event from a task's (or a CPU's) list of events. 
1627 * 1628 * CPU events are removed with a smp call. For task events we only 1629 * call when the task is on a CPU. 1630 * 1631 * If event->ctx is a cloned context, callers must make sure that 1632 * every task struct that event->ctx->task could possibly point to 1633 * remains valid. This is OK when called from perf_release since 1634 * that only calls us on the top-level context, which can't be a clone. 1635 * When called from perf_event_exit_task, it's OK because the 1636 * context has been detached from its task. 1637 */ 1638 static void perf_remove_from_context(struct perf_event *event, bool detach_group) 1639 { 1640 struct perf_event_context *ctx = event->ctx; 1641 struct task_struct *task = ctx->task; 1642 struct remove_event re = { 1643 .event = event, 1644 .detach_group = detach_group, 1645 }; 1646 1647 lockdep_assert_held(&ctx->mutex); 1648 1649 if (!task) { 1650 /* 1651 * Per cpu events are removed via an smp call. The removal can 1652 * fail if the CPU is currently offline, but in that case we 1653 * already called __perf_remove_from_context from 1654 * perf_event_exit_cpu. 1655 */ 1656 cpu_function_call(event->cpu, __perf_remove_from_context, &re); 1657 return; 1658 } 1659 1660 retry: 1661 if (!task_function_call(task, __perf_remove_from_context, &re)) 1662 return; 1663 1664 raw_spin_lock_irq(&ctx->lock); 1665 /* 1666 * If we failed to find a running task, but find the context active now 1667 * that we've acquired the ctx->lock, retry. 1668 */ 1669 if (ctx->is_active) { 1670 raw_spin_unlock_irq(&ctx->lock); 1671 /* 1672 * Reload the task pointer, it might have been changed by 1673 * a concurrent perf_event_context_sched_out(). 1674 */ 1675 task = ctx->task; 1676 goto retry; 1677 } 1678 1679 /* 1680 * Since the task isn't running, its safe to remove the event, us 1681 * holding the ctx->lock ensures the task won't get scheduled in. 1682 */ 1683 if (detach_group) 1684 perf_group_detach(event); 1685 list_del_event(event, ctx); 1686 raw_spin_unlock_irq(&ctx->lock); 1687 } 1688 1689 /* 1690 * Cross CPU call to disable a performance event 1691 */ 1692 int __perf_event_disable(void *info) 1693 { 1694 struct perf_event *event = info; 1695 struct perf_event_context *ctx = event->ctx; 1696 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1697 1698 /* 1699 * If this is a per-task event, need to check whether this 1700 * event's task is the current task on this cpu. 1701 * 1702 * Can trigger due to concurrent perf_event_context_sched_out() 1703 * flipping contexts around. 1704 */ 1705 if (ctx->task && cpuctx->task_ctx != ctx) 1706 return -EINVAL; 1707 1708 raw_spin_lock(&ctx->lock); 1709 1710 /* 1711 * If the event is on, turn it off. 1712 * If it is in error state, leave it in error state. 1713 */ 1714 if (event->state >= PERF_EVENT_STATE_INACTIVE) { 1715 update_context_time(ctx); 1716 update_cgrp_time_from_event(event); 1717 update_group_times(event); 1718 if (event == event->group_leader) 1719 group_sched_out(event, cpuctx, ctx); 1720 else 1721 event_sched_out(event, cpuctx, ctx); 1722 event->state = PERF_EVENT_STATE_OFF; 1723 } 1724 1725 raw_spin_unlock(&ctx->lock); 1726 1727 return 0; 1728 } 1729 1730 /* 1731 * Disable a event. 1732 * 1733 * If event->ctx is a cloned context, callers must make sure that 1734 * every task struct that event->ctx->task could possibly point to 1735 * remains valid. 
This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *         tstamp - ctx->timestamp
	 * is equivalent to
	 *         tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - it was not scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the same time source all along. We believe it
	 * is cleaner and simpler to understand.
1827 */ 1828 if (is_cgroup_event(event)) 1829 perf_cgroup_set_shadow_time(event, tstamp); 1830 else 1831 event->shadow_ctx_time = tstamp - ctx->timestamp; 1832 } 1833 1834 #define MAX_INTERRUPTS (~0ULL) 1835 1836 static void perf_log_throttle(struct perf_event *event, int enable); 1837 static void perf_log_itrace_start(struct perf_event *event); 1838 1839 static int 1840 event_sched_in(struct perf_event *event, 1841 struct perf_cpu_context *cpuctx, 1842 struct perf_event_context *ctx) 1843 { 1844 u64 tstamp = perf_event_time(event); 1845 int ret = 0; 1846 1847 lockdep_assert_held(&ctx->lock); 1848 1849 if (event->state <= PERF_EVENT_STATE_OFF) 1850 return 0; 1851 1852 event->state = PERF_EVENT_STATE_ACTIVE; 1853 event->oncpu = smp_processor_id(); 1854 1855 /* 1856 * Unthrottle events, since we scheduled we might have missed several 1857 * ticks already, also for a heavily scheduling task there is little 1858 * guarantee it'll get a tick in a timely manner. 1859 */ 1860 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { 1861 perf_log_throttle(event, 1); 1862 event->hw.interrupts = 0; 1863 } 1864 1865 /* 1866 * The new state must be visible before we turn it on in the hardware: 1867 */ 1868 smp_wmb(); 1869 1870 perf_pmu_disable(event->pmu); 1871 1872 perf_set_shadow_time(event, ctx, tstamp); 1873 1874 perf_log_itrace_start(event); 1875 1876 if (event->pmu->add(event, PERF_EF_START)) { 1877 event->state = PERF_EVENT_STATE_INACTIVE; 1878 event->oncpu = -1; 1879 ret = -EAGAIN; 1880 goto out; 1881 } 1882 1883 event->tstamp_running += tstamp - event->tstamp_stopped; 1884 1885 if (!is_software_event(event)) 1886 cpuctx->active_oncpu++; 1887 if (!ctx->nr_active++) 1888 perf_event_ctx_activate(ctx); 1889 if (event->attr.freq && event->attr.sample_freq) 1890 ctx->nr_freq++; 1891 1892 if (event->attr.exclusive) 1893 cpuctx->exclusive = 1; 1894 1895 if (is_orphaned_child(event)) 1896 schedule_orphans_remove(ctx); 1897 1898 out: 1899 perf_pmu_enable(event->pmu); 1900 1901 return ret; 1902 } 1903 1904 static int 1905 group_sched_in(struct perf_event *group_event, 1906 struct perf_cpu_context *cpuctx, 1907 struct perf_event_context *ctx) 1908 { 1909 struct perf_event *event, *partial_group = NULL; 1910 struct pmu *pmu = ctx->pmu; 1911 u64 now = ctx->time; 1912 bool simulate = false; 1913 1914 if (group_event->state == PERF_EVENT_STATE_OFF) 1915 return 0; 1916 1917 pmu->start_txn(pmu); 1918 1919 if (event_sched_in(group_event, cpuctx, ctx)) { 1920 pmu->cancel_txn(pmu); 1921 perf_mux_hrtimer_restart(cpuctx); 1922 return -EAGAIN; 1923 } 1924 1925 /* 1926 * Schedule in siblings as one group (if any): 1927 */ 1928 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1929 if (event_sched_in(event, cpuctx, ctx)) { 1930 partial_group = event; 1931 goto group_error; 1932 } 1933 } 1934 1935 if (!pmu->commit_txn(pmu)) 1936 return 0; 1937 1938 group_error: 1939 /* 1940 * Groups can be scheduled in as one unit only, so undo any 1941 * partial group before returning: 1942 * The events up to the failed event are scheduled out normally, 1943 * tstamp_stopped will be updated. 1944 * 1945 * The failed events and the remaining siblings need to have 1946 * their timings updated as if they had gone thru event_sched_in() 1947 * and event_sched_out(). This is required to get consistent timings 1948 * across the group. 
This also takes care of the case where the group 1949 * could never be scheduled by ensuring tstamp_stopped is set to mark 1950 * the time the event was actually stopped, such that time delta 1951 * calculation in update_event_times() is correct. 1952 */ 1953 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1954 if (event == partial_group) 1955 simulate = true; 1956 1957 if (simulate) { 1958 event->tstamp_running += now - event->tstamp_stopped; 1959 event->tstamp_stopped = now; 1960 } else { 1961 event_sched_out(event, cpuctx, ctx); 1962 } 1963 } 1964 event_sched_out(group_event, cpuctx, ctx); 1965 1966 pmu->cancel_txn(pmu); 1967 1968 perf_mux_hrtimer_restart(cpuctx); 1969 1970 return -EAGAIN; 1971 } 1972 1973 /* 1974 * Work out whether we can put this event group on the CPU now. 1975 */ 1976 static int group_can_go_on(struct perf_event *event, 1977 struct perf_cpu_context *cpuctx, 1978 int can_add_hw) 1979 { 1980 /* 1981 * Groups consisting entirely of software events can always go on. 1982 */ 1983 if (event->group_flags & PERF_GROUP_SOFTWARE) 1984 return 1; 1985 /* 1986 * If an exclusive group is already on, no other hardware 1987 * events can go on. 1988 */ 1989 if (cpuctx->exclusive) 1990 return 0; 1991 /* 1992 * If this group is exclusive and there are already 1993 * events on the CPU, it can't go on. 1994 */ 1995 if (event->attr.exclusive && cpuctx->active_oncpu) 1996 return 0; 1997 /* 1998 * Otherwise, try to add it if all previous groups were able 1999 * to go on. 2000 */ 2001 return can_add_hw; 2002 } 2003 2004 static void add_event_to_ctx(struct perf_event *event, 2005 struct perf_event_context *ctx) 2006 { 2007 u64 tstamp = perf_event_time(event); 2008 2009 list_add_event(event, ctx); 2010 perf_group_attach(event); 2011 event->tstamp_enabled = tstamp; 2012 event->tstamp_running = tstamp; 2013 event->tstamp_stopped = tstamp; 2014 } 2015 2016 static void task_ctx_sched_out(struct perf_event_context *ctx); 2017 static void 2018 ctx_sched_in(struct perf_event_context *ctx, 2019 struct perf_cpu_context *cpuctx, 2020 enum event_type_t event_type, 2021 struct task_struct *task); 2022 2023 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2024 struct perf_event_context *ctx, 2025 struct task_struct *task) 2026 { 2027 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); 2028 if (ctx) 2029 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); 2030 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); 2031 if (ctx) 2032 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); 2033 } 2034 2035 /* 2036 * Cross CPU call to install and enable a performance event 2037 * 2038 * Must be called with ctx->mutex held 2039 */ 2040 static int __perf_install_in_context(void *info) 2041 { 2042 struct perf_event *event = info; 2043 struct perf_event_context *ctx = event->ctx; 2044 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2045 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2046 struct task_struct *task = current; 2047 2048 perf_ctx_lock(cpuctx, task_ctx); 2049 perf_pmu_disable(cpuctx->ctx.pmu); 2050 2051 /* 2052 * If there was an active task_ctx schedule it out. 2053 */ 2054 if (task_ctx) 2055 task_ctx_sched_out(task_ctx); 2056 2057 /* 2058 * If the context we're installing events in is not the 2059 * active task_ctx, flip them. 
2060 */ 2061 if (ctx->task && task_ctx != ctx) { 2062 if (task_ctx) 2063 raw_spin_unlock(&task_ctx->lock); 2064 raw_spin_lock(&ctx->lock); 2065 task_ctx = ctx; 2066 } 2067 2068 if (task_ctx) { 2069 cpuctx->task_ctx = task_ctx; 2070 task = task_ctx->task; 2071 } 2072 2073 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2074 2075 update_context_time(ctx); 2076 /* 2077 * update cgrp time only if current cgrp 2078 * matches event->cgrp. Must be done before 2079 * calling add_event_to_ctx() 2080 */ 2081 update_cgrp_time_from_event(event); 2082 2083 add_event_to_ctx(event, ctx); 2084 2085 /* 2086 * Schedule everything back in 2087 */ 2088 perf_event_sched_in(cpuctx, task_ctx, task); 2089 2090 perf_pmu_enable(cpuctx->ctx.pmu); 2091 perf_ctx_unlock(cpuctx, task_ctx); 2092 2093 return 0; 2094 } 2095 2096 /* 2097 * Attach a performance event to a context 2098 * 2099 * First we add the event to the list with the hardware enable bit 2100 * in event->hw_config cleared. 2101 * 2102 * If the event is attached to a task which is on a CPU we use a smp 2103 * call to enable it in the task context. The task might have been 2104 * scheduled away, but we check this in the smp call again. 2105 */ 2106 static void 2107 perf_install_in_context(struct perf_event_context *ctx, 2108 struct perf_event *event, 2109 int cpu) 2110 { 2111 struct task_struct *task = ctx->task; 2112 2113 lockdep_assert_held(&ctx->mutex); 2114 2115 event->ctx = ctx; 2116 if (event->cpu != -1) 2117 event->cpu = cpu; 2118 2119 if (!task) { 2120 /* 2121 * Per cpu events are installed via an smp call and 2122 * the install is always successful. 2123 */ 2124 cpu_function_call(cpu, __perf_install_in_context, event); 2125 return; 2126 } 2127 2128 retry: 2129 if (!task_function_call(task, __perf_install_in_context, event)) 2130 return; 2131 2132 raw_spin_lock_irq(&ctx->lock); 2133 /* 2134 * If we failed to find a running task, but find the context active now 2135 * that we've acquired the ctx->lock, retry. 2136 */ 2137 if (ctx->is_active) { 2138 raw_spin_unlock_irq(&ctx->lock); 2139 /* 2140 * Reload the task pointer, it might have been changed by 2141 * a concurrent perf_event_context_sched_out(). 2142 */ 2143 task = ctx->task; 2144 goto retry; 2145 } 2146 2147 /* 2148 * Since the task isn't running, its safe to add the event, us holding 2149 * the ctx->lock ensures the task won't get scheduled in. 2150 */ 2151 add_event_to_ctx(event, ctx); 2152 raw_spin_unlock_irq(&ctx->lock); 2153 } 2154 2155 /* 2156 * Put a event into inactive state and update time fields. 2157 * Enabling the leader of a group effectively enables all 2158 * the group members that aren't explicitly disabled, so we 2159 * have to update their ->tstamp_enabled also. 2160 * Note: this works for group members as well as group leaders 2161 * since the non-leader members' sibling_lists will be empty. 
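 *
 * A small worked example of the bookkeeping below (illustrative numbers
 * only): if an event has accumulated total_time_enabled = 5ms by the time
 * it is re-enabled at tstamp = 20ms, tstamp_enabled is rewound to 15ms, so
 * a later update_event_times() computes "now - tstamp_enabled" and keeps
 * counting on top of the 5ms already accrued rather than restarting at 0.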
2162 */ 2163 static void __perf_event_mark_enabled(struct perf_event *event) 2164 { 2165 struct perf_event *sub; 2166 u64 tstamp = perf_event_time(event); 2167 2168 event->state = PERF_EVENT_STATE_INACTIVE; 2169 event->tstamp_enabled = tstamp - event->total_time_enabled; 2170 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2171 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2172 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2173 } 2174 } 2175 2176 /* 2177 * Cross CPU call to enable a performance event 2178 */ 2179 static int __perf_event_enable(void *info) 2180 { 2181 struct perf_event *event = info; 2182 struct perf_event_context *ctx = event->ctx; 2183 struct perf_event *leader = event->group_leader; 2184 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2185 int err; 2186 2187 /* 2188 * There's a time window between 'ctx->is_active' check 2189 * in perf_event_enable function and this place having: 2190 * - IRQs on 2191 * - ctx->lock unlocked 2192 * 2193 * where the task could be killed and 'ctx' deactivated 2194 * by perf_event_exit_task. 2195 */ 2196 if (!ctx->is_active) 2197 return -EINVAL; 2198 2199 raw_spin_lock(&ctx->lock); 2200 update_context_time(ctx); 2201 2202 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2203 goto unlock; 2204 2205 /* 2206 * set current task's cgroup time reference point 2207 */ 2208 perf_cgroup_set_timestamp(current, ctx); 2209 2210 __perf_event_mark_enabled(event); 2211 2212 if (!event_filter_match(event)) { 2213 if (is_cgroup_event(event)) 2214 perf_cgroup_defer_enabled(event); 2215 goto unlock; 2216 } 2217 2218 /* 2219 * If the event is in a group and isn't the group leader, 2220 * then don't put it on unless the group is on. 2221 */ 2222 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2223 goto unlock; 2224 2225 if (!group_can_go_on(event, cpuctx, 1)) { 2226 err = -EEXIST; 2227 } else { 2228 if (event == leader) 2229 err = group_sched_in(event, cpuctx, ctx); 2230 else 2231 err = event_sched_in(event, cpuctx, ctx); 2232 } 2233 2234 if (err) { 2235 /* 2236 * If this event can't go on and it's part of a 2237 * group, then the whole group has to come off. 2238 */ 2239 if (leader != event) { 2240 group_sched_out(leader, cpuctx, ctx); 2241 perf_mux_hrtimer_restart(cpuctx); 2242 } 2243 if (leader->attr.pinned) { 2244 update_group_times(leader); 2245 leader->state = PERF_EVENT_STATE_ERROR; 2246 } 2247 } 2248 2249 unlock: 2250 raw_spin_unlock(&ctx->lock); 2251 2252 return 0; 2253 } 2254 2255 /* 2256 * Enable a event. 2257 * 2258 * If event->ctx is a cloned context, callers must make sure that 2259 * every task struct that event->ctx->task could possibly point to 2260 * remains valid. This condition is satisfied when called through 2261 * perf_event_for_each_child or perf_event_for_each as described 2262 * for perf_event_disable. 2263 */ 2264 static void _perf_event_enable(struct perf_event *event) 2265 { 2266 struct perf_event_context *ctx = event->ctx; 2267 struct task_struct *task = ctx->task; 2268 2269 if (!task) { 2270 /* 2271 * Enable the event on the cpu that it's on 2272 */ 2273 cpu_function_call(event->cpu, __perf_event_enable, event); 2274 return; 2275 } 2276 2277 raw_spin_lock_irq(&ctx->lock); 2278 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2279 goto out; 2280 2281 /* 2282 * If the event is in error state, clear that first. 
2283 * That way, if we see the event in error state below, we 2284 * know that it has gone back into error state, as distinct 2285 * from the task having been scheduled away before the 2286 * cross-call arrived. 2287 */ 2288 if (event->state == PERF_EVENT_STATE_ERROR) 2289 event->state = PERF_EVENT_STATE_OFF; 2290 2291 retry: 2292 if (!ctx->is_active) { 2293 __perf_event_mark_enabled(event); 2294 goto out; 2295 } 2296 2297 raw_spin_unlock_irq(&ctx->lock); 2298 2299 if (!task_function_call(task, __perf_event_enable, event)) 2300 return; 2301 2302 raw_spin_lock_irq(&ctx->lock); 2303 2304 /* 2305 * If the context is active and the event is still off, 2306 * we need to retry the cross-call. 2307 */ 2308 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2309 /* 2310 * task could have been flipped by a concurrent 2311 * perf_event_context_sched_out() 2312 */ 2313 task = ctx->task; 2314 goto retry; 2315 } 2316 2317 out: 2318 raw_spin_unlock_irq(&ctx->lock); 2319 } 2320 2321 /* 2322 * See perf_event_disable(); 2323 */ 2324 void perf_event_enable(struct perf_event *event) 2325 { 2326 struct perf_event_context *ctx; 2327 2328 ctx = perf_event_ctx_lock(event); 2329 _perf_event_enable(event); 2330 perf_event_ctx_unlock(event, ctx); 2331 } 2332 EXPORT_SYMBOL_GPL(perf_event_enable); 2333 2334 static int _perf_event_refresh(struct perf_event *event, int refresh) 2335 { 2336 /* 2337 * not supported on inherited events 2338 */ 2339 if (event->attr.inherit || !is_sampling_event(event)) 2340 return -EINVAL; 2341 2342 atomic_add(refresh, &event->event_limit); 2343 _perf_event_enable(event); 2344 2345 return 0; 2346 } 2347 2348 /* 2349 * See perf_event_disable() 2350 */ 2351 int perf_event_refresh(struct perf_event *event, int refresh) 2352 { 2353 struct perf_event_context *ctx; 2354 int ret; 2355 2356 ctx = perf_event_ctx_lock(event); 2357 ret = _perf_event_refresh(event, refresh); 2358 perf_event_ctx_unlock(event, ctx); 2359 2360 return ret; 2361 } 2362 EXPORT_SYMBOL_GPL(perf_event_refresh); 2363 2364 static void ctx_sched_out(struct perf_event_context *ctx, 2365 struct perf_cpu_context *cpuctx, 2366 enum event_type_t event_type) 2367 { 2368 struct perf_event *event; 2369 int is_active = ctx->is_active; 2370 2371 ctx->is_active &= ~event_type; 2372 if (likely(!ctx->nr_events)) 2373 return; 2374 2375 update_context_time(ctx); 2376 update_cgrp_time_from_cpuctx(cpuctx); 2377 if (!ctx->nr_active) 2378 return; 2379 2380 perf_pmu_disable(ctx->pmu); 2381 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2382 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2383 group_sched_out(event, cpuctx, ctx); 2384 } 2385 2386 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2387 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2388 group_sched_out(event, cpuctx, ctx); 2389 } 2390 perf_pmu_enable(ctx->pmu); 2391 } 2392 2393 /* 2394 * Test whether two contexts are equivalent, i.e. whether they have both been 2395 * cloned from the same version of the same context. 2396 * 2397 * Equivalence is measured using a generation number in the context that is 2398 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2399 * and list_del_event(). 
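 *
 * For instance (illustrative): a context cloned from a parent at
 * generation 42 stays equivalent to that parent only while the parent's
 * generation is still 42; any list_add_event() or list_del_event() on the
 * parent bumps the generation, context_equiv() then fails, and the lazy
 * context-switch optimization in perf_event_context_sched_out() falls
 * back to a full schedule-out of the context.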
2400 */ 2401 static int context_equiv(struct perf_event_context *ctx1, 2402 struct perf_event_context *ctx2) 2403 { 2404 lockdep_assert_held(&ctx1->lock); 2405 lockdep_assert_held(&ctx2->lock); 2406 2407 /* Pinning disables the swap optimization */ 2408 if (ctx1->pin_count || ctx2->pin_count) 2409 return 0; 2410 2411 /* If ctx1 is the parent of ctx2 */ 2412 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2413 return 1; 2414 2415 /* If ctx2 is the parent of ctx1 */ 2416 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2417 return 1; 2418 2419 /* 2420 * If ctx1 and ctx2 have the same parent; we flatten the parent 2421 * hierarchy, see perf_event_init_context(). 2422 */ 2423 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2424 ctx1->parent_gen == ctx2->parent_gen) 2425 return 1; 2426 2427 /* Unmatched */ 2428 return 0; 2429 } 2430 2431 static void __perf_event_sync_stat(struct perf_event *event, 2432 struct perf_event *next_event) 2433 { 2434 u64 value; 2435 2436 if (!event->attr.inherit_stat) 2437 return; 2438 2439 /* 2440 * Update the event value, we cannot use perf_event_read() 2441 * because we're in the middle of a context switch and have IRQs 2442 * disabled, which upsets smp_call_function_single(), however 2443 * we know the event must be on the current CPU, therefore we 2444 * don't need to use it. 2445 */ 2446 switch (event->state) { 2447 case PERF_EVENT_STATE_ACTIVE: 2448 event->pmu->read(event); 2449 /* fall-through */ 2450 2451 case PERF_EVENT_STATE_INACTIVE: 2452 update_event_times(event); 2453 break; 2454 2455 default: 2456 break; 2457 } 2458 2459 /* 2460 * In order to keep per-task stats reliable we need to flip the event 2461 * values when we flip the contexts. 2462 */ 2463 value = local64_read(&next_event->count); 2464 value = local64_xchg(&event->count, value); 2465 local64_set(&next_event->count, value); 2466 2467 swap(event->total_time_enabled, next_event->total_time_enabled); 2468 swap(event->total_time_running, next_event->total_time_running); 2469 2470 /* 2471 * Since we swizzled the values, update the user visible data too. 
2472 */ 2473 perf_event_update_userpage(event); 2474 perf_event_update_userpage(next_event); 2475 } 2476 2477 static void perf_event_sync_stat(struct perf_event_context *ctx, 2478 struct perf_event_context *next_ctx) 2479 { 2480 struct perf_event *event, *next_event; 2481 2482 if (!ctx->nr_stat) 2483 return; 2484 2485 update_context_time(ctx); 2486 2487 event = list_first_entry(&ctx->event_list, 2488 struct perf_event, event_entry); 2489 2490 next_event = list_first_entry(&next_ctx->event_list, 2491 struct perf_event, event_entry); 2492 2493 while (&event->event_entry != &ctx->event_list && 2494 &next_event->event_entry != &next_ctx->event_list) { 2495 2496 __perf_event_sync_stat(event, next_event); 2497 2498 event = list_next_entry(event, event_entry); 2499 next_event = list_next_entry(next_event, event_entry); 2500 } 2501 } 2502 2503 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2504 struct task_struct *next) 2505 { 2506 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2507 struct perf_event_context *next_ctx; 2508 struct perf_event_context *parent, *next_parent; 2509 struct perf_cpu_context *cpuctx; 2510 int do_switch = 1; 2511 2512 if (likely(!ctx)) 2513 return; 2514 2515 cpuctx = __get_cpu_context(ctx); 2516 if (!cpuctx->task_ctx) 2517 return; 2518 2519 rcu_read_lock(); 2520 next_ctx = next->perf_event_ctxp[ctxn]; 2521 if (!next_ctx) 2522 goto unlock; 2523 2524 parent = rcu_dereference(ctx->parent_ctx); 2525 next_parent = rcu_dereference(next_ctx->parent_ctx); 2526 2527 /* If neither context have a parent context; they cannot be clones. */ 2528 if (!parent && !next_parent) 2529 goto unlock; 2530 2531 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2532 /* 2533 * Looks like the two contexts are clones, so we might be 2534 * able to optimize the context switch. We lock both 2535 * contexts and check that they are clones under the 2536 * lock (including re-checking that neither has been 2537 * uncloned in the meantime). It doesn't matter which 2538 * order we take the locks because no other cpu could 2539 * be trying to lock both of these tasks. 2540 */ 2541 raw_spin_lock(&ctx->lock); 2542 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2543 if (context_equiv(ctx, next_ctx)) { 2544 /* 2545 * XXX do we need a memory barrier of sorts 2546 * wrt to rcu_dereference() of perf_event_ctxp 2547 */ 2548 task->perf_event_ctxp[ctxn] = next_ctx; 2549 next->perf_event_ctxp[ctxn] = ctx; 2550 ctx->task = next; 2551 next_ctx->task = task; 2552 2553 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2554 2555 do_switch = 0; 2556 2557 perf_event_sync_stat(ctx, next_ctx); 2558 } 2559 raw_spin_unlock(&next_ctx->lock); 2560 raw_spin_unlock(&ctx->lock); 2561 } 2562 unlock: 2563 rcu_read_unlock(); 2564 2565 if (do_switch) { 2566 raw_spin_lock(&ctx->lock); 2567 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2568 cpuctx->task_ctx = NULL; 2569 raw_spin_unlock(&ctx->lock); 2570 } 2571 } 2572 2573 void perf_sched_cb_dec(struct pmu *pmu) 2574 { 2575 this_cpu_dec(perf_sched_cb_usages); 2576 } 2577 2578 void perf_sched_cb_inc(struct pmu *pmu) 2579 { 2580 this_cpu_inc(perf_sched_cb_usages); 2581 } 2582 2583 /* 2584 * This function provides the context switch callback to the lower code 2585 * layer. It is invoked ONLY when the context switch callback is enabled. 
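 *
 * A minimal sketch of how a PMU driver might arm this path; the "foo"
 * names below are hypothetical, only perf_sched_cb_inc()/perf_sched_cb_dec()
 * and the pmu::sched_task hook are real:
 *
 *	static void foo_pmu_sched_task(struct perf_event_context *ctx,
 *				       bool sched_in)
 *	{
 *		... save or restore per-task foo PMU state ...
 *	}
 *
 *	static int foo_pmu_add(struct perf_event *event, int flags)
 *	{
 *		perf_sched_cb_inc(event->ctx->pmu);
 *		... program the counter ...
 *		return 0;
 *	}
 *
 * with a matching perf_sched_cb_dec() call in the driver's pmu::del hook.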
2586 */ 2587 static void perf_pmu_sched_task(struct task_struct *prev, 2588 struct task_struct *next, 2589 bool sched_in) 2590 { 2591 struct perf_cpu_context *cpuctx; 2592 struct pmu *pmu; 2593 unsigned long flags; 2594 2595 if (prev == next) 2596 return; 2597 2598 local_irq_save(flags); 2599 2600 rcu_read_lock(); 2601 2602 list_for_each_entry_rcu(pmu, &pmus, entry) { 2603 if (pmu->sched_task) { 2604 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2605 2606 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2607 2608 perf_pmu_disable(pmu); 2609 2610 pmu->sched_task(cpuctx->task_ctx, sched_in); 2611 2612 perf_pmu_enable(pmu); 2613 2614 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2615 } 2616 } 2617 2618 rcu_read_unlock(); 2619 2620 local_irq_restore(flags); 2621 } 2622 2623 static void perf_event_switch(struct task_struct *task, 2624 struct task_struct *next_prev, bool sched_in); 2625 2626 #define for_each_task_context_nr(ctxn) \ 2627 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2628 2629 /* 2630 * Called from scheduler to remove the events of the current task, 2631 * with interrupts disabled. 2632 * 2633 * We stop each event and update the event value in event->count. 2634 * 2635 * This does not protect us against NMI, but disable() 2636 * sets the disabled bit in the control field of event _before_ 2637 * accessing the event control register. If a NMI hits, then it will 2638 * not restart the event. 2639 */ 2640 void __perf_event_task_sched_out(struct task_struct *task, 2641 struct task_struct *next) 2642 { 2643 int ctxn; 2644 2645 if (__this_cpu_read(perf_sched_cb_usages)) 2646 perf_pmu_sched_task(task, next, false); 2647 2648 if (atomic_read(&nr_switch_events)) 2649 perf_event_switch(task, next, false); 2650 2651 for_each_task_context_nr(ctxn) 2652 perf_event_context_sched_out(task, ctxn, next); 2653 2654 /* 2655 * if cgroup events exist on this CPU, then we need 2656 * to check if we have to switch out PMU state. 2657 * cgroup event are system-wide mode only 2658 */ 2659 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2660 perf_cgroup_sched_out(task, next); 2661 } 2662 2663 static void task_ctx_sched_out(struct perf_event_context *ctx) 2664 { 2665 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2666 2667 if (!cpuctx->task_ctx) 2668 return; 2669 2670 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2671 return; 2672 2673 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2674 cpuctx->task_ctx = NULL; 2675 } 2676 2677 /* 2678 * Called with IRQs disabled 2679 */ 2680 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2681 enum event_type_t event_type) 2682 { 2683 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2684 } 2685 2686 static void 2687 ctx_pinned_sched_in(struct perf_event_context *ctx, 2688 struct perf_cpu_context *cpuctx) 2689 { 2690 struct perf_event *event; 2691 2692 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2693 if (event->state <= PERF_EVENT_STATE_OFF) 2694 continue; 2695 if (!event_filter_match(event)) 2696 continue; 2697 2698 /* may need to reset tstamp_enabled */ 2699 if (is_cgroup_event(event)) 2700 perf_cgroup_mark_enabled(event, ctx); 2701 2702 if (group_can_go_on(event, cpuctx, 1)) 2703 group_sched_in(event, cpuctx, ctx); 2704 2705 /* 2706 * If this pinned group hasn't been scheduled, 2707 * put it in error state. 
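 * (Illustrative scenario: a pinned group that asks for more counters than
 * the PMU has free will fail group_sched_in() above on every attempt;
 * marking it ERROR here is what later makes perf_read_hw() return
 * end-of-file for it instead of handing back stale counts.)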
2708 */ 2709 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2710 update_group_times(event); 2711 event->state = PERF_EVENT_STATE_ERROR; 2712 } 2713 } 2714 } 2715 2716 static void 2717 ctx_flexible_sched_in(struct perf_event_context *ctx, 2718 struct perf_cpu_context *cpuctx) 2719 { 2720 struct perf_event *event; 2721 int can_add_hw = 1; 2722 2723 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2724 /* Ignore events in OFF or ERROR state */ 2725 if (event->state <= PERF_EVENT_STATE_OFF) 2726 continue; 2727 /* 2728 * Listen to the 'cpu' scheduling filter constraint 2729 * of events: 2730 */ 2731 if (!event_filter_match(event)) 2732 continue; 2733 2734 /* may need to reset tstamp_enabled */ 2735 if (is_cgroup_event(event)) 2736 perf_cgroup_mark_enabled(event, ctx); 2737 2738 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2739 if (group_sched_in(event, cpuctx, ctx)) 2740 can_add_hw = 0; 2741 } 2742 } 2743 } 2744 2745 static void 2746 ctx_sched_in(struct perf_event_context *ctx, 2747 struct perf_cpu_context *cpuctx, 2748 enum event_type_t event_type, 2749 struct task_struct *task) 2750 { 2751 u64 now; 2752 int is_active = ctx->is_active; 2753 2754 ctx->is_active |= event_type; 2755 if (likely(!ctx->nr_events)) 2756 return; 2757 2758 now = perf_clock(); 2759 ctx->timestamp = now; 2760 perf_cgroup_set_timestamp(task, ctx); 2761 /* 2762 * First go through the list and put on any pinned groups 2763 * in order to give them the best chance of going on. 2764 */ 2765 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2766 ctx_pinned_sched_in(ctx, cpuctx); 2767 2768 /* Then walk through the lower prio flexible groups */ 2769 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2770 ctx_flexible_sched_in(ctx, cpuctx); 2771 } 2772 2773 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2774 enum event_type_t event_type, 2775 struct task_struct *task) 2776 { 2777 struct perf_event_context *ctx = &cpuctx->ctx; 2778 2779 ctx_sched_in(ctx, cpuctx, event_type, task); 2780 } 2781 2782 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2783 struct task_struct *task) 2784 { 2785 struct perf_cpu_context *cpuctx; 2786 2787 cpuctx = __get_cpu_context(ctx); 2788 if (cpuctx->task_ctx == ctx) 2789 return; 2790 2791 perf_ctx_lock(cpuctx, ctx); 2792 perf_pmu_disable(ctx->pmu); 2793 /* 2794 * We want to keep the following priority order: 2795 * cpu pinned (that don't need to move), task pinned, 2796 * cpu flexible, task flexible. 2797 */ 2798 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2799 2800 if (ctx->nr_events) 2801 cpuctx->task_ctx = ctx; 2802 2803 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2804 2805 perf_pmu_enable(ctx->pmu); 2806 perf_ctx_unlock(cpuctx, ctx); 2807 } 2808 2809 /* 2810 * Called from scheduler to add the events of the current task 2811 * with interrupts disabled. 2812 * 2813 * We restore the event value and then enable it. 2814 * 2815 * This does not protect us against NMI, but enable() 2816 * sets the enabled bit in the control field of event _before_ 2817 * accessing the event control register. If a NMI hits, then it will 2818 * keep the event running. 
2819 */ 2820 void __perf_event_task_sched_in(struct task_struct *prev, 2821 struct task_struct *task) 2822 { 2823 struct perf_event_context *ctx; 2824 int ctxn; 2825 2826 for_each_task_context_nr(ctxn) { 2827 ctx = task->perf_event_ctxp[ctxn]; 2828 if (likely(!ctx)) 2829 continue; 2830 2831 perf_event_context_sched_in(ctx, task); 2832 } 2833 /* 2834 * if cgroup events exist on this CPU, then we need 2835 * to check if we have to switch in PMU state. 2836 * cgroup event are system-wide mode only 2837 */ 2838 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2839 perf_cgroup_sched_in(prev, task); 2840 2841 if (atomic_read(&nr_switch_events)) 2842 perf_event_switch(task, prev, true); 2843 2844 if (__this_cpu_read(perf_sched_cb_usages)) 2845 perf_pmu_sched_task(prev, task, true); 2846 } 2847 2848 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2849 { 2850 u64 frequency = event->attr.sample_freq; 2851 u64 sec = NSEC_PER_SEC; 2852 u64 divisor, dividend; 2853 2854 int count_fls, nsec_fls, frequency_fls, sec_fls; 2855 2856 count_fls = fls64(count); 2857 nsec_fls = fls64(nsec); 2858 frequency_fls = fls64(frequency); 2859 sec_fls = 30; 2860 2861 /* 2862 * We got @count in @nsec, with a target of sample_freq HZ 2863 * the target period becomes: 2864 * 2865 * @count * 10^9 2866 * period = ------------------- 2867 * @nsec * sample_freq 2868 * 2869 */ 2870 2871 /* 2872 * Reduce accuracy by one bit such that @a and @b converge 2873 * to a similar magnitude. 2874 */ 2875 #define REDUCE_FLS(a, b) \ 2876 do { \ 2877 if (a##_fls > b##_fls) { \ 2878 a >>= 1; \ 2879 a##_fls--; \ 2880 } else { \ 2881 b >>= 1; \ 2882 b##_fls--; \ 2883 } \ 2884 } while (0) 2885 2886 /* 2887 * Reduce accuracy until either term fits in a u64, then proceed with 2888 * the other, so that finally we can do a u64/u64 division. 2889 */ 2890 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2891 REDUCE_FLS(nsec, frequency); 2892 REDUCE_FLS(sec, count); 2893 } 2894 2895 if (count_fls + sec_fls > 64) { 2896 divisor = nsec * frequency; 2897 2898 while (count_fls + sec_fls > 64) { 2899 REDUCE_FLS(count, sec); 2900 divisor >>= 1; 2901 } 2902 2903 dividend = count * sec; 2904 } else { 2905 dividend = count * sec; 2906 2907 while (nsec_fls + frequency_fls > 64) { 2908 REDUCE_FLS(nsec, frequency); 2909 dividend >>= 1; 2910 } 2911 2912 divisor = nsec * frequency; 2913 } 2914 2915 if (!divisor) 2916 return dividend; 2917 2918 return div64_u64(dividend, divisor); 2919 } 2920 2921 static DEFINE_PER_CPU(int, perf_throttled_count); 2922 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2923 2924 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2925 { 2926 struct hw_perf_event *hwc = &event->hw; 2927 s64 period, sample_period; 2928 s64 delta; 2929 2930 period = perf_calculate_period(event, nsec, count); 2931 2932 delta = (s64)(period - hwc->sample_period); 2933 delta = (delta + 7) / 8; /* low pass filter */ 2934 2935 sample_period = hwc->sample_period + delta; 2936 2937 if (!sample_period) 2938 sample_period = 1; 2939 2940 hwc->sample_period = sample_period; 2941 2942 if (local64_read(&hwc->period_left) > 8*sample_period) { 2943 if (disable) 2944 event->pmu->stop(event, PERF_EF_UPDATE); 2945 2946 local64_set(&hwc->period_left, 0); 2947 2948 if (disable) 2949 event->pmu->start(event, PERF_EF_RELOAD); 2950 } 2951 } 2952 2953 /* 2954 * combine freq adjustment with unthrottling to avoid two passes over the 2955 * events. 
At the same time, make sure, having freq events does not change 2956 * the rate of unthrottling as that would introduce bias. 2957 */ 2958 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2959 int needs_unthr) 2960 { 2961 struct perf_event *event; 2962 struct hw_perf_event *hwc; 2963 u64 now, period = TICK_NSEC; 2964 s64 delta; 2965 2966 /* 2967 * only need to iterate over all events iff: 2968 * - context have events in frequency mode (needs freq adjust) 2969 * - there are events to unthrottle on this cpu 2970 */ 2971 if (!(ctx->nr_freq || needs_unthr)) 2972 return; 2973 2974 raw_spin_lock(&ctx->lock); 2975 perf_pmu_disable(ctx->pmu); 2976 2977 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2978 if (event->state != PERF_EVENT_STATE_ACTIVE) 2979 continue; 2980 2981 if (!event_filter_match(event)) 2982 continue; 2983 2984 perf_pmu_disable(event->pmu); 2985 2986 hwc = &event->hw; 2987 2988 if (hwc->interrupts == MAX_INTERRUPTS) { 2989 hwc->interrupts = 0; 2990 perf_log_throttle(event, 1); 2991 event->pmu->start(event, 0); 2992 } 2993 2994 if (!event->attr.freq || !event->attr.sample_freq) 2995 goto next; 2996 2997 /* 2998 * stop the event and update event->count 2999 */ 3000 event->pmu->stop(event, PERF_EF_UPDATE); 3001 3002 now = local64_read(&event->count); 3003 delta = now - hwc->freq_count_stamp; 3004 hwc->freq_count_stamp = now; 3005 3006 /* 3007 * restart the event 3008 * reload only if value has changed 3009 * we have stopped the event so tell that 3010 * to perf_adjust_period() to avoid stopping it 3011 * twice. 3012 */ 3013 if (delta > 0) 3014 perf_adjust_period(event, period, delta, false); 3015 3016 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3017 next: 3018 perf_pmu_enable(event->pmu); 3019 } 3020 3021 perf_pmu_enable(ctx->pmu); 3022 raw_spin_unlock(&ctx->lock); 3023 } 3024 3025 /* 3026 * Round-robin a context's events: 3027 */ 3028 static void rotate_ctx(struct perf_event_context *ctx) 3029 { 3030 /* 3031 * Rotate the first entry last of non-pinned groups. Rotation might be 3032 * disabled by the inheritance code. 
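 * (E.g. flexible groups [A, B, C] become [B, C, A] after one rotation, so
 * on the next ctx_flexible_sched_in() pass B gets first claim on the
 * hardware counters that A used to grab.)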
3033 */ 3034 if (!ctx->rotate_disable) 3035 list_rotate_left(&ctx->flexible_groups); 3036 } 3037 3038 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3039 { 3040 struct perf_event_context *ctx = NULL; 3041 int rotate = 0; 3042 3043 if (cpuctx->ctx.nr_events) { 3044 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3045 rotate = 1; 3046 } 3047 3048 ctx = cpuctx->task_ctx; 3049 if (ctx && ctx->nr_events) { 3050 if (ctx->nr_events != ctx->nr_active) 3051 rotate = 1; 3052 } 3053 3054 if (!rotate) 3055 goto done; 3056 3057 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3058 perf_pmu_disable(cpuctx->ctx.pmu); 3059 3060 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3061 if (ctx) 3062 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3063 3064 rotate_ctx(&cpuctx->ctx); 3065 if (ctx) 3066 rotate_ctx(ctx); 3067 3068 perf_event_sched_in(cpuctx, ctx, current); 3069 3070 perf_pmu_enable(cpuctx->ctx.pmu); 3071 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3072 done: 3073 3074 return rotate; 3075 } 3076 3077 #ifdef CONFIG_NO_HZ_FULL 3078 bool perf_event_can_stop_tick(void) 3079 { 3080 if (atomic_read(&nr_freq_events) || 3081 __this_cpu_read(perf_throttled_count)) 3082 return false; 3083 else 3084 return true; 3085 } 3086 #endif 3087 3088 void perf_event_task_tick(void) 3089 { 3090 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3091 struct perf_event_context *ctx, *tmp; 3092 int throttled; 3093 3094 WARN_ON(!irqs_disabled()); 3095 3096 __this_cpu_inc(perf_throttled_seq); 3097 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3098 3099 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3100 perf_adjust_freq_unthr_context(ctx, throttled); 3101 } 3102 3103 static int event_enable_on_exec(struct perf_event *event, 3104 struct perf_event_context *ctx) 3105 { 3106 if (!event->attr.enable_on_exec) 3107 return 0; 3108 3109 event->attr.enable_on_exec = 0; 3110 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3111 return 0; 3112 3113 __perf_event_mark_enabled(event); 3114 3115 return 1; 3116 } 3117 3118 /* 3119 * Enable all of a task's events that have been marked enable-on-exec. 3120 * This expects task == current. 3121 */ 3122 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3123 { 3124 struct perf_event_context *clone_ctx = NULL; 3125 struct perf_event *event; 3126 unsigned long flags; 3127 int enabled = 0; 3128 int ret; 3129 3130 local_irq_save(flags); 3131 if (!ctx || !ctx->nr_events) 3132 goto out; 3133 3134 /* 3135 * We must ctxsw out cgroup events to avoid conflict 3136 * when invoking perf_task_event_sched_in() later on 3137 * in this function. Otherwise we end up trying to 3138 * ctxswin cgroup events which are already scheduled 3139 * in. 3140 */ 3141 perf_cgroup_sched_out(current, NULL); 3142 3143 raw_spin_lock(&ctx->lock); 3144 task_ctx_sched_out(ctx); 3145 3146 list_for_each_entry(event, &ctx->event_list, event_entry) { 3147 ret = event_enable_on_exec(event, ctx); 3148 if (ret) 3149 enabled = 1; 3150 } 3151 3152 /* 3153 * Unclone this context if we enabled any event. 
3154 */ 3155 if (enabled) 3156 clone_ctx = unclone_ctx(ctx); 3157 3158 raw_spin_unlock(&ctx->lock); 3159 3160 /* 3161 * Also calls ctxswin for cgroup events, if any: 3162 */ 3163 perf_event_context_sched_in(ctx, ctx->task); 3164 out: 3165 local_irq_restore(flags); 3166 3167 if (clone_ctx) 3168 put_ctx(clone_ctx); 3169 } 3170 3171 void perf_event_exec(void) 3172 { 3173 struct perf_event_context *ctx; 3174 int ctxn; 3175 3176 rcu_read_lock(); 3177 for_each_task_context_nr(ctxn) { 3178 ctx = current->perf_event_ctxp[ctxn]; 3179 if (!ctx) 3180 continue; 3181 3182 perf_event_enable_on_exec(ctx); 3183 } 3184 rcu_read_unlock(); 3185 } 3186 3187 /* 3188 * Cross CPU call to read the hardware event 3189 */ 3190 static void __perf_event_read(void *info) 3191 { 3192 struct perf_event *event = info; 3193 struct perf_event_context *ctx = event->ctx; 3194 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3195 3196 /* 3197 * If this is a task context, we need to check whether it is 3198 * the current task context of this cpu. If not it has been 3199 * scheduled out before the smp call arrived. In that case 3200 * event->count would have been updated to a recent sample 3201 * when the event was scheduled out. 3202 */ 3203 if (ctx->task && cpuctx->task_ctx != ctx) 3204 return; 3205 3206 raw_spin_lock(&ctx->lock); 3207 if (ctx->is_active) { 3208 update_context_time(ctx); 3209 update_cgrp_time_from_event(event); 3210 } 3211 update_event_times(event); 3212 if (event->state == PERF_EVENT_STATE_ACTIVE) 3213 event->pmu->read(event); 3214 raw_spin_unlock(&ctx->lock); 3215 } 3216 3217 static inline u64 perf_event_count(struct perf_event *event) 3218 { 3219 if (event->pmu->count) 3220 return event->pmu->count(event); 3221 3222 return __perf_event_count(event); 3223 } 3224 3225 /* 3226 * NMI-safe method to read a local event, that is an event that 3227 * is: 3228 * - either for the current task, or for this CPU 3229 * - does not have inherit set, for inherited task events 3230 * will not be local and we cannot read them atomically 3231 * - must not have a pmu::count method 3232 */ 3233 u64 perf_event_read_local(struct perf_event *event) 3234 { 3235 unsigned long flags; 3236 u64 val; 3237 3238 /* 3239 * Disabling interrupts avoids all counter scheduling (context 3240 * switches, timer based rotation and IPIs). 3241 */ 3242 local_irq_save(flags); 3243 3244 /* If this is a per-task event, it must be for current */ 3245 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && 3246 event->hw.target != current); 3247 3248 /* If this is a per-CPU event, it must be for this CPU */ 3249 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && 3250 event->cpu != smp_processor_id()); 3251 3252 /* 3253 * It must not be an event with inherit set, we cannot read 3254 * all child counters from atomic context. 3255 */ 3256 WARN_ON_ONCE(event->attr.inherit); 3257 3258 /* 3259 * It must not have a pmu::count method, those are not 3260 * NMI safe. 3261 */ 3262 WARN_ON_ONCE(event->pmu->count); 3263 3264 /* 3265 * If the event is currently on this CPU, its either a per-task event, 3266 * or local to this CPU. Furthermore it means its ACTIVE (otherwise 3267 * oncpu == -1). 
3268 */ 3269 if (event->oncpu == smp_processor_id()) 3270 event->pmu->read(event); 3271 3272 val = local64_read(&event->count); 3273 local_irq_restore(flags); 3274 3275 return val; 3276 } 3277 3278 static u64 perf_event_read(struct perf_event *event) 3279 { 3280 /* 3281 * If event is enabled and currently active on a CPU, update the 3282 * value in the event structure: 3283 */ 3284 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3285 smp_call_function_single(event->oncpu, 3286 __perf_event_read, event, 1); 3287 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3288 struct perf_event_context *ctx = event->ctx; 3289 unsigned long flags; 3290 3291 raw_spin_lock_irqsave(&ctx->lock, flags); 3292 /* 3293 * may read while context is not active 3294 * (e.g., thread is blocked), in that case 3295 * we cannot update context time 3296 */ 3297 if (ctx->is_active) { 3298 update_context_time(ctx); 3299 update_cgrp_time_from_event(event); 3300 } 3301 update_event_times(event); 3302 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3303 } 3304 3305 return perf_event_count(event); 3306 } 3307 3308 /* 3309 * Initialize the perf_event context in a task_struct: 3310 */ 3311 static void __perf_event_init_context(struct perf_event_context *ctx) 3312 { 3313 raw_spin_lock_init(&ctx->lock); 3314 mutex_init(&ctx->mutex); 3315 INIT_LIST_HEAD(&ctx->active_ctx_list); 3316 INIT_LIST_HEAD(&ctx->pinned_groups); 3317 INIT_LIST_HEAD(&ctx->flexible_groups); 3318 INIT_LIST_HEAD(&ctx->event_list); 3319 atomic_set(&ctx->refcount, 1); 3320 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3321 } 3322 3323 static struct perf_event_context * 3324 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3325 { 3326 struct perf_event_context *ctx; 3327 3328 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3329 if (!ctx) 3330 return NULL; 3331 3332 __perf_event_init_context(ctx); 3333 if (task) { 3334 ctx->task = task; 3335 get_task_struct(task); 3336 } 3337 ctx->pmu = pmu; 3338 3339 return ctx; 3340 } 3341 3342 static struct task_struct * 3343 find_lively_task_by_vpid(pid_t vpid) 3344 { 3345 struct task_struct *task; 3346 int err; 3347 3348 rcu_read_lock(); 3349 if (!vpid) 3350 task = current; 3351 else 3352 task = find_task_by_vpid(vpid); 3353 if (task) 3354 get_task_struct(task); 3355 rcu_read_unlock(); 3356 3357 if (!task) 3358 return ERR_PTR(-ESRCH); 3359 3360 /* Reuse ptrace permission checks for now. */ 3361 err = -EACCES; 3362 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3363 goto errout; 3364 3365 return task; 3366 errout: 3367 put_task_struct(task); 3368 return ERR_PTR(err); 3369 3370 } 3371 3372 /* 3373 * Returns a matching context with refcount and pincount. 3374 */ 3375 static struct perf_event_context * 3376 find_get_context(struct pmu *pmu, struct task_struct *task, 3377 struct perf_event *event) 3378 { 3379 struct perf_event_context *ctx, *clone_ctx = NULL; 3380 struct perf_cpu_context *cpuctx; 3381 void *task_ctx_data = NULL; 3382 unsigned long flags; 3383 int ctxn, err; 3384 int cpu = event->cpu; 3385 3386 if (!task) { 3387 /* Must be root to operate on a CPU event: */ 3388 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3389 return ERR_PTR(-EACCES); 3390 3391 /* 3392 * We could be clever and allow to attach a event to an 3393 * offline CPU and activate it when the CPU comes up, but 3394 * that's for later. 
3395 */ 3396 if (!cpu_online(cpu)) 3397 return ERR_PTR(-ENODEV); 3398 3399 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3400 ctx = &cpuctx->ctx; 3401 get_ctx(ctx); 3402 ++ctx->pin_count; 3403 3404 return ctx; 3405 } 3406 3407 err = -EINVAL; 3408 ctxn = pmu->task_ctx_nr; 3409 if (ctxn < 0) 3410 goto errout; 3411 3412 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3413 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3414 if (!task_ctx_data) { 3415 err = -ENOMEM; 3416 goto errout; 3417 } 3418 } 3419 3420 retry: 3421 ctx = perf_lock_task_context(task, ctxn, &flags); 3422 if (ctx) { 3423 clone_ctx = unclone_ctx(ctx); 3424 ++ctx->pin_count; 3425 3426 if (task_ctx_data && !ctx->task_ctx_data) { 3427 ctx->task_ctx_data = task_ctx_data; 3428 task_ctx_data = NULL; 3429 } 3430 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3431 3432 if (clone_ctx) 3433 put_ctx(clone_ctx); 3434 } else { 3435 ctx = alloc_perf_context(pmu, task); 3436 err = -ENOMEM; 3437 if (!ctx) 3438 goto errout; 3439 3440 if (task_ctx_data) { 3441 ctx->task_ctx_data = task_ctx_data; 3442 task_ctx_data = NULL; 3443 } 3444 3445 err = 0; 3446 mutex_lock(&task->perf_event_mutex); 3447 /* 3448 * If it has already passed perf_event_exit_task(). 3449 * we must see PF_EXITING, it takes this mutex too. 3450 */ 3451 if (task->flags & PF_EXITING) 3452 err = -ESRCH; 3453 else if (task->perf_event_ctxp[ctxn]) 3454 err = -EAGAIN; 3455 else { 3456 get_ctx(ctx); 3457 ++ctx->pin_count; 3458 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3459 } 3460 mutex_unlock(&task->perf_event_mutex); 3461 3462 if (unlikely(err)) { 3463 put_ctx(ctx); 3464 3465 if (err == -EAGAIN) 3466 goto retry; 3467 goto errout; 3468 } 3469 } 3470 3471 kfree(task_ctx_data); 3472 return ctx; 3473 3474 errout: 3475 kfree(task_ctx_data); 3476 return ERR_PTR(err); 3477 } 3478 3479 static void perf_event_free_filter(struct perf_event *event); 3480 static void perf_event_free_bpf_prog(struct perf_event *event); 3481 3482 static void free_event_rcu(struct rcu_head *head) 3483 { 3484 struct perf_event *event; 3485 3486 event = container_of(head, struct perf_event, rcu_head); 3487 if (event->ns) 3488 put_pid_ns(event->ns); 3489 perf_event_free_filter(event); 3490 kfree(event); 3491 } 3492 3493 static void ring_buffer_attach(struct perf_event *event, 3494 struct ring_buffer *rb); 3495 3496 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3497 { 3498 if (event->parent) 3499 return; 3500 3501 if (is_cgroup_event(event)) 3502 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3503 } 3504 3505 static void unaccount_event(struct perf_event *event) 3506 { 3507 if (event->parent) 3508 return; 3509 3510 if (event->attach_state & PERF_ATTACH_TASK) 3511 static_key_slow_dec_deferred(&perf_sched_events); 3512 if (event->attr.mmap || event->attr.mmap_data) 3513 atomic_dec(&nr_mmap_events); 3514 if (event->attr.comm) 3515 atomic_dec(&nr_comm_events); 3516 if (event->attr.task) 3517 atomic_dec(&nr_task_events); 3518 if (event->attr.freq) 3519 atomic_dec(&nr_freq_events); 3520 if (event->attr.context_switch) { 3521 static_key_slow_dec_deferred(&perf_sched_events); 3522 atomic_dec(&nr_switch_events); 3523 } 3524 if (is_cgroup_event(event)) 3525 static_key_slow_dec_deferred(&perf_sched_events); 3526 if (has_branch_stack(event)) 3527 static_key_slow_dec_deferred(&perf_sched_events); 3528 3529 unaccount_event_cpu(event, event->cpu); 3530 } 3531 3532 /* 3533 * The following implement mutual exclusion of events on "exclusive" pmus 3534 * (PERF_PMU_CAP_EXCLUSIVE). 
Such pmus can only have one event scheduled 3535 * at a time, so we disallow creating events that might conflict, namely: 3536 * 3537 * 1) cpu-wide events in the presence of per-task events, 3538 * 2) per-task events in the presence of cpu-wide events, 3539 * 3) two matching events on the same context. 3540 * 3541 * The former two cases are handled in the allocation path (perf_event_alloc(), 3542 * __free_event()), the latter -- before the first perf_install_in_context(). 3543 */ 3544 static int exclusive_event_init(struct perf_event *event) 3545 { 3546 struct pmu *pmu = event->pmu; 3547 3548 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3549 return 0; 3550 3551 /* 3552 * Prevent co-existence of per-task and cpu-wide events on the 3553 * same exclusive pmu. 3554 * 3555 * Negative pmu::exclusive_cnt means there are cpu-wide 3556 * events on this "exclusive" pmu, positive means there are 3557 * per-task events. 3558 * 3559 * Since this is called in perf_event_alloc() path, event::ctx 3560 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3561 * to mean "per-task event", because unlike other attach states it 3562 * never gets cleared. 3563 */ 3564 if (event->attach_state & PERF_ATTACH_TASK) { 3565 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3566 return -EBUSY; 3567 } else { 3568 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3569 return -EBUSY; 3570 } 3571 3572 return 0; 3573 } 3574 3575 static void exclusive_event_destroy(struct perf_event *event) 3576 { 3577 struct pmu *pmu = event->pmu; 3578 3579 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3580 return; 3581 3582 /* see comment in exclusive_event_init() */ 3583 if (event->attach_state & PERF_ATTACH_TASK) 3584 atomic_dec(&pmu->exclusive_cnt); 3585 else 3586 atomic_inc(&pmu->exclusive_cnt); 3587 } 3588 3589 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3590 { 3591 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3592 (e1->cpu == e2->cpu || 3593 e1->cpu == -1 || 3594 e2->cpu == -1)) 3595 return true; 3596 return false; 3597 } 3598 3599 /* Called under the same ctx::mutex as perf_install_in_context() */ 3600 static bool exclusive_event_installable(struct perf_event *event, 3601 struct perf_event_context *ctx) 3602 { 3603 struct perf_event *iter_event; 3604 struct pmu *pmu = event->pmu; 3605 3606 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3607 return true; 3608 3609 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3610 if (exclusive_event_match(iter_event, event)) 3611 return false; 3612 } 3613 3614 return true; 3615 } 3616 3617 static void __free_event(struct perf_event *event) 3618 { 3619 if (!event->parent) { 3620 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3621 put_callchain_buffers(); 3622 } 3623 3624 perf_event_free_bpf_prog(event); 3625 3626 if (event->destroy) 3627 event->destroy(event); 3628 3629 if (event->ctx) 3630 put_ctx(event->ctx); 3631 3632 if (event->pmu) { 3633 exclusive_event_destroy(event); 3634 module_put(event->pmu->module); 3635 } 3636 3637 call_rcu(&event->rcu_head, free_event_rcu); 3638 } 3639 3640 static void _free_event(struct perf_event *event) 3641 { 3642 irq_work_sync(&event->pending); 3643 3644 unaccount_event(event); 3645 3646 if (event->rb) { 3647 /* 3648 * Can happen when we close an event with re-directed output. 3649 * 3650 * Since we have a 0 refcount, perf_mmap_close() will skip 3651 * over us; possibly making our ring_buffer_put() the last. 
3652 */ 3653 mutex_lock(&event->mmap_mutex); 3654 ring_buffer_attach(event, NULL); 3655 mutex_unlock(&event->mmap_mutex); 3656 } 3657 3658 if (is_cgroup_event(event)) 3659 perf_detach_cgroup(event); 3660 3661 __free_event(event); 3662 } 3663 3664 /* 3665 * Used to free events which have a known refcount of 1, such as in error paths 3666 * where the event isn't exposed yet and inherited events. 3667 */ 3668 static void free_event(struct perf_event *event) 3669 { 3670 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3671 "unexpected event refcount: %ld; ptr=%p\n", 3672 atomic_long_read(&event->refcount), event)) { 3673 /* leak to avoid use-after-free */ 3674 return; 3675 } 3676 3677 _free_event(event); 3678 } 3679 3680 /* 3681 * Remove user event from the owner task. 3682 */ 3683 static void perf_remove_from_owner(struct perf_event *event) 3684 { 3685 struct task_struct *owner; 3686 3687 rcu_read_lock(); 3688 owner = ACCESS_ONCE(event->owner); 3689 /* 3690 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3691 * !owner it means the list deletion is complete and we can indeed 3692 * free this event, otherwise we need to serialize on 3693 * owner->perf_event_mutex. 3694 */ 3695 smp_read_barrier_depends(); 3696 if (owner) { 3697 /* 3698 * Since delayed_put_task_struct() also drops the last 3699 * task reference we can safely take a new reference 3700 * while holding the rcu_read_lock(). 3701 */ 3702 get_task_struct(owner); 3703 } 3704 rcu_read_unlock(); 3705 3706 if (owner) { 3707 /* 3708 * If we're here through perf_event_exit_task() we're already 3709 * holding ctx->mutex which would be an inversion wrt. the 3710 * normal lock order. 3711 * 3712 * However we can safely take this lock because its the child 3713 * ctx->mutex. 3714 */ 3715 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 3716 3717 /* 3718 * We have to re-check the event->owner field, if it is cleared 3719 * we raced with perf_event_exit_task(), acquiring the mutex 3720 * ensured they're done, and we can proceed with freeing the 3721 * event. 3722 */ 3723 if (event->owner) 3724 list_del_init(&event->owner_entry); 3725 mutex_unlock(&owner->perf_event_mutex); 3726 put_task_struct(owner); 3727 } 3728 } 3729 3730 static void put_event(struct perf_event *event) 3731 { 3732 struct perf_event_context *ctx; 3733 3734 if (!atomic_long_dec_and_test(&event->refcount)) 3735 return; 3736 3737 if (!is_kernel_event(event)) 3738 perf_remove_from_owner(event); 3739 3740 /* 3741 * There are two ways this annotation is useful: 3742 * 3743 * 1) there is a lock recursion from perf_event_exit_task 3744 * see the comment there. 3745 * 3746 * 2) there is a lock-inversion with mmap_sem through 3747 * perf_event_read_group(), which takes faults while 3748 * holding ctx->mutex, however this is called after 3749 * the last filedesc died, so there is no possibility 3750 * to trigger the AB-BA case. 3751 */ 3752 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3753 WARN_ON_ONCE(ctx->parent_ctx); 3754 perf_remove_from_context(event, true); 3755 perf_event_ctx_unlock(event, ctx); 3756 3757 _free_event(event); 3758 } 3759 3760 int perf_event_release_kernel(struct perf_event *event) 3761 { 3762 put_event(event); 3763 return 0; 3764 } 3765 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3766 3767 /* 3768 * Called when the last reference to the file is gone. 
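 *
 * (The in-kernel counterpart is perf_event_release_kernel() above: a
 * kernel-side owner of an event, e.g. one created with
 * perf_event_create_kernel_counter(), drops its reference with
 *
 *	perf_event_release_kernel(event);
 *
 * instead of coming through this file release path.)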
3769 */ 3770 static int perf_release(struct inode *inode, struct file *file) 3771 { 3772 put_event(file->private_data); 3773 return 0; 3774 } 3775 3776 /* 3777 * Remove all orphanes events from the context. 3778 */ 3779 static void orphans_remove_work(struct work_struct *work) 3780 { 3781 struct perf_event_context *ctx; 3782 struct perf_event *event, *tmp; 3783 3784 ctx = container_of(work, struct perf_event_context, 3785 orphans_remove.work); 3786 3787 mutex_lock(&ctx->mutex); 3788 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3789 struct perf_event *parent_event = event->parent; 3790 3791 if (!is_orphaned_child(event)) 3792 continue; 3793 3794 perf_remove_from_context(event, true); 3795 3796 mutex_lock(&parent_event->child_mutex); 3797 list_del_init(&event->child_list); 3798 mutex_unlock(&parent_event->child_mutex); 3799 3800 free_event(event); 3801 put_event(parent_event); 3802 } 3803 3804 raw_spin_lock_irq(&ctx->lock); 3805 ctx->orphans_remove_sched = false; 3806 raw_spin_unlock_irq(&ctx->lock); 3807 mutex_unlock(&ctx->mutex); 3808 3809 put_ctx(ctx); 3810 } 3811 3812 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3813 { 3814 struct perf_event *child; 3815 u64 total = 0; 3816 3817 *enabled = 0; 3818 *running = 0; 3819 3820 mutex_lock(&event->child_mutex); 3821 total += perf_event_read(event); 3822 *enabled += event->total_time_enabled + 3823 atomic64_read(&event->child_total_time_enabled); 3824 *running += event->total_time_running + 3825 atomic64_read(&event->child_total_time_running); 3826 3827 list_for_each_entry(child, &event->child_list, child_list) { 3828 total += perf_event_read(child); 3829 *enabled += child->total_time_enabled; 3830 *running += child->total_time_running; 3831 } 3832 mutex_unlock(&event->child_mutex); 3833 3834 return total; 3835 } 3836 EXPORT_SYMBOL_GPL(perf_event_read_value); 3837 3838 static int perf_event_read_group(struct perf_event *event, 3839 u64 read_format, char __user *buf) 3840 { 3841 struct perf_event *leader = event->group_leader, *sub; 3842 struct perf_event_context *ctx = leader->ctx; 3843 int n = 0, size = 0, ret; 3844 u64 count, enabled, running; 3845 u64 values[5]; 3846 3847 lockdep_assert_held(&ctx->mutex); 3848 3849 count = perf_event_read_value(leader, &enabled, &running); 3850 3851 values[n++] = 1 + leader->nr_siblings; 3852 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3853 values[n++] = enabled; 3854 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3855 values[n++] = running; 3856 values[n++] = count; 3857 if (read_format & PERF_FORMAT_ID) 3858 values[n++] = primary_event_id(leader); 3859 3860 size = n * sizeof(u64); 3861 3862 if (copy_to_user(buf, values, size)) 3863 return -EFAULT; 3864 3865 ret = size; 3866 3867 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3868 n = 0; 3869 3870 values[n++] = perf_event_read_value(sub, &enabled, &running); 3871 if (read_format & PERF_FORMAT_ID) 3872 values[n++] = primary_event_id(sub); 3873 3874 size = n * sizeof(u64); 3875 3876 if (copy_to_user(buf + ret, values, size)) { 3877 return -EFAULT; 3878 } 3879 3880 ret += size; 3881 } 3882 3883 return ret; 3884 } 3885 3886 static int perf_event_read_one(struct perf_event *event, 3887 u64 read_format, char __user *buf) 3888 { 3889 u64 enabled, running; 3890 u64 values[4]; 3891 int n = 0; 3892 3893 values[n++] = perf_event_read_value(event, &enabled, &running); 3894 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3895 values[n++] = enabled; 3896 if (read_format & 
PERF_FORMAT_TOTAL_TIME_RUNNING) 3897 values[n++] = running; 3898 if (read_format & PERF_FORMAT_ID) 3899 values[n++] = primary_event_id(event); 3900 3901 if (copy_to_user(buf, values, n * sizeof(u64))) 3902 return -EFAULT; 3903 3904 return n * sizeof(u64); 3905 } 3906 3907 static bool is_event_hup(struct perf_event *event) 3908 { 3909 bool no_children; 3910 3911 if (event->state != PERF_EVENT_STATE_EXIT) 3912 return false; 3913 3914 mutex_lock(&event->child_mutex); 3915 no_children = list_empty(&event->child_list); 3916 mutex_unlock(&event->child_mutex); 3917 return no_children; 3918 } 3919 3920 /* 3921 * Read the performance event - simple non blocking version for now 3922 */ 3923 static ssize_t 3924 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3925 { 3926 u64 read_format = event->attr.read_format; 3927 int ret; 3928 3929 /* 3930 * Return end-of-file for a read on a event that is in 3931 * error state (i.e. because it was pinned but it couldn't be 3932 * scheduled on to the CPU at some point). 3933 */ 3934 if (event->state == PERF_EVENT_STATE_ERROR) 3935 return 0; 3936 3937 if (count < event->read_size) 3938 return -ENOSPC; 3939 3940 WARN_ON_ONCE(event->ctx->parent_ctx); 3941 if (read_format & PERF_FORMAT_GROUP) 3942 ret = perf_event_read_group(event, read_format, buf); 3943 else 3944 ret = perf_event_read_one(event, read_format, buf); 3945 3946 return ret; 3947 } 3948 3949 static ssize_t 3950 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3951 { 3952 struct perf_event *event = file->private_data; 3953 struct perf_event_context *ctx; 3954 int ret; 3955 3956 ctx = perf_event_ctx_lock(event); 3957 ret = perf_read_hw(event, buf, count); 3958 perf_event_ctx_unlock(event, ctx); 3959 3960 return ret; 3961 } 3962 3963 static unsigned int perf_poll(struct file *file, poll_table *wait) 3964 { 3965 struct perf_event *event = file->private_data; 3966 struct ring_buffer *rb; 3967 unsigned int events = POLLHUP; 3968 3969 poll_wait(file, &event->waitq, wait); 3970 3971 if (is_event_hup(event)) 3972 return events; 3973 3974 /* 3975 * Pin the event->rb by taking event->mmap_mutex; otherwise 3976 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3977 */ 3978 mutex_lock(&event->mmap_mutex); 3979 rb = event->rb; 3980 if (rb) 3981 events = atomic_xchg(&rb->poll, 0); 3982 mutex_unlock(&event->mmap_mutex); 3983 return events; 3984 } 3985 3986 static void _perf_event_reset(struct perf_event *event) 3987 { 3988 (void)perf_event_read(event); 3989 local64_set(&event->count, 0); 3990 perf_event_update_userpage(event); 3991 } 3992 3993 /* 3994 * Holding the top-level event's child_mutex means that any 3995 * descendant process that has inherited this event will block 3996 * in sync_child_event if it goes to exit, thus satisfying the 3997 * task existence requirements of perf_event_enable/disable. 
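 *
 * (This is what lets the ioctl path below fan a request out safely; e.g.
 * PERF_EVENT_IOC_RESET without PERF_IOC_FLAG_GROUP ends up doing
 *
 *	perf_event_for_each_child(event, _perf_event_reset);
 *
 * and every inherited child is reset without racing with its exit.)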
3998 */ 3999 static void perf_event_for_each_child(struct perf_event *event, 4000 void (*func)(struct perf_event *)) 4001 { 4002 struct perf_event *child; 4003 4004 WARN_ON_ONCE(event->ctx->parent_ctx); 4005 4006 mutex_lock(&event->child_mutex); 4007 func(event); 4008 list_for_each_entry(child, &event->child_list, child_list) 4009 func(child); 4010 mutex_unlock(&event->child_mutex); 4011 } 4012 4013 static void perf_event_for_each(struct perf_event *event, 4014 void (*func)(struct perf_event *)) 4015 { 4016 struct perf_event_context *ctx = event->ctx; 4017 struct perf_event *sibling; 4018 4019 lockdep_assert_held(&ctx->mutex); 4020 4021 event = event->group_leader; 4022 4023 perf_event_for_each_child(event, func); 4024 list_for_each_entry(sibling, &event->sibling_list, group_entry) 4025 perf_event_for_each_child(sibling, func); 4026 } 4027 4028 struct period_event { 4029 struct perf_event *event; 4030 u64 value; 4031 }; 4032 4033 static int __perf_event_period(void *info) 4034 { 4035 struct period_event *pe = info; 4036 struct perf_event *event = pe->event; 4037 struct perf_event_context *ctx = event->ctx; 4038 u64 value = pe->value; 4039 bool active; 4040 4041 raw_spin_lock(&ctx->lock); 4042 if (event->attr.freq) { 4043 event->attr.sample_freq = value; 4044 } else { 4045 event->attr.sample_period = value; 4046 event->hw.sample_period = value; 4047 } 4048 4049 active = (event->state == PERF_EVENT_STATE_ACTIVE); 4050 if (active) { 4051 perf_pmu_disable(ctx->pmu); 4052 event->pmu->stop(event, PERF_EF_UPDATE); 4053 } 4054 4055 local64_set(&event->hw.period_left, 0); 4056 4057 if (active) { 4058 event->pmu->start(event, PERF_EF_RELOAD); 4059 perf_pmu_enable(ctx->pmu); 4060 } 4061 raw_spin_unlock(&ctx->lock); 4062 4063 return 0; 4064 } 4065 4066 static int perf_event_period(struct perf_event *event, u64 __user *arg) 4067 { 4068 struct period_event pe = { .event = event, }; 4069 struct perf_event_context *ctx = event->ctx; 4070 struct task_struct *task; 4071 u64 value; 4072 4073 if (!is_sampling_event(event)) 4074 return -EINVAL; 4075 4076 if (copy_from_user(&value, arg, sizeof(value))) 4077 return -EFAULT; 4078 4079 if (!value) 4080 return -EINVAL; 4081 4082 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4083 return -EINVAL; 4084 4085 task = ctx->task; 4086 pe.value = value; 4087 4088 if (!task) { 4089 cpu_function_call(event->cpu, __perf_event_period, &pe); 4090 return 0; 4091 } 4092 4093 retry: 4094 if (!task_function_call(task, __perf_event_period, &pe)) 4095 return 0; 4096 4097 raw_spin_lock_irq(&ctx->lock); 4098 if (ctx->is_active) { 4099 raw_spin_unlock_irq(&ctx->lock); 4100 task = ctx->task; 4101 goto retry; 4102 } 4103 4104 __perf_event_period(&pe); 4105 raw_spin_unlock_irq(&ctx->lock); 4106 4107 return 0; 4108 } 4109 4110 static const struct file_operations perf_fops; 4111 4112 static inline int perf_fget_light(int fd, struct fd *p) 4113 { 4114 struct fd f = fdget(fd); 4115 if (!f.file) 4116 return -EBADF; 4117 4118 if (f.file->f_op != &perf_fops) { 4119 fdput(f); 4120 return -EBADF; 4121 } 4122 *p = f; 4123 return 0; 4124 } 4125 4126 static int perf_event_set_output(struct perf_event *event, 4127 struct perf_event *output_event); 4128 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4129 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4130 4131 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4132 { 4133 void (*func)(struct perf_event *); 4134 u32 flags = arg; 4135 4136 switch 
(cmd) {
4137 case PERF_EVENT_IOC_ENABLE:
4138 func = _perf_event_enable;
4139 break;
4140 case PERF_EVENT_IOC_DISABLE:
4141 func = _perf_event_disable;
4142 break;
4143 case PERF_EVENT_IOC_RESET:
4144 func = _perf_event_reset;
4145 break;
4146
4147 case PERF_EVENT_IOC_REFRESH:
4148 return _perf_event_refresh(event, arg);
4149
4150 case PERF_EVENT_IOC_PERIOD:
4151 return perf_event_period(event, (u64 __user *)arg);
4152
4153 case PERF_EVENT_IOC_ID:
4154 {
4155 u64 id = primary_event_id(event);
4156
4157 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4158 return -EFAULT;
4159 return 0;
4160 }
4161
4162 case PERF_EVENT_IOC_SET_OUTPUT:
4163 {
4164 int ret;
4165 if (arg != -1) {
4166 struct perf_event *output_event;
4167 struct fd output;
4168 ret = perf_fget_light(arg, &output);
4169 if (ret)
4170 return ret;
4171 output_event = output.file->private_data;
4172 ret = perf_event_set_output(event, output_event);
4173 fdput(output);
4174 } else {
4175 ret = perf_event_set_output(event, NULL);
4176 }
4177 return ret;
4178 }
4179
4180 case PERF_EVENT_IOC_SET_FILTER:
4181 return perf_event_set_filter(event, (void __user *)arg);
4182
4183 case PERF_EVENT_IOC_SET_BPF:
4184 return perf_event_set_bpf_prog(event, arg);
4185
4186 default:
4187 return -ENOTTY;
4188 }
4189
4190 if (flags & PERF_IOC_FLAG_GROUP)
4191 perf_event_for_each(event, func);
4192 else
4193 perf_event_for_each_child(event, func);
4194
4195 return 0;
4196 }
4197
4198 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4199 {
4200 struct perf_event *event = file->private_data;
4201 struct perf_event_context *ctx;
4202 long ret;
4203
4204 ctx = perf_event_ctx_lock(event);
4205 ret = _perf_ioctl(event, cmd, arg);
4206 perf_event_ctx_unlock(event, ctx);
4207
4208 return ret;
4209 }
4210
4211 #ifdef CONFIG_COMPAT
4212 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4213 unsigned long arg)
4214 {
4215 switch (_IOC_NR(cmd)) {
4216 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4217 case _IOC_NR(PERF_EVENT_IOC_ID):
4218 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4219 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4220 cmd &= ~IOCSIZE_MASK;
4221 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4222 }
4223 break;
4224 }
4225 return perf_ioctl(file, cmd, arg);
4226 }
4227 #else
4228 # define perf_compat_ioctl NULL
4229 #endif
4230
4231 int perf_event_task_enable(void)
4232 {
4233 struct perf_event_context *ctx;
4234 struct perf_event *event;
4235
4236 mutex_lock(&current->perf_event_mutex);
4237 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4238 ctx = perf_event_ctx_lock(event);
4239 perf_event_for_each_child(event, _perf_event_enable);
4240 perf_event_ctx_unlock(event, ctx);
4241 }
4242 mutex_unlock(&current->perf_event_mutex);
4243
4244 return 0;
4245 }
4246
4247 int perf_event_task_disable(void)
4248 {
4249 struct perf_event_context *ctx;
4250 struct perf_event *event;
4251
4252 mutex_lock(&current->perf_event_mutex);
4253 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4254 ctx = perf_event_ctx_lock(event);
4255 perf_event_for_each_child(event, _perf_event_disable);
4256 perf_event_ctx_unlock(event, ctx);
4257 }
4258 mutex_unlock(&current->perf_event_mutex);
4259
4260 return 0;
4261 }
4262
4263 static int perf_event_index(struct perf_event *event)
4264 {
4265 if (event->hw.state & PERF_HES_STOPPED)
4266 return 0;
4267
4268 if (event->state != PERF_EVENT_STATE_ACTIVE)
4269 return 0;
4270
4271 return event->pmu->event_idx(event);
4272 }
4273
4274 static void
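
/*
 * Example (illustrative sketch): the PERF_IOC_FLAG_GROUP handling in
 * _perf_ioctl() means enable/disable/reset can be applied to a whole event
 * group through the leader's descriptor ("group_fd" is hypothetical):
 *
 *	ioctl(group_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * perf_event_task_enable()/perf_event_task_disable() above are reached via
 * prctl(PR_TASK_PERF_EVENTS_ENABLE) / prctl(PR_TASK_PERF_EVENTS_DISABLE)
 * and act on every counter owned by the calling task.
 */
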
calc_timer_values(struct perf_event *event,
4275 u64 *now,
4276 u64 *enabled,
4277 u64 *running)
4278 {
4279 u64 ctx_time;
4280
4281 *now = perf_clock();
4282 ctx_time = event->shadow_ctx_time + *now;
4283 *enabled = ctx_time - event->tstamp_enabled;
4284 *running = ctx_time - event->tstamp_running;
4285 }
4286
4287 static void perf_event_init_userpage(struct perf_event *event)
4288 {
4289 struct perf_event_mmap_page *userpg;
4290 struct ring_buffer *rb;
4291
4292 rcu_read_lock();
4293 rb = rcu_dereference(event->rb);
4294 if (!rb)
4295 goto unlock;
4296
4297 userpg = rb->user_page;
4298
4299 /* Allow new userspace to detect that bit 0 is deprecated */
4300 userpg->cap_bit0_is_deprecated = 1;
4301 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4302 userpg->data_offset = PAGE_SIZE;
4303 userpg->data_size = perf_data_size(rb);
4304
4305 unlock:
4306 rcu_read_unlock();
4307 }
4308
4309 void __weak arch_perf_update_userpage(
4310 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4311 {
4312 }
4313
4314 /*
4315 * Callers need to ensure there can be no nesting of this function, otherwise
4316 * the seqlock logic goes bad. We cannot serialize this because the arch
4317 * code calls this from NMI context.
4318 */
4319 void perf_event_update_userpage(struct perf_event *event)
4320 {
4321 struct perf_event_mmap_page *userpg;
4322 struct ring_buffer *rb;
4323 u64 enabled, running, now;
4324
4325 rcu_read_lock();
4326 rb = rcu_dereference(event->rb);
4327 if (!rb)
4328 goto unlock;
4329
4330 /*
4331 * compute total_time_enabled, total_time_running
4332 * based on snapshot values taken when the event
4333 * was last scheduled in.
4334 *
4335 * we cannot simply call update_context_time()
4336 * because of locking issues, as we can be called in
4337 * NMI context
4338 */
4339 calc_timer_values(event, &now, &enabled, &running);
4340
4341 userpg = rb->user_page;
4342 /*
4343 * Disable preemption so as to not let the corresponding user-space
4344 * spin too long if we get preempted.
4345 */ 4346 preempt_disable(); 4347 ++userpg->lock; 4348 barrier(); 4349 userpg->index = perf_event_index(event); 4350 userpg->offset = perf_event_count(event); 4351 if (userpg->index) 4352 userpg->offset -= local64_read(&event->hw.prev_count); 4353 4354 userpg->time_enabled = enabled + 4355 atomic64_read(&event->child_total_time_enabled); 4356 4357 userpg->time_running = running + 4358 atomic64_read(&event->child_total_time_running); 4359 4360 arch_perf_update_userpage(event, userpg, now); 4361 4362 barrier(); 4363 ++userpg->lock; 4364 preempt_enable(); 4365 unlock: 4366 rcu_read_unlock(); 4367 } 4368 4369 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4370 { 4371 struct perf_event *event = vma->vm_file->private_data; 4372 struct ring_buffer *rb; 4373 int ret = VM_FAULT_SIGBUS; 4374 4375 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4376 if (vmf->pgoff == 0) 4377 ret = 0; 4378 return ret; 4379 } 4380 4381 rcu_read_lock(); 4382 rb = rcu_dereference(event->rb); 4383 if (!rb) 4384 goto unlock; 4385 4386 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4387 goto unlock; 4388 4389 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4390 if (!vmf->page) 4391 goto unlock; 4392 4393 get_page(vmf->page); 4394 vmf->page->mapping = vma->vm_file->f_mapping; 4395 vmf->page->index = vmf->pgoff; 4396 4397 ret = 0; 4398 unlock: 4399 rcu_read_unlock(); 4400 4401 return ret; 4402 } 4403 4404 static void ring_buffer_attach(struct perf_event *event, 4405 struct ring_buffer *rb) 4406 { 4407 struct ring_buffer *old_rb = NULL; 4408 unsigned long flags; 4409 4410 if (event->rb) { 4411 /* 4412 * Should be impossible, we set this when removing 4413 * event->rb_entry and wait/clear when adding event->rb_entry. 4414 */ 4415 WARN_ON_ONCE(event->rcu_pending); 4416 4417 old_rb = event->rb; 4418 spin_lock_irqsave(&old_rb->event_lock, flags); 4419 list_del_rcu(&event->rb_entry); 4420 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4421 4422 event->rcu_batches = get_state_synchronize_rcu(); 4423 event->rcu_pending = 1; 4424 } 4425 4426 if (rb) { 4427 if (event->rcu_pending) { 4428 cond_synchronize_rcu(event->rcu_batches); 4429 event->rcu_pending = 0; 4430 } 4431 4432 spin_lock_irqsave(&rb->event_lock, flags); 4433 list_add_rcu(&event->rb_entry, &rb->event_list); 4434 spin_unlock_irqrestore(&rb->event_lock, flags); 4435 } 4436 4437 rcu_assign_pointer(event->rb, rb); 4438 4439 if (old_rb) { 4440 ring_buffer_put(old_rb); 4441 /* 4442 * Since we detached before setting the new rb, so that we 4443 * could attach the new rb, we could have missed a wakeup. 4444 * Provide it now. 
4445 */ 4446 wake_up_all(&event->waitq); 4447 } 4448 } 4449 4450 static void ring_buffer_wakeup(struct perf_event *event) 4451 { 4452 struct ring_buffer *rb; 4453 4454 rcu_read_lock(); 4455 rb = rcu_dereference(event->rb); 4456 if (rb) { 4457 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4458 wake_up_all(&event->waitq); 4459 } 4460 rcu_read_unlock(); 4461 } 4462 4463 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4464 { 4465 struct ring_buffer *rb; 4466 4467 rcu_read_lock(); 4468 rb = rcu_dereference(event->rb); 4469 if (rb) { 4470 if (!atomic_inc_not_zero(&rb->refcount)) 4471 rb = NULL; 4472 } 4473 rcu_read_unlock(); 4474 4475 return rb; 4476 } 4477 4478 void ring_buffer_put(struct ring_buffer *rb) 4479 { 4480 if (!atomic_dec_and_test(&rb->refcount)) 4481 return; 4482 4483 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4484 4485 call_rcu(&rb->rcu_head, rb_free_rcu); 4486 } 4487 4488 static void perf_mmap_open(struct vm_area_struct *vma) 4489 { 4490 struct perf_event *event = vma->vm_file->private_data; 4491 4492 atomic_inc(&event->mmap_count); 4493 atomic_inc(&event->rb->mmap_count); 4494 4495 if (vma->vm_pgoff) 4496 atomic_inc(&event->rb->aux_mmap_count); 4497 4498 if (event->pmu->event_mapped) 4499 event->pmu->event_mapped(event); 4500 } 4501 4502 /* 4503 * A buffer can be mmap()ed multiple times; either directly through the same 4504 * event, or through other events by use of perf_event_set_output(). 4505 * 4506 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4507 * the buffer here, where we still have a VM context. This means we need 4508 * to detach all events redirecting to us. 4509 */ 4510 static void perf_mmap_close(struct vm_area_struct *vma) 4511 { 4512 struct perf_event *event = vma->vm_file->private_data; 4513 4514 struct ring_buffer *rb = ring_buffer_get(event); 4515 struct user_struct *mmap_user = rb->mmap_user; 4516 int mmap_locked = rb->mmap_locked; 4517 unsigned long size = perf_data_size(rb); 4518 4519 if (event->pmu->event_unmapped) 4520 event->pmu->event_unmapped(event); 4521 4522 /* 4523 * rb->aux_mmap_count will always drop before rb->mmap_count and 4524 * event->mmap_count, so it is ok to use event->mmap_mutex to 4525 * serialize with perf_mmap here. 4526 */ 4527 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4528 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4529 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4530 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4531 4532 rb_free_aux(rb); 4533 mutex_unlock(&event->mmap_mutex); 4534 } 4535 4536 atomic_dec(&rb->mmap_count); 4537 4538 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4539 goto out_put; 4540 4541 ring_buffer_attach(event, NULL); 4542 mutex_unlock(&event->mmap_mutex); 4543 4544 /* If there's still other mmap()s of this buffer, we're done. */ 4545 if (atomic_read(&rb->mmap_count)) 4546 goto out_put; 4547 4548 /* 4549 * No other mmap()s, detach from all other events that might redirect 4550 * into the now unreachable buffer. Somewhat complicated by the 4551 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4552 */ 4553 again: 4554 rcu_read_lock(); 4555 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4556 if (!atomic_long_inc_not_zero(&event->refcount)) { 4557 /* 4558 * This event is en-route to free_event() which will 4559 * detach it and remove it from the list. 
4560 */ 4561 continue; 4562 } 4563 rcu_read_unlock(); 4564 4565 mutex_lock(&event->mmap_mutex); 4566 /* 4567 * Check we didn't race with perf_event_set_output() which can 4568 * swizzle the rb from under us while we were waiting to 4569 * acquire mmap_mutex. 4570 * 4571 * If we find a different rb; ignore this event, a next 4572 * iteration will no longer find it on the list. We have to 4573 * still restart the iteration to make sure we're not now 4574 * iterating the wrong list. 4575 */ 4576 if (event->rb == rb) 4577 ring_buffer_attach(event, NULL); 4578 4579 mutex_unlock(&event->mmap_mutex); 4580 put_event(event); 4581 4582 /* 4583 * Restart the iteration; either we're on the wrong list or 4584 * destroyed its integrity by doing a deletion. 4585 */ 4586 goto again; 4587 } 4588 rcu_read_unlock(); 4589 4590 /* 4591 * It could be there's still a few 0-ref events on the list; they'll 4592 * get cleaned up by free_event() -- they'll also still have their 4593 * ref on the rb and will free it whenever they are done with it. 4594 * 4595 * Aside from that, this buffer is 'fully' detached and unmapped, 4596 * undo the VM accounting. 4597 */ 4598 4599 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4600 vma->vm_mm->pinned_vm -= mmap_locked; 4601 free_uid(mmap_user); 4602 4603 out_put: 4604 ring_buffer_put(rb); /* could be last */ 4605 } 4606 4607 static const struct vm_operations_struct perf_mmap_vmops = { 4608 .open = perf_mmap_open, 4609 .close = perf_mmap_close, /* non mergable */ 4610 .fault = perf_mmap_fault, 4611 .page_mkwrite = perf_mmap_fault, 4612 }; 4613 4614 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4615 { 4616 struct perf_event *event = file->private_data; 4617 unsigned long user_locked, user_lock_limit; 4618 struct user_struct *user = current_user(); 4619 unsigned long locked, lock_limit; 4620 struct ring_buffer *rb = NULL; 4621 unsigned long vma_size; 4622 unsigned long nr_pages; 4623 long user_extra = 0, extra = 0; 4624 int ret = 0, flags = 0; 4625 4626 /* 4627 * Don't allow mmap() of inherited per-task counters. This would 4628 * create a performance issue due to all children writing to the 4629 * same rb. 4630 */ 4631 if (event->cpu == -1 && event->attr.inherit) 4632 return -EINVAL; 4633 4634 if (!(vma->vm_flags & VM_SHARED)) 4635 return -EINVAL; 4636 4637 vma_size = vma->vm_end - vma->vm_start; 4638 4639 if (vma->vm_pgoff == 0) { 4640 nr_pages = (vma_size / PAGE_SIZE) - 1; 4641 } else { 4642 /* 4643 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4644 * mapped, all subsequent mappings should have the same size 4645 * and offset. Must be above the normal perf buffer. 
4646 */ 4647 u64 aux_offset, aux_size; 4648 4649 if (!event->rb) 4650 return -EINVAL; 4651 4652 nr_pages = vma_size / PAGE_SIZE; 4653 4654 mutex_lock(&event->mmap_mutex); 4655 ret = -EINVAL; 4656 4657 rb = event->rb; 4658 if (!rb) 4659 goto aux_unlock; 4660 4661 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4662 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4663 4664 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4665 goto aux_unlock; 4666 4667 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4668 goto aux_unlock; 4669 4670 /* already mapped with a different offset */ 4671 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4672 goto aux_unlock; 4673 4674 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4675 goto aux_unlock; 4676 4677 /* already mapped with a different size */ 4678 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4679 goto aux_unlock; 4680 4681 if (!is_power_of_2(nr_pages)) 4682 goto aux_unlock; 4683 4684 if (!atomic_inc_not_zero(&rb->mmap_count)) 4685 goto aux_unlock; 4686 4687 if (rb_has_aux(rb)) { 4688 atomic_inc(&rb->aux_mmap_count); 4689 ret = 0; 4690 goto unlock; 4691 } 4692 4693 atomic_set(&rb->aux_mmap_count, 1); 4694 user_extra = nr_pages; 4695 4696 goto accounting; 4697 } 4698 4699 /* 4700 * If we have rb pages ensure they're a power-of-two number, so we 4701 * can do bitmasks instead of modulo. 4702 */ 4703 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4704 return -EINVAL; 4705 4706 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4707 return -EINVAL; 4708 4709 WARN_ON_ONCE(event->ctx->parent_ctx); 4710 again: 4711 mutex_lock(&event->mmap_mutex); 4712 if (event->rb) { 4713 if (event->rb->nr_pages != nr_pages) { 4714 ret = -EINVAL; 4715 goto unlock; 4716 } 4717 4718 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4719 /* 4720 * Raced against perf_mmap_close() through 4721 * perf_event_set_output(). Try again, hope for better 4722 * luck. 4723 */ 4724 mutex_unlock(&event->mmap_mutex); 4725 goto again; 4726 } 4727 4728 goto unlock; 4729 } 4730 4731 user_extra = nr_pages + 1; 4732 4733 accounting: 4734 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4735 4736 /* 4737 * Increase the limit linearly with more CPUs: 4738 */ 4739 user_lock_limit *= num_online_cpus(); 4740 4741 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4742 4743 if (user_locked > user_lock_limit) 4744 extra = user_locked - user_lock_limit; 4745 4746 lock_limit = rlimit(RLIMIT_MEMLOCK); 4747 lock_limit >>= PAGE_SHIFT; 4748 locked = vma->vm_mm->pinned_vm + extra; 4749 4750 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4751 !capable(CAP_IPC_LOCK)) { 4752 ret = -EPERM; 4753 goto unlock; 4754 } 4755 4756 WARN_ON(!rb && event->rb); 4757 4758 if (vma->vm_flags & VM_WRITE) 4759 flags |= RING_BUFFER_WRITABLE; 4760 4761 if (!rb) { 4762 rb = rb_alloc(nr_pages, 4763 event->attr.watermark ? 
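
/*
 * Example (illustrative sketch): the checks in perf_mmap() require the data
 * mapping to be one control page plus a power-of-two number of data pages,
 * mapped shared.  With hypothetical "fd" and "page_size" variables:
 *
 *	size_t n   = 8;					// 2^3 data pages
 *	size_t len = (1 + n) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * The first page is the perf_event_mmap_page control page; an AUX area, if
 * used, is mapped by a second mmap() at the offset published in
 * user_page->aux_offset, which the aux branch above validates.
 */
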
event->attr.wakeup_watermark : 0, 4764 event->cpu, flags); 4765 4766 if (!rb) { 4767 ret = -ENOMEM; 4768 goto unlock; 4769 } 4770 4771 atomic_set(&rb->mmap_count, 1); 4772 rb->mmap_user = get_current_user(); 4773 rb->mmap_locked = extra; 4774 4775 ring_buffer_attach(event, rb); 4776 4777 perf_event_init_userpage(event); 4778 perf_event_update_userpage(event); 4779 } else { 4780 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4781 event->attr.aux_watermark, flags); 4782 if (!ret) 4783 rb->aux_mmap_locked = extra; 4784 } 4785 4786 unlock: 4787 if (!ret) { 4788 atomic_long_add(user_extra, &user->locked_vm); 4789 vma->vm_mm->pinned_vm += extra; 4790 4791 atomic_inc(&event->mmap_count); 4792 } else if (rb) { 4793 atomic_dec(&rb->mmap_count); 4794 } 4795 aux_unlock: 4796 mutex_unlock(&event->mmap_mutex); 4797 4798 /* 4799 * Since pinned accounting is per vm we cannot allow fork() to copy our 4800 * vma. 4801 */ 4802 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4803 vma->vm_ops = &perf_mmap_vmops; 4804 4805 if (event->pmu->event_mapped) 4806 event->pmu->event_mapped(event); 4807 4808 return ret; 4809 } 4810 4811 static int perf_fasync(int fd, struct file *filp, int on) 4812 { 4813 struct inode *inode = file_inode(filp); 4814 struct perf_event *event = filp->private_data; 4815 int retval; 4816 4817 mutex_lock(&inode->i_mutex); 4818 retval = fasync_helper(fd, filp, on, &event->fasync); 4819 mutex_unlock(&inode->i_mutex); 4820 4821 if (retval < 0) 4822 return retval; 4823 4824 return 0; 4825 } 4826 4827 static const struct file_operations perf_fops = { 4828 .llseek = no_llseek, 4829 .release = perf_release, 4830 .read = perf_read, 4831 .poll = perf_poll, 4832 .unlocked_ioctl = perf_ioctl, 4833 .compat_ioctl = perf_compat_ioctl, 4834 .mmap = perf_mmap, 4835 .fasync = perf_fasync, 4836 }; 4837 4838 /* 4839 * Perf event wakeup 4840 * 4841 * If there's data, ensure we set the poll() state and publish everything 4842 * to user-space before waking everybody up. 4843 */ 4844 4845 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) 4846 { 4847 /* only the parent has fasync state */ 4848 if (event->parent) 4849 event = event->parent; 4850 return &event->fasync; 4851 } 4852 4853 void perf_event_wakeup(struct perf_event *event) 4854 { 4855 ring_buffer_wakeup(event); 4856 4857 if (event->pending_kill) { 4858 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); 4859 event->pending_kill = 0; 4860 } 4861 } 4862 4863 static void perf_pending_event(struct irq_work *entry) 4864 { 4865 struct perf_event *event = container_of(entry, 4866 struct perf_event, pending); 4867 int rctx; 4868 4869 rctx = perf_swevent_get_recursion_context(); 4870 /* 4871 * If we 'fail' here, that's OK, it means recursion is already disabled 4872 * and we won't recurse 'further'. 4873 */ 4874 4875 if (event->pending_disable) { 4876 event->pending_disable = 0; 4877 __perf_event_disable(event); 4878 } 4879 4880 if (event->pending_wakeup) { 4881 event->pending_wakeup = 0; 4882 perf_event_wakeup(event); 4883 } 4884 4885 if (rctx >= 0) 4886 perf_swevent_put_recursion_context(rctx); 4887 } 4888 4889 /* 4890 * We assume there is only KVM supporting the callbacks. 4891 * Later on, we might change it to a list if there is 4892 * another virtualization implementation supporting the callbacks. 
4893 */
4894 struct perf_guest_info_callbacks *perf_guest_cbs;
4895
4896 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4897 {
4898 perf_guest_cbs = cbs;
4899 return 0;
4900 }
4901 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4902
4903 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4904 {
4905 perf_guest_cbs = NULL;
4906 return 0;
4907 }
4908 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4909
4910 static void
4911 perf_output_sample_regs(struct perf_output_handle *handle,
4912 struct pt_regs *regs, u64 mask)
4913 {
4914 int bit;
4915
4916 for_each_set_bit(bit, (const unsigned long *) &mask,
4917 sizeof(mask) * BITS_PER_BYTE) {
4918 u64 val;
4919
4920 val = perf_reg_value(regs, bit);
4921 perf_output_put(handle, val);
4922 }
4923 }
4924
4925 static void perf_sample_regs_user(struct perf_regs *regs_user,
4926 struct pt_regs *regs,
4927 struct pt_regs *regs_user_copy)
4928 {
4929 if (user_mode(regs)) {
4930 regs_user->abi = perf_reg_abi(current);
4931 regs_user->regs = regs;
4932 } else if (current->mm) {
4933 perf_get_regs_user(regs_user, regs, regs_user_copy);
4934 } else {
4935 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
4936 regs_user->regs = NULL;
4937 }
4938 }
4939
4940 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
4941 struct pt_regs *regs)
4942 {
4943 regs_intr->regs = regs;
4944 regs_intr->abi = perf_reg_abi(current);
4945 }
4946
4947
4948 /*
4949 * Get remaining task size from user stack pointer.
4950 *
4951 * It'd be better to take the stack vma map and limit this more
4952 * precisely, but there's no way to get it safely under interrupt,
4953 * so we use TASK_SIZE as the limit.
4954 */
4955 static u64 perf_ustack_task_size(struct pt_regs *regs)
4956 {
4957 unsigned long addr = perf_user_stack_pointer(regs);
4958
4959 if (!addr || addr >= TASK_SIZE)
4960 return 0;
4961
4962 return TASK_SIZE - addr;
4963 }
4964
4965 static u16
4966 perf_sample_ustack_size(u16 stack_size, u16 header_size,
4967 struct pt_regs *regs)
4968 {
4969 u64 task_size;
4970
4971 /* No regs, no stack pointer, no dump. */
4972 if (!regs)
4973 return 0;
4974
4975 /*
4976 * Check whether the requested stack size fits into:
4977 * - TASK_SIZE
4978 * If it doesn't, we limit the size to TASK_SIZE.
4979 *
4980 * - the remaining sample size
4981 * If it doesn't, we shrink the stack size to
4982 * fit into the remaining sample size.
4983 */
4984
4985 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4986 stack_size = min(stack_size, (u16) task_size);
4987
4988 /* Current header size plus static size and dynamic size. */
4989 header_size += 2 * sizeof(u64);
4990
4991 /* Do we fit in with the current stack dump size? */
4992 if ((u16) (header_size + stack_size) < header_size) {
4993 /*
4994 * If we overflow the maximum size for the sample,
4995 * we shrink the stack dump size to fit.
4996 */ 4997 stack_size = USHRT_MAX - header_size - sizeof(u64); 4998 stack_size = round_up(stack_size, sizeof(u64)); 4999 } 5000 5001 return stack_size; 5002 } 5003 5004 static void 5005 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 5006 struct pt_regs *regs) 5007 { 5008 /* Case of a kernel thread, nothing to dump */ 5009 if (!regs) { 5010 u64 size = 0; 5011 perf_output_put(handle, size); 5012 } else { 5013 unsigned long sp; 5014 unsigned int rem; 5015 u64 dyn_size; 5016 5017 /* 5018 * We dump: 5019 * static size 5020 * - the size requested by user or the best one we can fit 5021 * in to the sample max size 5022 * data 5023 * - user stack dump data 5024 * dynamic size 5025 * - the actual dumped size 5026 */ 5027 5028 /* Static size. */ 5029 perf_output_put(handle, dump_size); 5030 5031 /* Data. */ 5032 sp = perf_user_stack_pointer(regs); 5033 rem = __output_copy_user(handle, (void *) sp, dump_size); 5034 dyn_size = dump_size - rem; 5035 5036 perf_output_skip(handle, rem); 5037 5038 /* Dynamic size. */ 5039 perf_output_put(handle, dyn_size); 5040 } 5041 } 5042 5043 static void __perf_event_header__init_id(struct perf_event_header *header, 5044 struct perf_sample_data *data, 5045 struct perf_event *event) 5046 { 5047 u64 sample_type = event->attr.sample_type; 5048 5049 data->type = sample_type; 5050 header->size += event->id_header_size; 5051 5052 if (sample_type & PERF_SAMPLE_TID) { 5053 /* namespace issues */ 5054 data->tid_entry.pid = perf_event_pid(event, current); 5055 data->tid_entry.tid = perf_event_tid(event, current); 5056 } 5057 5058 if (sample_type & PERF_SAMPLE_TIME) 5059 data->time = perf_event_clock(event); 5060 5061 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 5062 data->id = primary_event_id(event); 5063 5064 if (sample_type & PERF_SAMPLE_STREAM_ID) 5065 data->stream_id = event->id; 5066 5067 if (sample_type & PERF_SAMPLE_CPU) { 5068 data->cpu_entry.cpu = raw_smp_processor_id(); 5069 data->cpu_entry.reserved = 0; 5070 } 5071 } 5072 5073 void perf_event_header__init_id(struct perf_event_header *header, 5074 struct perf_sample_data *data, 5075 struct perf_event *event) 5076 { 5077 if (event->attr.sample_id_all) 5078 __perf_event_header__init_id(header, data, event); 5079 } 5080 5081 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 5082 struct perf_sample_data *data) 5083 { 5084 u64 sample_type = data->type; 5085 5086 if (sample_type & PERF_SAMPLE_TID) 5087 perf_output_put(handle, data->tid_entry); 5088 5089 if (sample_type & PERF_SAMPLE_TIME) 5090 perf_output_put(handle, data->time); 5091 5092 if (sample_type & PERF_SAMPLE_ID) 5093 perf_output_put(handle, data->id); 5094 5095 if (sample_type & PERF_SAMPLE_STREAM_ID) 5096 perf_output_put(handle, data->stream_id); 5097 5098 if (sample_type & PERF_SAMPLE_CPU) 5099 perf_output_put(handle, data->cpu_entry); 5100 5101 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5102 perf_output_put(handle, data->id); 5103 } 5104 5105 void perf_event__output_id_sample(struct perf_event *event, 5106 struct perf_output_handle *handle, 5107 struct perf_sample_data *sample) 5108 { 5109 if (event->attr.sample_id_all) 5110 __perf_event__output_id_sample(handle, sample); 5111 } 5112 5113 static void perf_output_read_one(struct perf_output_handle *handle, 5114 struct perf_event *event, 5115 u64 enabled, u64 running) 5116 { 5117 u64 read_format = event->attr.read_format; 5118 u64 values[4]; 5119 int n = 0; 5120 5121 values[n++] = perf_event_count(event); 5122 if (read_format & 
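
/*
 * Example (illustrative sketch): the values emitted by perf_output_read_one()
 * follow attr.read_format, so with PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID a consumer would decode:
 *
 *	struct read_one {			// hypothetical helper struct
 *		__u64 value;			// perf_event_count()
 *		__u64 time_enabled;		// incl. child time enabled
 *		__u64 time_running;		// incl. child time running
 *		__u64 id;			// primary_event_id()
 *	};
 *
 * Fields not requested in read_format are simply absent from the stream.
 */
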
PERF_FORMAT_TOTAL_TIME_ENABLED) { 5123 values[n++] = enabled + 5124 atomic64_read(&event->child_total_time_enabled); 5125 } 5126 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5127 values[n++] = running + 5128 atomic64_read(&event->child_total_time_running); 5129 } 5130 if (read_format & PERF_FORMAT_ID) 5131 values[n++] = primary_event_id(event); 5132 5133 __output_copy(handle, values, n * sizeof(u64)); 5134 } 5135 5136 /* 5137 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5138 */ 5139 static void perf_output_read_group(struct perf_output_handle *handle, 5140 struct perf_event *event, 5141 u64 enabled, u64 running) 5142 { 5143 struct perf_event *leader = event->group_leader, *sub; 5144 u64 read_format = event->attr.read_format; 5145 u64 values[5]; 5146 int n = 0; 5147 5148 values[n++] = 1 + leader->nr_siblings; 5149 5150 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5151 values[n++] = enabled; 5152 5153 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5154 values[n++] = running; 5155 5156 if (leader != event) 5157 leader->pmu->read(leader); 5158 5159 values[n++] = perf_event_count(leader); 5160 if (read_format & PERF_FORMAT_ID) 5161 values[n++] = primary_event_id(leader); 5162 5163 __output_copy(handle, values, n * sizeof(u64)); 5164 5165 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5166 n = 0; 5167 5168 if ((sub != event) && 5169 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5170 sub->pmu->read(sub); 5171 5172 values[n++] = perf_event_count(sub); 5173 if (read_format & PERF_FORMAT_ID) 5174 values[n++] = primary_event_id(sub); 5175 5176 __output_copy(handle, values, n * sizeof(u64)); 5177 } 5178 } 5179 5180 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5181 PERF_FORMAT_TOTAL_TIME_RUNNING) 5182 5183 static void perf_output_read(struct perf_output_handle *handle, 5184 struct perf_event *event) 5185 { 5186 u64 enabled = 0, running = 0, now; 5187 u64 read_format = event->attr.read_format; 5188 5189 /* 5190 * compute total_time_enabled, total_time_running 5191 * based on snapshot values taken when the event 5192 * was last scheduled in. 
5193 *
5194 * we cannot simply call update_context_time()
5195 * because of locking issues, as we are called in
5196 * NMI context
5197 */
5198 if (read_format & PERF_FORMAT_TOTAL_TIMES)
5199 calc_timer_values(event, &now, &enabled, &running);
5200
5201 if (event->attr.read_format & PERF_FORMAT_GROUP)
5202 perf_output_read_group(handle, event, enabled, running);
5203 else
5204 perf_output_read_one(handle, event, enabled, running);
5205 }
5206
5207 void perf_output_sample(struct perf_output_handle *handle,
5208 struct perf_event_header *header,
5209 struct perf_sample_data *data,
5210 struct perf_event *event)
5211 {
5212 u64 sample_type = data->type;
5213
5214 perf_output_put(handle, *header);
5215
5216 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5217 perf_output_put(handle, data->id);
5218
5219 if (sample_type & PERF_SAMPLE_IP)
5220 perf_output_put(handle, data->ip);
5221
5222 if (sample_type & PERF_SAMPLE_TID)
5223 perf_output_put(handle, data->tid_entry);
5224
5225 if (sample_type & PERF_SAMPLE_TIME)
5226 perf_output_put(handle, data->time);
5227
5228 if (sample_type & PERF_SAMPLE_ADDR)
5229 perf_output_put(handle, data->addr);
5230
5231 if (sample_type & PERF_SAMPLE_ID)
5232 perf_output_put(handle, data->id);
5233
5234 if (sample_type & PERF_SAMPLE_STREAM_ID)
5235 perf_output_put(handle, data->stream_id);
5236
5237 if (sample_type & PERF_SAMPLE_CPU)
5238 perf_output_put(handle, data->cpu_entry);
5239
5240 if (sample_type & PERF_SAMPLE_PERIOD)
5241 perf_output_put(handle, data->period);
5242
5243 if (sample_type & PERF_SAMPLE_READ)
5244 perf_output_read(handle, event);
5245
5246 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5247 if (data->callchain) {
5248 int size = 1;
5249
5250 if (data->callchain)
5251 size += data->callchain->nr;
5252
5253 size *= sizeof(u64);
5254
5255 __output_copy(handle, data->callchain, size);
5256 } else {
5257 u64 nr = 0;
5258 perf_output_put(handle, nr);
5259 }
5260 }
5261
5262 if (sample_type & PERF_SAMPLE_RAW) {
5263 if (data->raw) {
5264 perf_output_put(handle, data->raw->size);
5265 __output_copy(handle, data->raw->data,
5266 data->raw->size);
5267 } else {
5268 struct {
5269 u32 size;
5270 u32 data;
5271 } raw = {
5272 .size = sizeof(u32),
5273 .data = 0,
5274 };
5275 perf_output_put(handle, raw);
5276 }
5277 }
5278
5279 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5280 if (data->br_stack) {
5281 size_t size;
5282
5283 size = data->br_stack->nr
5284 * sizeof(struct perf_branch_entry);
5285
5286 perf_output_put(handle, data->br_stack->nr);
5287 perf_output_copy(handle, data->br_stack->entries, size);
5288 } else {
5289 /*
5290 * we always store at least the value of nr
5291 */
5292 u64 nr = 0;
5293 perf_output_put(handle, nr);
5294 }
5295 }
5296
5297 if (sample_type & PERF_SAMPLE_REGS_USER) {
5298 u64 abi = data->regs_user.abi;
5299
5300 /*
5301 * If there are no regs to dump, notice it through
5302 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5303 */
5304 perf_output_put(handle, abi);
5305
5306 if (abi) {
5307 u64 mask = event->attr.sample_regs_user;
5308 perf_output_sample_regs(handle,
5309 data->regs_user.regs,
5310 mask);
5311 }
5312 }
5313
5314 if (sample_type & PERF_SAMPLE_STACK_USER) {
5315 perf_output_sample_ustack(handle,
5316 data->stack_user_size,
5317 data->regs_user.regs);
5318 }
5319
5320 if (sample_type & PERF_SAMPLE_WEIGHT)
5321 perf_output_put(handle, data->weight);
5322
5323 if (sample_type & PERF_SAMPLE_DATA_SRC)
5324 perf_output_put(handle, data->data_src.val);
5325
5326 if (sample_type & PERF_SAMPLE_TRANSACTION)
5327 perf_output_put(handle, data->txn);
5328
5329 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5330 u64 abi = data->regs_intr.abi;
5331 /*
5332 * If there are no regs to dump, notice it through
5333 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5334 */
5335 perf_output_put(handle, abi);
5336
5337 if (abi) {
5338 u64 mask = event->attr.sample_regs_intr;
5339
5340 perf_output_sample_regs(handle,
5341 data->regs_intr.regs,
5342 mask);
5343 }
5344 }
5345
5346 if (!event->attr.watermark) {
5347 int wakeup_events = event->attr.wakeup_events;
5348
5349 if (wakeup_events) {
5350 struct ring_buffer *rb = handle->rb;
5351 int events = local_inc_return(&rb->events);
5352
5353 if (events >= wakeup_events) {
5354 local_sub(wakeup_events, &rb->events);
5355 local_inc(&rb->wakeup);
5356 }
5357 }
5358 }
5359 }
5360
5361 void perf_prepare_sample(struct perf_event_header *header,
5362 struct perf_sample_data *data,
5363 struct perf_event *event,
5364 struct pt_regs *regs)
5365 {
5366 u64 sample_type = event->attr.sample_type;
5367
5368 header->type = PERF_RECORD_SAMPLE;
5369 header->size = sizeof(*header) + event->header_size;
5370
5371 header->misc = 0;
5372 header->misc |= perf_misc_flags(regs);
5373
5374 __perf_event_header__init_id(header, data, event);
5375
5376 if (sample_type & PERF_SAMPLE_IP)
5377 data->ip = perf_instruction_pointer(regs);
5378
5379 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5380 int size = 1;
5381
5382 data->callchain = perf_callchain(event, regs);
5383
5384 if (data->callchain)
5385 size += data->callchain->nr;
5386
5387 header->size += size * sizeof(u64);
5388 }
5389
5390 if (sample_type & PERF_SAMPLE_RAW) {
5391 int size = sizeof(u32);
5392
5393 if (data->raw)
5394 size += data->raw->size;
5395 else
5396 size += sizeof(u32);
5397
5398 WARN_ON_ONCE(size & (sizeof(u64)-1));
5399 header->size += size;
5400 }
5401
5402 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5403 int size = sizeof(u64); /* nr */
5404 if (data->br_stack) {
5405 size += data->br_stack->nr
5406 * sizeof(struct perf_branch_entry);
5407 }
5408 header->size += size;
5409 }
5410
5411 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
5412 perf_sample_regs_user(&data->regs_user, regs,
5413 &data->regs_user_copy);
5414
5415 if (sample_type & PERF_SAMPLE_REGS_USER) {
5416 /* regs dump ABI info */
5417 int size = sizeof(u64);
5418
5419 if (data->regs_user.regs) {
5420 u64 mask = event->attr.sample_regs_user;
5421 size += hweight64(mask) * sizeof(u64);
5422 }
5423
5424 header->size += size;
5425 }
5426
5427 if (sample_type & PERF_SAMPLE_STACK_USER) {
5428 /*
5429 * The PERF_SAMPLE_STACK_USER bit either needs to always be
5430 * processed as the last sample type, or an additional check
5431 * must be added when a new sample type is introduced, because
5432 * the user stack dump could eat up the rest of the sample size.
5433 */ 5434 u16 stack_size = event->attr.sample_stack_user; 5435 u16 size = sizeof(u64); 5436 5437 stack_size = perf_sample_ustack_size(stack_size, header->size, 5438 data->regs_user.regs); 5439 5440 /* 5441 * If there is something to dump, add space for the dump 5442 * itself and for the field that tells the dynamic size, 5443 * which is how many have been actually dumped. 5444 */ 5445 if (stack_size) 5446 size += sizeof(u64) + stack_size; 5447 5448 data->stack_user_size = stack_size; 5449 header->size += size; 5450 } 5451 5452 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5453 /* regs dump ABI info */ 5454 int size = sizeof(u64); 5455 5456 perf_sample_regs_intr(&data->regs_intr, regs); 5457 5458 if (data->regs_intr.regs) { 5459 u64 mask = event->attr.sample_regs_intr; 5460 5461 size += hweight64(mask) * sizeof(u64); 5462 } 5463 5464 header->size += size; 5465 } 5466 } 5467 5468 void perf_event_output(struct perf_event *event, 5469 struct perf_sample_data *data, 5470 struct pt_regs *regs) 5471 { 5472 struct perf_output_handle handle; 5473 struct perf_event_header header; 5474 5475 /* protect the callchain buffers */ 5476 rcu_read_lock(); 5477 5478 perf_prepare_sample(&header, data, event, regs); 5479 5480 if (perf_output_begin(&handle, event, header.size)) 5481 goto exit; 5482 5483 perf_output_sample(&handle, &header, data, event); 5484 5485 perf_output_end(&handle); 5486 5487 exit: 5488 rcu_read_unlock(); 5489 } 5490 5491 /* 5492 * read event_id 5493 */ 5494 5495 struct perf_read_event { 5496 struct perf_event_header header; 5497 5498 u32 pid; 5499 u32 tid; 5500 }; 5501 5502 static void 5503 perf_event_read_event(struct perf_event *event, 5504 struct task_struct *task) 5505 { 5506 struct perf_output_handle handle; 5507 struct perf_sample_data sample; 5508 struct perf_read_event read_event = { 5509 .header = { 5510 .type = PERF_RECORD_READ, 5511 .misc = 0, 5512 .size = sizeof(read_event) + event->read_size, 5513 }, 5514 .pid = perf_event_pid(event, task), 5515 .tid = perf_event_tid(event, task), 5516 }; 5517 int ret; 5518 5519 perf_event_header__init_id(&read_event.header, &sample, event); 5520 ret = perf_output_begin(&handle, event, read_event.header.size); 5521 if (ret) 5522 return; 5523 5524 perf_output_put(&handle, read_event); 5525 perf_output_read(&handle, event); 5526 perf_event__output_id_sample(event, &handle, &sample); 5527 5528 perf_output_end(&handle); 5529 } 5530 5531 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5532 5533 static void 5534 perf_event_aux_ctx(struct perf_event_context *ctx, 5535 perf_event_aux_output_cb output, 5536 void *data) 5537 { 5538 struct perf_event *event; 5539 5540 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5541 if (event->state < PERF_EVENT_STATE_INACTIVE) 5542 continue; 5543 if (!event_filter_match(event)) 5544 continue; 5545 output(event, data); 5546 } 5547 } 5548 5549 static void 5550 perf_event_aux(perf_event_aux_output_cb output, void *data, 5551 struct perf_event_context *task_ctx) 5552 { 5553 struct perf_cpu_context *cpuctx; 5554 struct perf_event_context *ctx; 5555 struct pmu *pmu; 5556 int ctxn; 5557 5558 rcu_read_lock(); 5559 list_for_each_entry_rcu(pmu, &pmus, entry) { 5560 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5561 if (cpuctx->unique_pmu != pmu) 5562 goto next; 5563 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5564 if (task_ctx) 5565 goto next; 5566 ctxn = pmu->task_ctx_nr; 5567 if (ctxn < 0) 5568 goto next; 5569 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5570 if 
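
/*
 * Example (illustrative sketch): perf_event_aux() fans a side-band record out
 * to every matching context; whether an event receives such records is chosen
 * by attribute bits at perf_event_open() time, e.g.:
 *
 *	struct perf_event_attr attr = { 0 };	// hypothetical setup
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_DUMMY;
 *	attr.task      = 1;	// PERF_RECORD_FORK / PERF_RECORD_EXIT
 *	attr.comm      = 1;	// PERF_RECORD_COMM
 *	attr.mmap      = 1;	// PERF_RECORD_MMAP, executable mappings
 *	attr.mmap_data = 1;	// ...data mappings as well
 */
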
(ctx) 5571 perf_event_aux_ctx(ctx, output, data); 5572 next: 5573 put_cpu_ptr(pmu->pmu_cpu_context); 5574 } 5575 5576 if (task_ctx) { 5577 preempt_disable(); 5578 perf_event_aux_ctx(task_ctx, output, data); 5579 preempt_enable(); 5580 } 5581 rcu_read_unlock(); 5582 } 5583 5584 /* 5585 * task tracking -- fork/exit 5586 * 5587 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5588 */ 5589 5590 struct perf_task_event { 5591 struct task_struct *task; 5592 struct perf_event_context *task_ctx; 5593 5594 struct { 5595 struct perf_event_header header; 5596 5597 u32 pid; 5598 u32 ppid; 5599 u32 tid; 5600 u32 ptid; 5601 u64 time; 5602 } event_id; 5603 }; 5604 5605 static int perf_event_task_match(struct perf_event *event) 5606 { 5607 return event->attr.comm || event->attr.mmap || 5608 event->attr.mmap2 || event->attr.mmap_data || 5609 event->attr.task; 5610 } 5611 5612 static void perf_event_task_output(struct perf_event *event, 5613 void *data) 5614 { 5615 struct perf_task_event *task_event = data; 5616 struct perf_output_handle handle; 5617 struct perf_sample_data sample; 5618 struct task_struct *task = task_event->task; 5619 int ret, size = task_event->event_id.header.size; 5620 5621 if (!perf_event_task_match(event)) 5622 return; 5623 5624 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5625 5626 ret = perf_output_begin(&handle, event, 5627 task_event->event_id.header.size); 5628 if (ret) 5629 goto out; 5630 5631 task_event->event_id.pid = perf_event_pid(event, task); 5632 task_event->event_id.ppid = perf_event_pid(event, current); 5633 5634 task_event->event_id.tid = perf_event_tid(event, task); 5635 task_event->event_id.ptid = perf_event_tid(event, current); 5636 5637 task_event->event_id.time = perf_event_clock(event); 5638 5639 perf_output_put(&handle, task_event->event_id); 5640 5641 perf_event__output_id_sample(event, &handle, &sample); 5642 5643 perf_output_end(&handle); 5644 out: 5645 task_event->event_id.header.size = size; 5646 } 5647 5648 static void perf_event_task(struct task_struct *task, 5649 struct perf_event_context *task_ctx, 5650 int new) 5651 { 5652 struct perf_task_event task_event; 5653 5654 if (!atomic_read(&nr_comm_events) && 5655 !atomic_read(&nr_mmap_events) && 5656 !atomic_read(&nr_task_events)) 5657 return; 5658 5659 task_event = (struct perf_task_event){ 5660 .task = task, 5661 .task_ctx = task_ctx, 5662 .event_id = { 5663 .header = { 5664 .type = new ? 
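
/*
 * Example (illustrative sketch): the event_id built here is what a consumer
 * sees as PERF_RECORD_FORK or PERF_RECORD_EXIT in the ring buffer, roughly
 * ("task_record" is a hypothetical name):
 *
 *	struct task_record {
 *		struct perf_event_header header;
 *		__u32 pid, ppid;
 *		__u32 tid, ptid;
 *		__u64 time;
 *		// then sample_id fields if attr.sample_id_all is set
 *	};
 */
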
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5665 .misc = 0, 5666 .size = sizeof(task_event.event_id), 5667 }, 5668 /* .pid */ 5669 /* .ppid */ 5670 /* .tid */ 5671 /* .ptid */ 5672 /* .time */ 5673 }, 5674 }; 5675 5676 perf_event_aux(perf_event_task_output, 5677 &task_event, 5678 task_ctx); 5679 } 5680 5681 void perf_event_fork(struct task_struct *task) 5682 { 5683 perf_event_task(task, NULL, 1); 5684 } 5685 5686 /* 5687 * comm tracking 5688 */ 5689 5690 struct perf_comm_event { 5691 struct task_struct *task; 5692 char *comm; 5693 int comm_size; 5694 5695 struct { 5696 struct perf_event_header header; 5697 5698 u32 pid; 5699 u32 tid; 5700 } event_id; 5701 }; 5702 5703 static int perf_event_comm_match(struct perf_event *event) 5704 { 5705 return event->attr.comm; 5706 } 5707 5708 static void perf_event_comm_output(struct perf_event *event, 5709 void *data) 5710 { 5711 struct perf_comm_event *comm_event = data; 5712 struct perf_output_handle handle; 5713 struct perf_sample_data sample; 5714 int size = comm_event->event_id.header.size; 5715 int ret; 5716 5717 if (!perf_event_comm_match(event)) 5718 return; 5719 5720 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5721 ret = perf_output_begin(&handle, event, 5722 comm_event->event_id.header.size); 5723 5724 if (ret) 5725 goto out; 5726 5727 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5728 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5729 5730 perf_output_put(&handle, comm_event->event_id); 5731 __output_copy(&handle, comm_event->comm, 5732 comm_event->comm_size); 5733 5734 perf_event__output_id_sample(event, &handle, &sample); 5735 5736 perf_output_end(&handle); 5737 out: 5738 comm_event->event_id.header.size = size; 5739 } 5740 5741 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5742 { 5743 char comm[TASK_COMM_LEN]; 5744 unsigned int size; 5745 5746 memset(comm, 0, sizeof(comm)); 5747 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5748 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5749 5750 comm_event->comm = comm; 5751 comm_event->comm_size = size; 5752 5753 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5754 5755 perf_event_aux(perf_event_comm_output, 5756 comm_event, 5757 NULL); 5758 } 5759 5760 void perf_event_comm(struct task_struct *task, bool exec) 5761 { 5762 struct perf_comm_event comm_event; 5763 5764 if (!atomic_read(&nr_comm_events)) 5765 return; 5766 5767 comm_event = (struct perf_comm_event){ 5768 .task = task, 5769 /* .comm */ 5770 /* .comm_size */ 5771 .event_id = { 5772 .header = { 5773 .type = PERF_RECORD_COMM, 5774 .misc = exec ? 
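
/*
 * Example (illustrative sketch): PERF_RECORD_COMM as emitted by
 * perf_event_comm_output() above lays out roughly as ("comm_record" is a
 * hypothetical name):
 *
 *	struct comm_record {
 *		struct perf_event_header header;  // misc may carry
 *						  // PERF_RECORD_MISC_COMM_EXEC
 *		__u32 pid, tid;
 *		char  comm[];	// NUL-padded to a multiple of 8 bytes
 *		// then sample_id fields if attr.sample_id_all is set
 *	};
 */
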
PERF_RECORD_MISC_COMM_EXEC : 0, 5775 /* .size */ 5776 }, 5777 /* .pid */ 5778 /* .tid */ 5779 }, 5780 }; 5781 5782 perf_event_comm_event(&comm_event); 5783 } 5784 5785 /* 5786 * mmap tracking 5787 */ 5788 5789 struct perf_mmap_event { 5790 struct vm_area_struct *vma; 5791 5792 const char *file_name; 5793 int file_size; 5794 int maj, min; 5795 u64 ino; 5796 u64 ino_generation; 5797 u32 prot, flags; 5798 5799 struct { 5800 struct perf_event_header header; 5801 5802 u32 pid; 5803 u32 tid; 5804 u64 start; 5805 u64 len; 5806 u64 pgoff; 5807 } event_id; 5808 }; 5809 5810 static int perf_event_mmap_match(struct perf_event *event, 5811 void *data) 5812 { 5813 struct perf_mmap_event *mmap_event = data; 5814 struct vm_area_struct *vma = mmap_event->vma; 5815 int executable = vma->vm_flags & VM_EXEC; 5816 5817 return (!executable && event->attr.mmap_data) || 5818 (executable && (event->attr.mmap || event->attr.mmap2)); 5819 } 5820 5821 static void perf_event_mmap_output(struct perf_event *event, 5822 void *data) 5823 { 5824 struct perf_mmap_event *mmap_event = data; 5825 struct perf_output_handle handle; 5826 struct perf_sample_data sample; 5827 int size = mmap_event->event_id.header.size; 5828 int ret; 5829 5830 if (!perf_event_mmap_match(event, data)) 5831 return; 5832 5833 if (event->attr.mmap2) { 5834 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5835 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5836 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5837 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5838 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5839 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5840 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5841 } 5842 5843 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5844 ret = perf_output_begin(&handle, event, 5845 mmap_event->event_id.header.size); 5846 if (ret) 5847 goto out; 5848 5849 mmap_event->event_id.pid = perf_event_pid(event, current); 5850 mmap_event->event_id.tid = perf_event_tid(event, current); 5851 5852 perf_output_put(&handle, mmap_event->event_id); 5853 5854 if (event->attr.mmap2) { 5855 perf_output_put(&handle, mmap_event->maj); 5856 perf_output_put(&handle, mmap_event->min); 5857 perf_output_put(&handle, mmap_event->ino); 5858 perf_output_put(&handle, mmap_event->ino_generation); 5859 perf_output_put(&handle, mmap_event->prot); 5860 perf_output_put(&handle, mmap_event->flags); 5861 } 5862 5863 __output_copy(&handle, mmap_event->file_name, 5864 mmap_event->file_size); 5865 5866 perf_event__output_id_sample(event, &handle, &sample); 5867 5868 perf_output_end(&handle); 5869 out: 5870 mmap_event->event_id.header.size = size; 5871 } 5872 5873 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5874 { 5875 struct vm_area_struct *vma = mmap_event->vma; 5876 struct file *file = vma->vm_file; 5877 int maj = 0, min = 0; 5878 u64 ino = 0, gen = 0; 5879 u32 prot = 0, flags = 0; 5880 unsigned int size; 5881 char tmp[16]; 5882 char *buf = NULL; 5883 char *name; 5884 5885 if (file) { 5886 struct inode *inode; 5887 dev_t dev; 5888 5889 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5890 if (!buf) { 5891 name = "//enomem"; 5892 goto cpy_name; 5893 } 5894 /* 5895 * d_path() works from the end of the rb backwards, so we 5896 * need to add enough zero bytes after the string to handle 5897 * the 64bit alignment we do later. 
5898 */ 5899 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 5900 if (IS_ERR(name)) { 5901 name = "//toolong"; 5902 goto cpy_name; 5903 } 5904 inode = file_inode(vma->vm_file); 5905 dev = inode->i_sb->s_dev; 5906 ino = inode->i_ino; 5907 gen = inode->i_generation; 5908 maj = MAJOR(dev); 5909 min = MINOR(dev); 5910 5911 if (vma->vm_flags & VM_READ) 5912 prot |= PROT_READ; 5913 if (vma->vm_flags & VM_WRITE) 5914 prot |= PROT_WRITE; 5915 if (vma->vm_flags & VM_EXEC) 5916 prot |= PROT_EXEC; 5917 5918 if (vma->vm_flags & VM_MAYSHARE) 5919 flags = MAP_SHARED; 5920 else 5921 flags = MAP_PRIVATE; 5922 5923 if (vma->vm_flags & VM_DENYWRITE) 5924 flags |= MAP_DENYWRITE; 5925 if (vma->vm_flags & VM_MAYEXEC) 5926 flags |= MAP_EXECUTABLE; 5927 if (vma->vm_flags & VM_LOCKED) 5928 flags |= MAP_LOCKED; 5929 if (vma->vm_flags & VM_HUGETLB) 5930 flags |= MAP_HUGETLB; 5931 5932 goto got_name; 5933 } else { 5934 if (vma->vm_ops && vma->vm_ops->name) { 5935 name = (char *) vma->vm_ops->name(vma); 5936 if (name) 5937 goto cpy_name; 5938 } 5939 5940 name = (char *)arch_vma_name(vma); 5941 if (name) 5942 goto cpy_name; 5943 5944 if (vma->vm_start <= vma->vm_mm->start_brk && 5945 vma->vm_end >= vma->vm_mm->brk) { 5946 name = "[heap]"; 5947 goto cpy_name; 5948 } 5949 if (vma->vm_start <= vma->vm_mm->start_stack && 5950 vma->vm_end >= vma->vm_mm->start_stack) { 5951 name = "[stack]"; 5952 goto cpy_name; 5953 } 5954 5955 name = "//anon"; 5956 goto cpy_name; 5957 } 5958 5959 cpy_name: 5960 strlcpy(tmp, name, sizeof(tmp)); 5961 name = tmp; 5962 got_name: 5963 /* 5964 * Since our buffer works in 8 byte units we need to align our string 5965 * size to a multiple of 8. However, we must guarantee the tail end is 5966 * zero'd out to avoid leaking random bits to userspace. 5967 */ 5968 size = strlen(name)+1; 5969 while (!IS_ALIGNED(size, sizeof(u64))) 5970 name[size++] = '\0'; 5971 5972 mmap_event->file_name = name; 5973 mmap_event->file_size = size; 5974 mmap_event->maj = maj; 5975 mmap_event->min = min; 5976 mmap_event->ino = ino; 5977 mmap_event->ino_generation = gen; 5978 mmap_event->prot = prot; 5979 mmap_event->flags = flags; 5980 5981 if (!(vma->vm_flags & VM_EXEC)) 5982 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 5983 5984 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5985 5986 perf_event_aux(perf_event_mmap_output, 5987 mmap_event, 5988 NULL); 5989 5990 kfree(buf); 5991 } 5992 5993 void perf_event_mmap(struct vm_area_struct *vma) 5994 { 5995 struct perf_mmap_event mmap_event; 5996 5997 if (!atomic_read(&nr_mmap_events)) 5998 return; 5999 6000 mmap_event = (struct perf_mmap_event){ 6001 .vma = vma, 6002 /* .file_name */ 6003 /* .file_size */ 6004 .event_id = { 6005 .header = { 6006 .type = PERF_RECORD_MMAP, 6007 .misc = PERF_RECORD_MISC_USER, 6008 /* .size */ 6009 }, 6010 /* .pid */ 6011 /* .tid */ 6012 .start = vma->vm_start, 6013 .len = vma->vm_end - vma->vm_start, 6014 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 6015 }, 6016 /* .maj (attr_mmap2 only) */ 6017 /* .min (attr_mmap2 only) */ 6018 /* .ino (attr_mmap2 only) */ 6019 /* .ino_generation (attr_mmap2 only) */ 6020 /* .prot (attr_mmap2 only) */ 6021 /* .flags (attr_mmap2 only) */ 6022 }; 6023 6024 perf_event_mmap_event(&mmap_event); 6025 } 6026 6027 void perf_event_aux_event(struct perf_event *event, unsigned long head, 6028 unsigned long size, u64 flags) 6029 { 6030 struct perf_output_handle handle; 6031 struct perf_sample_data sample; 6032 struct perf_aux_event { 6033 struct perf_event_header header; 6034 
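
/*
 * Example (illustrative sketch): the PERF_RECORD_AUX record being assembled
 * here tells user space that [offset, offset + size) of the AUX mapping now
 * contains data.  A consumer typically pairs it with the aux_head/aux_tail
 * fields of the mapped perf_event_mmap_page ("user_page" is hypothetical):
 *
 *	__u64 head = user_page->aux_head;	// load-acquire in real code
 *	// ...consume bytes between user_page->aux_tail and head...
 *	user_page->aux_tail = head;		// store-release in real code
 */
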
u64 offset; 6035 u64 size; 6036 u64 flags; 6037 } rec = { 6038 .header = { 6039 .type = PERF_RECORD_AUX, 6040 .misc = 0, 6041 .size = sizeof(rec), 6042 }, 6043 .offset = head, 6044 .size = size, 6045 .flags = flags, 6046 }; 6047 int ret; 6048 6049 perf_event_header__init_id(&rec.header, &sample, event); 6050 ret = perf_output_begin(&handle, event, rec.header.size); 6051 6052 if (ret) 6053 return; 6054 6055 perf_output_put(&handle, rec); 6056 perf_event__output_id_sample(event, &handle, &sample); 6057 6058 perf_output_end(&handle); 6059 } 6060 6061 /* 6062 * Lost/dropped samples logging 6063 */ 6064 void perf_log_lost_samples(struct perf_event *event, u64 lost) 6065 { 6066 struct perf_output_handle handle; 6067 struct perf_sample_data sample; 6068 int ret; 6069 6070 struct { 6071 struct perf_event_header header; 6072 u64 lost; 6073 } lost_samples_event = { 6074 .header = { 6075 .type = PERF_RECORD_LOST_SAMPLES, 6076 .misc = 0, 6077 .size = sizeof(lost_samples_event), 6078 }, 6079 .lost = lost, 6080 }; 6081 6082 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 6083 6084 ret = perf_output_begin(&handle, event, 6085 lost_samples_event.header.size); 6086 if (ret) 6087 return; 6088 6089 perf_output_put(&handle, lost_samples_event); 6090 perf_event__output_id_sample(event, &handle, &sample); 6091 perf_output_end(&handle); 6092 } 6093 6094 /* 6095 * context_switch tracking 6096 */ 6097 6098 struct perf_switch_event { 6099 struct task_struct *task; 6100 struct task_struct *next_prev; 6101 6102 struct { 6103 struct perf_event_header header; 6104 u32 next_prev_pid; 6105 u32 next_prev_tid; 6106 } event_id; 6107 }; 6108 6109 static int perf_event_switch_match(struct perf_event *event) 6110 { 6111 return event->attr.context_switch; 6112 } 6113 6114 static void perf_event_switch_output(struct perf_event *event, void *data) 6115 { 6116 struct perf_switch_event *se = data; 6117 struct perf_output_handle handle; 6118 struct perf_sample_data sample; 6119 int ret; 6120 6121 if (!perf_event_switch_match(event)) 6122 return; 6123 6124 /* Only CPU-wide events are allowed to see next/prev pid/tid */ 6125 if (event->ctx->task) { 6126 se->event_id.header.type = PERF_RECORD_SWITCH; 6127 se->event_id.header.size = sizeof(se->event_id.header); 6128 } else { 6129 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; 6130 se->event_id.header.size = sizeof(se->event_id); 6131 se->event_id.next_prev_pid = 6132 perf_event_pid(event, se->next_prev); 6133 se->event_id.next_prev_tid = 6134 perf_event_tid(event, se->next_prev); 6135 } 6136 6137 perf_event_header__init_id(&se->event_id.header, &sample, event); 6138 6139 ret = perf_output_begin(&handle, event, se->event_id.header.size); 6140 if (ret) 6141 return; 6142 6143 if (event->ctx->task) 6144 perf_output_put(&handle, se->event_id.header); 6145 else 6146 perf_output_put(&handle, se->event_id); 6147 6148 perf_event__output_id_sample(event, &handle, &sample); 6149 6150 perf_output_end(&handle); 6151 } 6152 6153 static void perf_event_switch(struct task_struct *task, 6154 struct task_struct *next_prev, bool sched_in) 6155 { 6156 struct perf_switch_event switch_event; 6157 6158 /* N.B. caller checks nr_switch_events != 0 */ 6159 6160 switch_event = (struct perf_switch_event){ 6161 .task = task, 6162 .next_prev = next_prev, 6163 .event_id = { 6164 .header = { 6165 /* .type */ 6166 .misc = sched_in ? 
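
/*
 * Example (illustrative sketch): context-switch records are requested with
 *
 *	attr.context_switch = 1;
 *
 * Per-task events receive a bare PERF_RECORD_SWITCH header, while CPU-wide
 * events receive PERF_RECORD_SWITCH_CPU_WIDE including next_prev_pid and
 * next_prev_tid; in both cases PERF_RECORD_MISC_SWITCH_OUT in header.misc
 * marks the switch-out side, exactly as perf_event_switch_output() decides.
 */
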
0 : PERF_RECORD_MISC_SWITCH_OUT, 6167 /* .size */ 6168 }, 6169 /* .next_prev_pid */ 6170 /* .next_prev_tid */ 6171 }, 6172 }; 6173 6174 perf_event_aux(perf_event_switch_output, 6175 &switch_event, 6176 NULL); 6177 } 6178 6179 /* 6180 * IRQ throttle logging 6181 */ 6182 6183 static void perf_log_throttle(struct perf_event *event, int enable) 6184 { 6185 struct perf_output_handle handle; 6186 struct perf_sample_data sample; 6187 int ret; 6188 6189 struct { 6190 struct perf_event_header header; 6191 u64 time; 6192 u64 id; 6193 u64 stream_id; 6194 } throttle_event = { 6195 .header = { 6196 .type = PERF_RECORD_THROTTLE, 6197 .misc = 0, 6198 .size = sizeof(throttle_event), 6199 }, 6200 .time = perf_event_clock(event), 6201 .id = primary_event_id(event), 6202 .stream_id = event->id, 6203 }; 6204 6205 if (enable) 6206 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6207 6208 perf_event_header__init_id(&throttle_event.header, &sample, event); 6209 6210 ret = perf_output_begin(&handle, event, 6211 throttle_event.header.size); 6212 if (ret) 6213 return; 6214 6215 perf_output_put(&handle, throttle_event); 6216 perf_event__output_id_sample(event, &handle, &sample); 6217 perf_output_end(&handle); 6218 } 6219 6220 static void perf_log_itrace_start(struct perf_event *event) 6221 { 6222 struct perf_output_handle handle; 6223 struct perf_sample_data sample; 6224 struct perf_aux_event { 6225 struct perf_event_header header; 6226 u32 pid; 6227 u32 tid; 6228 } rec; 6229 int ret; 6230 6231 if (event->parent) 6232 event = event->parent; 6233 6234 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6235 event->hw.itrace_started) 6236 return; 6237 6238 rec.header.type = PERF_RECORD_ITRACE_START; 6239 rec.header.misc = 0; 6240 rec.header.size = sizeof(rec); 6241 rec.pid = perf_event_pid(event, current); 6242 rec.tid = perf_event_tid(event, current); 6243 6244 perf_event_header__init_id(&rec.header, &sample, event); 6245 ret = perf_output_begin(&handle, event, rec.header.size); 6246 6247 if (ret) 6248 return; 6249 6250 perf_output_put(&handle, rec); 6251 perf_event__output_id_sample(event, &handle, &sample); 6252 6253 perf_output_end(&handle); 6254 } 6255 6256 /* 6257 * Generic event overflow handling, sampling. 6258 */ 6259 6260 static int __perf_event_overflow(struct perf_event *event, 6261 int throttle, struct perf_sample_data *data, 6262 struct pt_regs *regs) 6263 { 6264 int events = atomic_read(&event->event_limit); 6265 struct hw_perf_event *hwc = &event->hw; 6266 u64 seq; 6267 int ret = 0; 6268 6269 /* 6270 * Non-sampling counters might still use the PMI to fold short 6271 * hardware counters, ignore those. 
6272 */ 6273 if (unlikely(!is_sampling_event(event))) 6274 return 0; 6275 6276 seq = __this_cpu_read(perf_throttled_seq); 6277 if (seq != hwc->interrupts_seq) { 6278 hwc->interrupts_seq = seq; 6279 hwc->interrupts = 1; 6280 } else { 6281 hwc->interrupts++; 6282 if (unlikely(throttle 6283 && hwc->interrupts >= max_samples_per_tick)) { 6284 __this_cpu_inc(perf_throttled_count); 6285 hwc->interrupts = MAX_INTERRUPTS; 6286 perf_log_throttle(event, 0); 6287 tick_nohz_full_kick(); 6288 ret = 1; 6289 } 6290 } 6291 6292 if (event->attr.freq) { 6293 u64 now = perf_clock(); 6294 s64 delta = now - hwc->freq_time_stamp; 6295 6296 hwc->freq_time_stamp = now; 6297 6298 if (delta > 0 && delta < 2*TICK_NSEC) 6299 perf_adjust_period(event, delta, hwc->last_period, true); 6300 } 6301 6302 /* 6303 * XXX event_limit might not quite work as expected on inherited 6304 * events 6305 */ 6306 6307 event->pending_kill = POLL_IN; 6308 if (events && atomic_dec_and_test(&event->event_limit)) { 6309 ret = 1; 6310 event->pending_kill = POLL_HUP; 6311 event->pending_disable = 1; 6312 irq_work_queue(&event->pending); 6313 } 6314 6315 if (event->overflow_handler) 6316 event->overflow_handler(event, data, regs); 6317 else 6318 perf_event_output(event, data, regs); 6319 6320 if (*perf_event_fasync(event) && event->pending_kill) { 6321 event->pending_wakeup = 1; 6322 irq_work_queue(&event->pending); 6323 } 6324 6325 return ret; 6326 } 6327 6328 int perf_event_overflow(struct perf_event *event, 6329 struct perf_sample_data *data, 6330 struct pt_regs *regs) 6331 { 6332 return __perf_event_overflow(event, 1, data, regs); 6333 } 6334 6335 /* 6336 * Generic software event infrastructure 6337 */ 6338 6339 struct swevent_htable { 6340 struct swevent_hlist *swevent_hlist; 6341 struct mutex hlist_mutex; 6342 int hlist_refcount; 6343 6344 /* Recursion avoidance in each contexts */ 6345 int recursion[PERF_NR_CONTEXTS]; 6346 6347 /* Keeps track of cpu being initialized/exited */ 6348 bool online; 6349 }; 6350 6351 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6352 6353 /* 6354 * We directly increment event->count and keep a second value in 6355 * event->hw.period_left to count intervals. This period event 6356 * is kept in the range [-sample_period, 0] so that we can use the 6357 * sign as trigger. 6358 */ 6359 6360 u64 perf_swevent_set_period(struct perf_event *event) 6361 { 6362 struct hw_perf_event *hwc = &event->hw; 6363 u64 period = hwc->last_period; 6364 u64 nr, offset; 6365 s64 old, val; 6366 6367 hwc->last_period = hwc->sample_period; 6368 6369 again: 6370 old = val = local64_read(&hwc->period_left); 6371 if (val < 0) 6372 return 0; 6373 6374 nr = div64_u64(period + val, period); 6375 offset = nr * period; 6376 val -= offset; 6377 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6378 goto again; 6379 6380 return nr; 6381 } 6382 6383 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6384 struct perf_sample_data *data, 6385 struct pt_regs *regs) 6386 { 6387 struct hw_perf_event *hwc = &event->hw; 6388 int throttle = 0; 6389 6390 if (!overflow) 6391 overflow = perf_swevent_set_period(event); 6392 6393 if (hwc->interrupts == MAX_INTERRUPTS) 6394 return; 6395 6396 for (; overflow; overflow--) { 6397 if (__perf_event_overflow(event, throttle, 6398 data, regs)) { 6399 /* 6400 * We inhibit the overflow from happening when 6401 * hwc->interrupts == MAX_INTERRUPTS. 
6402 */ 6403 break; 6404 } 6405 throttle = 1; 6406 } 6407 } 6408 6409 static void perf_swevent_event(struct perf_event *event, u64 nr, 6410 struct perf_sample_data *data, 6411 struct pt_regs *regs) 6412 { 6413 struct hw_perf_event *hwc = &event->hw; 6414 6415 local64_add(nr, &event->count); 6416 6417 if (!regs) 6418 return; 6419 6420 if (!is_sampling_event(event)) 6421 return; 6422 6423 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6424 data->period = nr; 6425 return perf_swevent_overflow(event, 1, data, regs); 6426 } else 6427 data->period = event->hw.last_period; 6428 6429 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6430 return perf_swevent_overflow(event, 1, data, regs); 6431 6432 if (local64_add_negative(nr, &hwc->period_left)) 6433 return; 6434 6435 perf_swevent_overflow(event, 0, data, regs); 6436 } 6437 6438 static int perf_exclude_event(struct perf_event *event, 6439 struct pt_regs *regs) 6440 { 6441 if (event->hw.state & PERF_HES_STOPPED) 6442 return 1; 6443 6444 if (regs) { 6445 if (event->attr.exclude_user && user_mode(regs)) 6446 return 1; 6447 6448 if (event->attr.exclude_kernel && !user_mode(regs)) 6449 return 1; 6450 } 6451 6452 return 0; 6453 } 6454 6455 static int perf_swevent_match(struct perf_event *event, 6456 enum perf_type_id type, 6457 u32 event_id, 6458 struct perf_sample_data *data, 6459 struct pt_regs *regs) 6460 { 6461 if (event->attr.type != type) 6462 return 0; 6463 6464 if (event->attr.config != event_id) 6465 return 0; 6466 6467 if (perf_exclude_event(event, regs)) 6468 return 0; 6469 6470 return 1; 6471 } 6472 6473 static inline u64 swevent_hash(u64 type, u32 event_id) 6474 { 6475 u64 val = event_id | (type << 32); 6476 6477 return hash_64(val, SWEVENT_HLIST_BITS); 6478 } 6479 6480 static inline struct hlist_head * 6481 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6482 { 6483 u64 hash = swevent_hash(type, event_id); 6484 6485 return &hlist->heads[hash]; 6486 } 6487 6488 /* For the read side: events when they trigger */ 6489 static inline struct hlist_head * 6490 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6491 { 6492 struct swevent_hlist *hlist; 6493 6494 hlist = rcu_dereference(swhash->swevent_hlist); 6495 if (!hlist) 6496 return NULL; 6497 6498 return __find_swevent_head(hlist, type, event_id); 6499 } 6500 6501 /* For the event head insertion and removal in the hlist */ 6502 static inline struct hlist_head * 6503 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6504 { 6505 struct swevent_hlist *hlist; 6506 u32 event_id = event->attr.config; 6507 u64 type = event->attr.type; 6508 6509 /* 6510 * Event scheduling is always serialized against hlist allocation 6511 * and release. Which makes the protected version suitable here. 6512 * The context lock guarantees that. 
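 * (Contrast this with find_swevent_head_rcu() above, which runs on the event
 * delivery path and may only rely on rcu_read_lock().)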
6513 */ 6514 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6515 lockdep_is_held(&event->ctx->lock)); 6516 if (!hlist) 6517 return NULL; 6518 6519 return __find_swevent_head(hlist, type, event_id); 6520 } 6521 6522 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6523 u64 nr, 6524 struct perf_sample_data *data, 6525 struct pt_regs *regs) 6526 { 6527 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6528 struct perf_event *event; 6529 struct hlist_head *head; 6530 6531 rcu_read_lock(); 6532 head = find_swevent_head_rcu(swhash, type, event_id); 6533 if (!head) 6534 goto end; 6535 6536 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6537 if (perf_swevent_match(event, type, event_id, data, regs)) 6538 perf_swevent_event(event, nr, data, regs); 6539 } 6540 end: 6541 rcu_read_unlock(); 6542 } 6543 6544 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6545 6546 int perf_swevent_get_recursion_context(void) 6547 { 6548 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6549 6550 return get_recursion_context(swhash->recursion); 6551 } 6552 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6553 6554 inline void perf_swevent_put_recursion_context(int rctx) 6555 { 6556 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6557 6558 put_recursion_context(swhash->recursion, rctx); 6559 } 6560 6561 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6562 { 6563 struct perf_sample_data data; 6564 6565 if (WARN_ON_ONCE(!regs)) 6566 return; 6567 6568 perf_sample_data_init(&data, addr, 0); 6569 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6570 } 6571 6572 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6573 { 6574 int rctx; 6575 6576 preempt_disable_notrace(); 6577 rctx = perf_swevent_get_recursion_context(); 6578 if (unlikely(rctx < 0)) 6579 goto fail; 6580 6581 ___perf_sw_event(event_id, nr, regs, addr); 6582 6583 perf_swevent_put_recursion_context(rctx); 6584 fail: 6585 preempt_enable_notrace(); 6586 } 6587 6588 static void perf_swevent_read(struct perf_event *event) 6589 { 6590 } 6591 6592 static int perf_swevent_add(struct perf_event *event, int flags) 6593 { 6594 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6595 struct hw_perf_event *hwc = &event->hw; 6596 struct hlist_head *head; 6597 6598 if (is_sampling_event(event)) { 6599 hwc->last_period = hwc->sample_period; 6600 perf_swevent_set_period(event); 6601 } 6602 6603 hwc->state = !(flags & PERF_EF_START); 6604 6605 head = find_swevent_head(swhash, event); 6606 if (!head) { 6607 /* 6608 * We can race with cpu hotplug code. Do not 6609 * WARN if the cpu just got unplugged. 
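 * In other words, only complain when swhash->online says the CPU is still up;
 * in that case a missing hlist is a real bug rather than a hotplug race.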
6610 */ 6611 WARN_ON_ONCE(swhash->online); 6612 return -EINVAL; 6613 } 6614 6615 hlist_add_head_rcu(&event->hlist_entry, head); 6616 perf_event_update_userpage(event); 6617 6618 return 0; 6619 } 6620 6621 static void perf_swevent_del(struct perf_event *event, int flags) 6622 { 6623 hlist_del_rcu(&event->hlist_entry); 6624 } 6625 6626 static void perf_swevent_start(struct perf_event *event, int flags) 6627 { 6628 event->hw.state = 0; 6629 } 6630 6631 static void perf_swevent_stop(struct perf_event *event, int flags) 6632 { 6633 event->hw.state = PERF_HES_STOPPED; 6634 } 6635 6636 /* Deref the hlist from the update side */ 6637 static inline struct swevent_hlist * 6638 swevent_hlist_deref(struct swevent_htable *swhash) 6639 { 6640 return rcu_dereference_protected(swhash->swevent_hlist, 6641 lockdep_is_held(&swhash->hlist_mutex)); 6642 } 6643 6644 static void swevent_hlist_release(struct swevent_htable *swhash) 6645 { 6646 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6647 6648 if (!hlist) 6649 return; 6650 6651 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6652 kfree_rcu(hlist, rcu_head); 6653 } 6654 6655 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6656 { 6657 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6658 6659 mutex_lock(&swhash->hlist_mutex); 6660 6661 if (!--swhash->hlist_refcount) 6662 swevent_hlist_release(swhash); 6663 6664 mutex_unlock(&swhash->hlist_mutex); 6665 } 6666 6667 static void swevent_hlist_put(struct perf_event *event) 6668 { 6669 int cpu; 6670 6671 for_each_possible_cpu(cpu) 6672 swevent_hlist_put_cpu(event, cpu); 6673 } 6674 6675 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6676 { 6677 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6678 int err = 0; 6679 6680 mutex_lock(&swhash->hlist_mutex); 6681 6682 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6683 struct swevent_hlist *hlist; 6684 6685 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6686 if (!hlist) { 6687 err = -ENOMEM; 6688 goto exit; 6689 } 6690 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6691 } 6692 swhash->hlist_refcount++; 6693 exit: 6694 mutex_unlock(&swhash->hlist_mutex); 6695 6696 return err; 6697 } 6698 6699 static int swevent_hlist_get(struct perf_event *event) 6700 { 6701 int err; 6702 int cpu, failed_cpu; 6703 6704 get_online_cpus(); 6705 for_each_possible_cpu(cpu) { 6706 err = swevent_hlist_get_cpu(event, cpu); 6707 if (err) { 6708 failed_cpu = cpu; 6709 goto fail; 6710 } 6711 } 6712 put_online_cpus(); 6713 6714 return 0; 6715 fail: 6716 for_each_possible_cpu(cpu) { 6717 if (cpu == failed_cpu) 6718 break; 6719 swevent_hlist_put_cpu(event, cpu); 6720 } 6721 6722 put_online_cpus(); 6723 return err; 6724 } 6725 6726 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6727 6728 static void sw_perf_event_destroy(struct perf_event *event) 6729 { 6730 u64 event_id = event->attr.config; 6731 6732 WARN_ON(event->parent); 6733 6734 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6735 swevent_hlist_put(event); 6736 } 6737 6738 static int perf_swevent_init(struct perf_event *event) 6739 { 6740 u64 event_id = event->attr.config; 6741 6742 if (event->attr.type != PERF_TYPE_SOFTWARE) 6743 return -ENOENT; 6744 6745 /* 6746 * no branch sampling for software events 6747 */ 6748 if (has_branch_stack(event)) 6749 return -EOPNOTSUPP; 6750 6751 switch (event_id) { 6752 case PERF_COUNT_SW_CPU_CLOCK: 6753 case PERF_COUNT_SW_TASK_CLOCK: 6754 return -ENOENT; 6755 6756 default: 6757 break; 6758 } 6759 6760 
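	/*
	 * The cpu-clock and task-clock event ids are claimed by the dedicated
	 * perf_cpu_clock and perf_task_clock pmus further down; returning
	 * -ENOENT for them above lets perf_init_event() move on to the next
	 * pmu on the list until one of those picks the event up.
	 */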
if (event_id >= PERF_COUNT_SW_MAX) 6761 return -ENOENT; 6762 6763 if (!event->parent) { 6764 int err; 6765 6766 err = swevent_hlist_get(event); 6767 if (err) 6768 return err; 6769 6770 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6771 event->destroy = sw_perf_event_destroy; 6772 } 6773 6774 return 0; 6775 } 6776 6777 static struct pmu perf_swevent = { 6778 .task_ctx_nr = perf_sw_context, 6779 6780 .capabilities = PERF_PMU_CAP_NO_NMI, 6781 6782 .event_init = perf_swevent_init, 6783 .add = perf_swevent_add, 6784 .del = perf_swevent_del, 6785 .start = perf_swevent_start, 6786 .stop = perf_swevent_stop, 6787 .read = perf_swevent_read, 6788 }; 6789 6790 #ifdef CONFIG_EVENT_TRACING 6791 6792 static int perf_tp_filter_match(struct perf_event *event, 6793 struct perf_sample_data *data) 6794 { 6795 void *record = data->raw->data; 6796 6797 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6798 return 1; 6799 return 0; 6800 } 6801 6802 static int perf_tp_event_match(struct perf_event *event, 6803 struct perf_sample_data *data, 6804 struct pt_regs *regs) 6805 { 6806 if (event->hw.state & PERF_HES_STOPPED) 6807 return 0; 6808 /* 6809 * All tracepoints are from kernel-space. 6810 */ 6811 if (event->attr.exclude_kernel) 6812 return 0; 6813 6814 if (!perf_tp_filter_match(event, data)) 6815 return 0; 6816 6817 return 1; 6818 } 6819 6820 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6821 struct pt_regs *regs, struct hlist_head *head, int rctx, 6822 struct task_struct *task) 6823 { 6824 struct perf_sample_data data; 6825 struct perf_event *event; 6826 6827 struct perf_raw_record raw = { 6828 .size = entry_size, 6829 .data = record, 6830 }; 6831 6832 perf_sample_data_init(&data, addr, 0); 6833 data.raw = &raw; 6834 6835 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6836 if (perf_tp_event_match(event, &data, regs)) 6837 perf_swevent_event(event, count, &data, regs); 6838 } 6839 6840 /* 6841 * If we got specified a target task, also iterate its context and 6842 * deliver this event there too. 
6843 */ 6844 if (task && task != current) { 6845 struct perf_event_context *ctx; 6846 struct trace_entry *entry = record; 6847 6848 rcu_read_lock(); 6849 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6850 if (!ctx) 6851 goto unlock; 6852 6853 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6854 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6855 continue; 6856 if (event->attr.config != entry->type) 6857 continue; 6858 if (perf_tp_event_match(event, &data, regs)) 6859 perf_swevent_event(event, count, &data, regs); 6860 } 6861 unlock: 6862 rcu_read_unlock(); 6863 } 6864 6865 perf_swevent_put_recursion_context(rctx); 6866 } 6867 EXPORT_SYMBOL_GPL(perf_tp_event); 6868 6869 static void tp_perf_event_destroy(struct perf_event *event) 6870 { 6871 perf_trace_destroy(event); 6872 } 6873 6874 static int perf_tp_event_init(struct perf_event *event) 6875 { 6876 int err; 6877 6878 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6879 return -ENOENT; 6880 6881 /* 6882 * no branch sampling for tracepoint events 6883 */ 6884 if (has_branch_stack(event)) 6885 return -EOPNOTSUPP; 6886 6887 err = perf_trace_init(event); 6888 if (err) 6889 return err; 6890 6891 event->destroy = tp_perf_event_destroy; 6892 6893 return 0; 6894 } 6895 6896 static struct pmu perf_tracepoint = { 6897 .task_ctx_nr = perf_sw_context, 6898 6899 .event_init = perf_tp_event_init, 6900 .add = perf_trace_add, 6901 .del = perf_trace_del, 6902 .start = perf_swevent_start, 6903 .stop = perf_swevent_stop, 6904 .read = perf_swevent_read, 6905 }; 6906 6907 static inline void perf_tp_register(void) 6908 { 6909 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 6910 } 6911 6912 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6913 { 6914 char *filter_str; 6915 int ret; 6916 6917 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6918 return -EINVAL; 6919 6920 filter_str = strndup_user(arg, PAGE_SIZE); 6921 if (IS_ERR(filter_str)) 6922 return PTR_ERR(filter_str); 6923 6924 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 6925 6926 kfree(filter_str); 6927 return ret; 6928 } 6929 6930 static void perf_event_free_filter(struct perf_event *event) 6931 { 6932 ftrace_profile_free_filter(event); 6933 } 6934 6935 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6936 { 6937 struct bpf_prog *prog; 6938 6939 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6940 return -EINVAL; 6941 6942 if (event->tp_event->prog) 6943 return -EEXIST; 6944 6945 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) 6946 /* bpf programs can only be attached to u/kprobes */ 6947 return -EINVAL; 6948 6949 prog = bpf_prog_get(prog_fd); 6950 if (IS_ERR(prog)) 6951 return PTR_ERR(prog); 6952 6953 if (prog->type != BPF_PROG_TYPE_KPROBE) { 6954 /* valid fd, but invalid bpf program type */ 6955 bpf_prog_put(prog); 6956 return -EINVAL; 6957 } 6958 6959 event->tp_event->prog = prog; 6960 6961 return 0; 6962 } 6963 6964 static void perf_event_free_bpf_prog(struct perf_event *event) 6965 { 6966 struct bpf_prog *prog; 6967 6968 if (!event->tp_event) 6969 return; 6970 6971 prog = event->tp_event->prog; 6972 if (prog) { 6973 event->tp_event->prog = NULL; 6974 bpf_prog_put(prog); 6975 } 6976 } 6977 6978 #else 6979 6980 static inline void perf_tp_register(void) 6981 { 6982 } 6983 6984 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6985 { 6986 return -ENOENT; 6987 } 6988 6989 static void perf_event_free_filter(struct perf_event *event) 6990 { 
6991 } 6992 6993 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6994 { 6995 return -ENOENT; 6996 } 6997 6998 static void perf_event_free_bpf_prog(struct perf_event *event) 6999 { 7000 } 7001 #endif /* CONFIG_EVENT_TRACING */ 7002 7003 #ifdef CONFIG_HAVE_HW_BREAKPOINT 7004 void perf_bp_event(struct perf_event *bp, void *data) 7005 { 7006 struct perf_sample_data sample; 7007 struct pt_regs *regs = data; 7008 7009 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 7010 7011 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 7012 perf_swevent_event(bp, 1, &sample, regs); 7013 } 7014 #endif 7015 7016 /* 7017 * hrtimer based swevent callback 7018 */ 7019 7020 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 7021 { 7022 enum hrtimer_restart ret = HRTIMER_RESTART; 7023 struct perf_sample_data data; 7024 struct pt_regs *regs; 7025 struct perf_event *event; 7026 u64 period; 7027 7028 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 7029 7030 if (event->state != PERF_EVENT_STATE_ACTIVE) 7031 return HRTIMER_NORESTART; 7032 7033 event->pmu->read(event); 7034 7035 perf_sample_data_init(&data, 0, event->hw.last_period); 7036 regs = get_irq_regs(); 7037 7038 if (regs && !perf_exclude_event(event, regs)) { 7039 if (!(event->attr.exclude_idle && is_idle_task(current))) 7040 if (__perf_event_overflow(event, 1, &data, regs)) 7041 ret = HRTIMER_NORESTART; 7042 } 7043 7044 period = max_t(u64, 10000, event->hw.sample_period); 7045 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 7046 7047 return ret; 7048 } 7049 7050 static void perf_swevent_start_hrtimer(struct perf_event *event) 7051 { 7052 struct hw_perf_event *hwc = &event->hw; 7053 s64 period; 7054 7055 if (!is_sampling_event(event)) 7056 return; 7057 7058 period = local64_read(&hwc->period_left); 7059 if (period) { 7060 if (period < 0) 7061 period = 10000; 7062 7063 local64_set(&hwc->period_left, 0); 7064 } else { 7065 period = max_t(u64, 10000, hwc->sample_period); 7066 } 7067 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 7068 HRTIMER_MODE_REL_PINNED); 7069 } 7070 7071 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 7072 { 7073 struct hw_perf_event *hwc = &event->hw; 7074 7075 if (is_sampling_event(event)) { 7076 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 7077 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 7078 7079 hrtimer_cancel(&hwc->hrtimer); 7080 } 7081 } 7082 7083 static void perf_swevent_init_hrtimer(struct perf_event *event) 7084 { 7085 struct hw_perf_event *hwc = &event->hw; 7086 7087 if (!is_sampling_event(event)) 7088 return; 7089 7090 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7091 hwc->hrtimer.function = perf_swevent_hrtimer; 7092 7093 /* 7094 * Since hrtimers have a fixed rate, we can do a static freq->period 7095 * mapping and avoid the whole period adjust feedback stuff. 
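 * For example, attr.freq = 1 with attr.sample_freq = 1000 becomes a fixed
 * sample_period of NSEC_PER_SEC / 1000 = 1,000,000 ns, i.e. roughly one
 * timer-driven sample per millisecond; the hrtimer start/restart paths above
 * additionally clamp the period to at least 10000 ns.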
7096 */ 7097 if (event->attr.freq) { 7098 long freq = event->attr.sample_freq; 7099 7100 event->attr.sample_period = NSEC_PER_SEC / freq; 7101 hwc->sample_period = event->attr.sample_period; 7102 local64_set(&hwc->period_left, hwc->sample_period); 7103 hwc->last_period = hwc->sample_period; 7104 event->attr.freq = 0; 7105 } 7106 } 7107 7108 /* 7109 * Software event: cpu wall time clock 7110 */ 7111 7112 static void cpu_clock_event_update(struct perf_event *event) 7113 { 7114 s64 prev; 7115 u64 now; 7116 7117 now = local_clock(); 7118 prev = local64_xchg(&event->hw.prev_count, now); 7119 local64_add(now - prev, &event->count); 7120 } 7121 7122 static void cpu_clock_event_start(struct perf_event *event, int flags) 7123 { 7124 local64_set(&event->hw.prev_count, local_clock()); 7125 perf_swevent_start_hrtimer(event); 7126 } 7127 7128 static void cpu_clock_event_stop(struct perf_event *event, int flags) 7129 { 7130 perf_swevent_cancel_hrtimer(event); 7131 cpu_clock_event_update(event); 7132 } 7133 7134 static int cpu_clock_event_add(struct perf_event *event, int flags) 7135 { 7136 if (flags & PERF_EF_START) 7137 cpu_clock_event_start(event, flags); 7138 perf_event_update_userpage(event); 7139 7140 return 0; 7141 } 7142 7143 static void cpu_clock_event_del(struct perf_event *event, int flags) 7144 { 7145 cpu_clock_event_stop(event, flags); 7146 } 7147 7148 static void cpu_clock_event_read(struct perf_event *event) 7149 { 7150 cpu_clock_event_update(event); 7151 } 7152 7153 static int cpu_clock_event_init(struct perf_event *event) 7154 { 7155 if (event->attr.type != PERF_TYPE_SOFTWARE) 7156 return -ENOENT; 7157 7158 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 7159 return -ENOENT; 7160 7161 /* 7162 * no branch sampling for software events 7163 */ 7164 if (has_branch_stack(event)) 7165 return -EOPNOTSUPP; 7166 7167 perf_swevent_init_hrtimer(event); 7168 7169 return 0; 7170 } 7171 7172 static struct pmu perf_cpu_clock = { 7173 .task_ctx_nr = perf_sw_context, 7174 7175 .capabilities = PERF_PMU_CAP_NO_NMI, 7176 7177 .event_init = cpu_clock_event_init, 7178 .add = cpu_clock_event_add, 7179 .del = cpu_clock_event_del, 7180 .start = cpu_clock_event_start, 7181 .stop = cpu_clock_event_stop, 7182 .read = cpu_clock_event_read, 7183 }; 7184 7185 /* 7186 * Software event: task time clock 7187 */ 7188 7189 static void task_clock_event_update(struct perf_event *event, u64 now) 7190 { 7191 u64 prev; 7192 s64 delta; 7193 7194 prev = local64_xchg(&event->hw.prev_count, now); 7195 delta = now - prev; 7196 local64_add(delta, &event->count); 7197 } 7198 7199 static void task_clock_event_start(struct perf_event *event, int flags) 7200 { 7201 local64_set(&event->hw.prev_count, event->ctx->time); 7202 perf_swevent_start_hrtimer(event); 7203 } 7204 7205 static void task_clock_event_stop(struct perf_event *event, int flags) 7206 { 7207 perf_swevent_cancel_hrtimer(event); 7208 task_clock_event_update(event, event->ctx->time); 7209 } 7210 7211 static int task_clock_event_add(struct perf_event *event, int flags) 7212 { 7213 if (flags & PERF_EF_START) 7214 task_clock_event_start(event, flags); 7215 perf_event_update_userpage(event); 7216 7217 return 0; 7218 } 7219 7220 static void task_clock_event_del(struct perf_event *event, int flags) 7221 { 7222 task_clock_event_stop(event, PERF_EF_UPDATE); 7223 } 7224 7225 static void task_clock_event_read(struct perf_event *event) 7226 { 7227 u64 now = perf_clock(); 7228 u64 delta = now - event->ctx->timestamp; 7229 u64 time = event->ctx->time + delta; 7230 7231 
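	/*
	 * ctx->time was last brought up to date at ctx->timestamp (see
	 * update_context_time()); adding the wall-clock delta since then
	 * extrapolates the task clock to "now" instead of waiting for the
	 * next scheduling update.
	 */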
task_clock_event_update(event, time); 7232 } 7233 7234 static int task_clock_event_init(struct perf_event *event) 7235 { 7236 if (event->attr.type != PERF_TYPE_SOFTWARE) 7237 return -ENOENT; 7238 7239 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7240 return -ENOENT; 7241 7242 /* 7243 * no branch sampling for software events 7244 */ 7245 if (has_branch_stack(event)) 7246 return -EOPNOTSUPP; 7247 7248 perf_swevent_init_hrtimer(event); 7249 7250 return 0; 7251 } 7252 7253 static struct pmu perf_task_clock = { 7254 .task_ctx_nr = perf_sw_context, 7255 7256 .capabilities = PERF_PMU_CAP_NO_NMI, 7257 7258 .event_init = task_clock_event_init, 7259 .add = task_clock_event_add, 7260 .del = task_clock_event_del, 7261 .start = task_clock_event_start, 7262 .stop = task_clock_event_stop, 7263 .read = task_clock_event_read, 7264 }; 7265 7266 static void perf_pmu_nop_void(struct pmu *pmu) 7267 { 7268 } 7269 7270 static int perf_pmu_nop_int(struct pmu *pmu) 7271 { 7272 return 0; 7273 } 7274 7275 static void perf_pmu_start_txn(struct pmu *pmu) 7276 { 7277 perf_pmu_disable(pmu); 7278 } 7279 7280 static int perf_pmu_commit_txn(struct pmu *pmu) 7281 { 7282 perf_pmu_enable(pmu); 7283 return 0; 7284 } 7285 7286 static void perf_pmu_cancel_txn(struct pmu *pmu) 7287 { 7288 perf_pmu_enable(pmu); 7289 } 7290 7291 static int perf_event_idx_default(struct perf_event *event) 7292 { 7293 return 0; 7294 } 7295 7296 /* 7297 * Ensures all contexts with the same task_ctx_nr have the same 7298 * pmu_cpu_context too. 7299 */ 7300 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7301 { 7302 struct pmu *pmu; 7303 7304 if (ctxn < 0) 7305 return NULL; 7306 7307 list_for_each_entry(pmu, &pmus, entry) { 7308 if (pmu->task_ctx_nr == ctxn) 7309 return pmu->pmu_cpu_context; 7310 } 7311 7312 return NULL; 7313 } 7314 7315 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7316 { 7317 int cpu; 7318 7319 for_each_possible_cpu(cpu) { 7320 struct perf_cpu_context *cpuctx; 7321 7322 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7323 7324 if (cpuctx->unique_pmu == old_pmu) 7325 cpuctx->unique_pmu = pmu; 7326 } 7327 } 7328 7329 static void free_pmu_context(struct pmu *pmu) 7330 { 7331 struct pmu *i; 7332 7333 mutex_lock(&pmus_lock); 7334 /* 7335 * Like a real lame refcount. 
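 * Rather than refcounting pmu_cpu_context we walk the pmu list below: if any
 * other pmu still shares this pmu_cpu_context, unique_pmu is handed over to
 * it and the allocation stays; otherwise it really is ours to free.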
7336 */ 7337 list_for_each_entry(i, &pmus, entry) { 7338 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7339 update_pmu_context(i, pmu); 7340 goto out; 7341 } 7342 } 7343 7344 free_percpu(pmu->pmu_cpu_context); 7345 out: 7346 mutex_unlock(&pmus_lock); 7347 } 7348 static struct idr pmu_idr; 7349 7350 static ssize_t 7351 type_show(struct device *dev, struct device_attribute *attr, char *page) 7352 { 7353 struct pmu *pmu = dev_get_drvdata(dev); 7354 7355 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7356 } 7357 static DEVICE_ATTR_RO(type); 7358 7359 static ssize_t 7360 perf_event_mux_interval_ms_show(struct device *dev, 7361 struct device_attribute *attr, 7362 char *page) 7363 { 7364 struct pmu *pmu = dev_get_drvdata(dev); 7365 7366 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7367 } 7368 7369 static DEFINE_MUTEX(mux_interval_mutex); 7370 7371 static ssize_t 7372 perf_event_mux_interval_ms_store(struct device *dev, 7373 struct device_attribute *attr, 7374 const char *buf, size_t count) 7375 { 7376 struct pmu *pmu = dev_get_drvdata(dev); 7377 int timer, cpu, ret; 7378 7379 ret = kstrtoint(buf, 0, &timer); 7380 if (ret) 7381 return ret; 7382 7383 if (timer < 1) 7384 return -EINVAL; 7385 7386 /* same value, noting to do */ 7387 if (timer == pmu->hrtimer_interval_ms) 7388 return count; 7389 7390 mutex_lock(&mux_interval_mutex); 7391 pmu->hrtimer_interval_ms = timer; 7392 7393 /* update all cpuctx for this PMU */ 7394 get_online_cpus(); 7395 for_each_online_cpu(cpu) { 7396 struct perf_cpu_context *cpuctx; 7397 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7398 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7399 7400 cpu_function_call(cpu, 7401 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7402 } 7403 put_online_cpus(); 7404 mutex_unlock(&mux_interval_mutex); 7405 7406 return count; 7407 } 7408 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7409 7410 static struct attribute *pmu_dev_attrs[] = { 7411 &dev_attr_type.attr, 7412 &dev_attr_perf_event_mux_interval_ms.attr, 7413 NULL, 7414 }; 7415 ATTRIBUTE_GROUPS(pmu_dev); 7416 7417 static int pmu_bus_running; 7418 static struct bus_type pmu_bus = { 7419 .name = "event_source", 7420 .dev_groups = pmu_dev_groups, 7421 }; 7422 7423 static void pmu_dev_release(struct device *dev) 7424 { 7425 kfree(dev); 7426 } 7427 7428 static int pmu_dev_alloc(struct pmu *pmu) 7429 { 7430 int ret = -ENOMEM; 7431 7432 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7433 if (!pmu->dev) 7434 goto out; 7435 7436 pmu->dev->groups = pmu->attr_groups; 7437 device_initialize(pmu->dev); 7438 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7439 if (ret) 7440 goto free_dev; 7441 7442 dev_set_drvdata(pmu->dev, pmu); 7443 pmu->dev->bus = &pmu_bus; 7444 pmu->dev->release = pmu_dev_release; 7445 ret = device_add(pmu->dev); 7446 if (ret) 7447 goto free_dev; 7448 7449 out: 7450 return ret; 7451 7452 free_dev: 7453 put_device(pmu->dev); 7454 goto out; 7455 } 7456 7457 static struct lock_class_key cpuctx_mutex; 7458 static struct lock_class_key cpuctx_lock; 7459 7460 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7461 { 7462 int cpu, ret; 7463 7464 mutex_lock(&pmus_lock); 7465 ret = -ENOMEM; 7466 pmu->pmu_disable_count = alloc_percpu(int); 7467 if (!pmu->pmu_disable_count) 7468 goto unlock; 7469 7470 pmu->type = -1; 7471 if (!name) 7472 goto skip_type; 7473 pmu->name = name; 7474 7475 if (type < 0) { 7476 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7477 if (type < 0) { 7478 ret = 
type; 7479 goto free_pdc; 7480 } 7481 } 7482 pmu->type = type; 7483 7484 if (pmu_bus_running) { 7485 ret = pmu_dev_alloc(pmu); 7486 if (ret) 7487 goto free_idr; 7488 } 7489 7490 skip_type: 7491 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7492 if (pmu->pmu_cpu_context) 7493 goto got_cpu_context; 7494 7495 ret = -ENOMEM; 7496 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7497 if (!pmu->pmu_cpu_context) 7498 goto free_dev; 7499 7500 for_each_possible_cpu(cpu) { 7501 struct perf_cpu_context *cpuctx; 7502 7503 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7504 __perf_event_init_context(&cpuctx->ctx); 7505 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7506 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7507 cpuctx->ctx.pmu = pmu; 7508 7509 __perf_mux_hrtimer_init(cpuctx, cpu); 7510 7511 cpuctx->unique_pmu = pmu; 7512 } 7513 7514 got_cpu_context: 7515 if (!pmu->start_txn) { 7516 if (pmu->pmu_enable) { 7517 /* 7518 * If we have pmu_enable/pmu_disable calls, install 7519 * transaction stubs that use that to try and batch 7520 * hardware accesses. 7521 */ 7522 pmu->start_txn = perf_pmu_start_txn; 7523 pmu->commit_txn = perf_pmu_commit_txn; 7524 pmu->cancel_txn = perf_pmu_cancel_txn; 7525 } else { 7526 pmu->start_txn = perf_pmu_nop_void; 7527 pmu->commit_txn = perf_pmu_nop_int; 7528 pmu->cancel_txn = perf_pmu_nop_void; 7529 } 7530 } 7531 7532 if (!pmu->pmu_enable) { 7533 pmu->pmu_enable = perf_pmu_nop_void; 7534 pmu->pmu_disable = perf_pmu_nop_void; 7535 } 7536 7537 if (!pmu->event_idx) 7538 pmu->event_idx = perf_event_idx_default; 7539 7540 list_add_rcu(&pmu->entry, &pmus); 7541 atomic_set(&pmu->exclusive_cnt, 0); 7542 ret = 0; 7543 unlock: 7544 mutex_unlock(&pmus_lock); 7545 7546 return ret; 7547 7548 free_dev: 7549 device_del(pmu->dev); 7550 put_device(pmu->dev); 7551 7552 free_idr: 7553 if (pmu->type >= PERF_TYPE_MAX) 7554 idr_remove(&pmu_idr, pmu->type); 7555 7556 free_pdc: 7557 free_percpu(pmu->pmu_disable_count); 7558 goto unlock; 7559 } 7560 EXPORT_SYMBOL_GPL(perf_pmu_register); 7561 7562 void perf_pmu_unregister(struct pmu *pmu) 7563 { 7564 mutex_lock(&pmus_lock); 7565 list_del_rcu(&pmu->entry); 7566 mutex_unlock(&pmus_lock); 7567 7568 /* 7569 * We dereference the pmu list under both SRCU and regular RCU, so 7570 * synchronize against both of those. 7571 */ 7572 synchronize_srcu(&pmus_srcu); 7573 synchronize_rcu(); 7574 7575 free_percpu(pmu->pmu_disable_count); 7576 if (pmu->type >= PERF_TYPE_MAX) 7577 idr_remove(&pmu_idr, pmu->type); 7578 device_del(pmu->dev); 7579 put_device(pmu->dev); 7580 free_pmu_context(pmu); 7581 } 7582 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7583 7584 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7585 { 7586 struct perf_event_context *ctx = NULL; 7587 int ret; 7588 7589 if (!try_module_get(pmu->module)) 7590 return -ENODEV; 7591 7592 if (event->group_leader != event) { 7593 /* 7594 * This ctx->mutex can nest when we're called through 7595 * inheritance. See the perf_event_ctx_lock_nested() comment. 
7596 */ 7597 ctx = perf_event_ctx_lock_nested(event->group_leader, 7598 SINGLE_DEPTH_NESTING); 7599 BUG_ON(!ctx); 7600 } 7601 7602 event->pmu = pmu; 7603 ret = pmu->event_init(event); 7604 7605 if (ctx) 7606 perf_event_ctx_unlock(event->group_leader, ctx); 7607 7608 if (ret) 7609 module_put(pmu->module); 7610 7611 return ret; 7612 } 7613 7614 struct pmu *perf_init_event(struct perf_event *event) 7615 { 7616 struct pmu *pmu = NULL; 7617 int idx; 7618 int ret; 7619 7620 idx = srcu_read_lock(&pmus_srcu); 7621 7622 rcu_read_lock(); 7623 pmu = idr_find(&pmu_idr, event->attr.type); 7624 rcu_read_unlock(); 7625 if (pmu) { 7626 ret = perf_try_init_event(pmu, event); 7627 if (ret) 7628 pmu = ERR_PTR(ret); 7629 goto unlock; 7630 } 7631 7632 list_for_each_entry_rcu(pmu, &pmus, entry) { 7633 ret = perf_try_init_event(pmu, event); 7634 if (!ret) 7635 goto unlock; 7636 7637 if (ret != -ENOENT) { 7638 pmu = ERR_PTR(ret); 7639 goto unlock; 7640 } 7641 } 7642 pmu = ERR_PTR(-ENOENT); 7643 unlock: 7644 srcu_read_unlock(&pmus_srcu, idx); 7645 7646 return pmu; 7647 } 7648 7649 static void account_event_cpu(struct perf_event *event, int cpu) 7650 { 7651 if (event->parent) 7652 return; 7653 7654 if (is_cgroup_event(event)) 7655 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7656 } 7657 7658 static void account_event(struct perf_event *event) 7659 { 7660 if (event->parent) 7661 return; 7662 7663 if (event->attach_state & PERF_ATTACH_TASK) 7664 static_key_slow_inc(&perf_sched_events.key); 7665 if (event->attr.mmap || event->attr.mmap_data) 7666 atomic_inc(&nr_mmap_events); 7667 if (event->attr.comm) 7668 atomic_inc(&nr_comm_events); 7669 if (event->attr.task) 7670 atomic_inc(&nr_task_events); 7671 if (event->attr.freq) { 7672 if (atomic_inc_return(&nr_freq_events) == 1) 7673 tick_nohz_full_kick_all(); 7674 } 7675 if (event->attr.context_switch) { 7676 atomic_inc(&nr_switch_events); 7677 static_key_slow_inc(&perf_sched_events.key); 7678 } 7679 if (has_branch_stack(event)) 7680 static_key_slow_inc(&perf_sched_events.key); 7681 if (is_cgroup_event(event)) 7682 static_key_slow_inc(&perf_sched_events.key); 7683 7684 account_event_cpu(event, event->cpu); 7685 } 7686 7687 /* 7688 * Allocate and initialize a event structure 7689 */ 7690 static struct perf_event * 7691 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7692 struct task_struct *task, 7693 struct perf_event *group_leader, 7694 struct perf_event *parent_event, 7695 perf_overflow_handler_t overflow_handler, 7696 void *context, int cgroup_fd) 7697 { 7698 struct pmu *pmu; 7699 struct perf_event *event; 7700 struct hw_perf_event *hwc; 7701 long err = -EINVAL; 7702 7703 if ((unsigned)cpu >= nr_cpu_ids) { 7704 if (!task || cpu != -1) 7705 return ERR_PTR(-EINVAL); 7706 } 7707 7708 event = kzalloc(sizeof(*event), GFP_KERNEL); 7709 if (!event) 7710 return ERR_PTR(-ENOMEM); 7711 7712 /* 7713 * Single events are their own group leaders, with an 7714 * empty sibling list: 7715 */ 7716 if (!group_leader) 7717 group_leader = event; 7718 7719 mutex_init(&event->child_mutex); 7720 INIT_LIST_HEAD(&event->child_list); 7721 7722 INIT_LIST_HEAD(&event->group_entry); 7723 INIT_LIST_HEAD(&event->event_entry); 7724 INIT_LIST_HEAD(&event->sibling_list); 7725 INIT_LIST_HEAD(&event->rb_entry); 7726 INIT_LIST_HEAD(&event->active_entry); 7727 INIT_HLIST_NODE(&event->hlist_entry); 7728 7729 7730 init_waitqueue_head(&event->waitq); 7731 init_irq_work(&event->pending, perf_pending_event); 7732 7733 mutex_init(&event->mmap_mutex); 7734 7735 atomic_long_set(&event->refcount, 1); 7736 
event->cpu = cpu; 7737 event->attr = *attr; 7738 event->group_leader = group_leader; 7739 event->pmu = NULL; 7740 event->oncpu = -1; 7741 7742 event->parent = parent_event; 7743 7744 event->ns = get_pid_ns(task_active_pid_ns(current)); 7745 event->id = atomic64_inc_return(&perf_event_id); 7746 7747 event->state = PERF_EVENT_STATE_INACTIVE; 7748 7749 if (task) { 7750 event->attach_state = PERF_ATTACH_TASK; 7751 /* 7752 * XXX pmu::event_init needs to know what task to account to 7753 * and we cannot use the ctx information because we need the 7754 * pmu before we get a ctx. 7755 */ 7756 event->hw.target = task; 7757 } 7758 7759 event->clock = &local_clock; 7760 if (parent_event) 7761 event->clock = parent_event->clock; 7762 7763 if (!overflow_handler && parent_event) { 7764 overflow_handler = parent_event->overflow_handler; 7765 context = parent_event->overflow_handler_context; 7766 } 7767 7768 event->overflow_handler = overflow_handler; 7769 event->overflow_handler_context = context; 7770 7771 perf_event__state_init(event); 7772 7773 pmu = NULL; 7774 7775 hwc = &event->hw; 7776 hwc->sample_period = attr->sample_period; 7777 if (attr->freq && attr->sample_freq) 7778 hwc->sample_period = 1; 7779 hwc->last_period = hwc->sample_period; 7780 7781 local64_set(&hwc->period_left, hwc->sample_period); 7782 7783 /* 7784 * we currently do not support PERF_FORMAT_GROUP on inherited events 7785 */ 7786 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7787 goto err_ns; 7788 7789 if (!has_branch_stack(event)) 7790 event->attr.branch_sample_type = 0; 7791 7792 if (cgroup_fd != -1) { 7793 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7794 if (err) 7795 goto err_ns; 7796 } 7797 7798 pmu = perf_init_event(event); 7799 if (!pmu) 7800 goto err_ns; 7801 else if (IS_ERR(pmu)) { 7802 err = PTR_ERR(pmu); 7803 goto err_ns; 7804 } 7805 7806 err = exclusive_event_init(event); 7807 if (err) 7808 goto err_pmu; 7809 7810 if (!event->parent) { 7811 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7812 err = get_callchain_buffers(); 7813 if (err) 7814 goto err_per_task; 7815 } 7816 } 7817 7818 return event; 7819 7820 err_per_task: 7821 exclusive_event_destroy(event); 7822 7823 err_pmu: 7824 if (event->destroy) 7825 event->destroy(event); 7826 module_put(pmu->module); 7827 err_ns: 7828 if (is_cgroup_event(event)) 7829 perf_detach_cgroup(event); 7830 if (event->ns) 7831 put_pid_ns(event->ns); 7832 kfree(event); 7833 7834 return ERR_PTR(err); 7835 } 7836 7837 static int perf_copy_attr(struct perf_event_attr __user *uattr, 7838 struct perf_event_attr *attr) 7839 { 7840 u32 size; 7841 int ret; 7842 7843 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 7844 return -EFAULT; 7845 7846 /* 7847 * zero the full structure, so that a short copy will be nice. 7848 */ 7849 memset(attr, 0, sizeof(*attr)); 7850 7851 ret = get_user(size, &uattr->size); 7852 if (ret) 7853 return ret; 7854 7855 if (size > PAGE_SIZE) /* silly large */ 7856 goto err_size; 7857 7858 if (!size) /* abi compat */ 7859 size = PERF_ATTR_SIZE_VER0; 7860 7861 if (size < PERF_ATTR_SIZE_VER0) 7862 goto err_size; 7863 7864 /* 7865 * If we're handed a bigger struct than we know of, 7866 * ensure all the unknown bits are 0 - i.e. new 7867 * user-space does not rely on any kernel feature 7868 * extensions we dont know about yet. 
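 * Concretely, the loop below walks every byte past sizeof(*attr); if any of
 * them is non-zero the call fails with -E2BIG after writing the size this
 * kernel does understand back into uattr->size, so userspace can shrink its
 * copy and retry.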
7869 */ 7870 if (size > sizeof(*attr)) { 7871 unsigned char __user *addr; 7872 unsigned char __user *end; 7873 unsigned char val; 7874 7875 addr = (void __user *)uattr + sizeof(*attr); 7876 end = (void __user *)uattr + size; 7877 7878 for (; addr < end; addr++) { 7879 ret = get_user(val, addr); 7880 if (ret) 7881 return ret; 7882 if (val) 7883 goto err_size; 7884 } 7885 size = sizeof(*attr); 7886 } 7887 7888 ret = copy_from_user(attr, uattr, size); 7889 if (ret) 7890 return -EFAULT; 7891 7892 if (attr->__reserved_1) 7893 return -EINVAL; 7894 7895 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 7896 return -EINVAL; 7897 7898 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 7899 return -EINVAL; 7900 7901 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 7902 u64 mask = attr->branch_sample_type; 7903 7904 /* only using defined bits */ 7905 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 7906 return -EINVAL; 7907 7908 /* at least one branch bit must be set */ 7909 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 7910 return -EINVAL; 7911 7912 /* propagate priv level, when not set for branch */ 7913 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 7914 7915 /* exclude_kernel checked on syscall entry */ 7916 if (!attr->exclude_kernel) 7917 mask |= PERF_SAMPLE_BRANCH_KERNEL; 7918 7919 if (!attr->exclude_user) 7920 mask |= PERF_SAMPLE_BRANCH_USER; 7921 7922 if (!attr->exclude_hv) 7923 mask |= PERF_SAMPLE_BRANCH_HV; 7924 /* 7925 * adjust user setting (for HW filter setup) 7926 */ 7927 attr->branch_sample_type = mask; 7928 } 7929 /* privileged levels capture (kernel, hv): check permissions */ 7930 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 7931 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7932 return -EACCES; 7933 } 7934 7935 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 7936 ret = perf_reg_validate(attr->sample_regs_user); 7937 if (ret) 7938 return ret; 7939 } 7940 7941 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 7942 if (!arch_perf_have_user_stack_dump()) 7943 return -ENOSYS; 7944 7945 /* 7946 * We have __u32 type for the size, but so far 7947 * we can only use __u16 as maximum due to the 7948 * __u16 sample size limit. 7949 */ 7950 if (attr->sample_stack_user >= USHRT_MAX) 7951 ret = -EINVAL; 7952 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 7953 ret = -EINVAL; 7954 } 7955 7956 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 7957 ret = perf_reg_validate(attr->sample_regs_intr); 7958 out: 7959 return ret; 7960 7961 err_size: 7962 put_user(sizeof(*attr), &uattr->size); 7963 ret = -E2BIG; 7964 goto out; 7965 } 7966 7967 static int 7968 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 7969 { 7970 struct ring_buffer *rb = NULL; 7971 int ret = -EINVAL; 7972 7973 if (!output_event) 7974 goto set; 7975 7976 /* don't allow circular references */ 7977 if (event == output_event) 7978 goto out; 7979 7980 /* 7981 * Don't allow cross-cpu buffers 7982 */ 7983 if (output_event->cpu != event->cpu) 7984 goto out; 7985 7986 /* 7987 * If its not a per-cpu rb, it must be the same task. 7988 */ 7989 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 7990 goto out; 7991 7992 /* 7993 * Mixing clocks in the same buffer is trouble you don't need. 
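 * (This path is reached both from the PERF_EVENT_IOC_SET_OUTPUT ioctl and
 * from perf_event_open() with PERF_FLAG_FD_OUTPUT, and the check below
 * refuses the redirect whenever the two events use different clocks.)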
7994 */ 7995 if (output_event->clock != event->clock) 7996 goto out; 7997 7998 /* 7999 * If both events generate aux data, they must be on the same PMU 8000 */ 8001 if (has_aux(event) && has_aux(output_event) && 8002 event->pmu != output_event->pmu) 8003 goto out; 8004 8005 set: 8006 mutex_lock(&event->mmap_mutex); 8007 /* Can't redirect output if we've got an active mmap() */ 8008 if (atomic_read(&event->mmap_count)) 8009 goto unlock; 8010 8011 if (output_event) { 8012 /* get the rb we want to redirect to */ 8013 rb = ring_buffer_get(output_event); 8014 if (!rb) 8015 goto unlock; 8016 } 8017 8018 ring_buffer_attach(event, rb); 8019 8020 ret = 0; 8021 unlock: 8022 mutex_unlock(&event->mmap_mutex); 8023 8024 out: 8025 return ret; 8026 } 8027 8028 static void mutex_lock_double(struct mutex *a, struct mutex *b) 8029 { 8030 if (b < a) 8031 swap(a, b); 8032 8033 mutex_lock(a); 8034 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 8035 } 8036 8037 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 8038 { 8039 bool nmi_safe = false; 8040 8041 switch (clk_id) { 8042 case CLOCK_MONOTONIC: 8043 event->clock = &ktime_get_mono_fast_ns; 8044 nmi_safe = true; 8045 break; 8046 8047 case CLOCK_MONOTONIC_RAW: 8048 event->clock = &ktime_get_raw_fast_ns; 8049 nmi_safe = true; 8050 break; 8051 8052 case CLOCK_REALTIME: 8053 event->clock = &ktime_get_real_ns; 8054 break; 8055 8056 case CLOCK_BOOTTIME: 8057 event->clock = &ktime_get_boot_ns; 8058 break; 8059 8060 case CLOCK_TAI: 8061 event->clock = &ktime_get_tai_ns; 8062 break; 8063 8064 default: 8065 return -EINVAL; 8066 } 8067 8068 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 8069 return -EINVAL; 8070 8071 return 0; 8072 } 8073 8074 /** 8075 * sys_perf_event_open - open a performance event, associate it to a task/cpu 8076 * 8077 * @attr_uptr: event_id type attributes for monitoring/sampling 8078 * @pid: target pid 8079 * @cpu: target cpu 8080 * @group_fd: group leader event fd 8081 */ 8082 SYSCALL_DEFINE5(perf_event_open, 8083 struct perf_event_attr __user *, attr_uptr, 8084 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 8085 { 8086 struct perf_event *group_leader = NULL, *output_event = NULL; 8087 struct perf_event *event, *sibling; 8088 struct perf_event_attr attr; 8089 struct perf_event_context *ctx, *uninitialized_var(gctx); 8090 struct file *event_file = NULL; 8091 struct fd group = {NULL, 0}; 8092 struct task_struct *task = NULL; 8093 struct pmu *pmu; 8094 int event_fd; 8095 int move_group = 0; 8096 int err; 8097 int f_flags = O_RDWR; 8098 int cgroup_fd = -1; 8099 8100 /* for future expandability... */ 8101 if (flags & ~PERF_FLAG_ALL) 8102 return -EINVAL; 8103 8104 err = perf_copy_attr(attr_uptr, &attr); 8105 if (err) 8106 return err; 8107 8108 if (!attr.exclude_kernel) { 8109 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8110 return -EACCES; 8111 } 8112 8113 if (attr.freq) { 8114 if (attr.sample_freq > sysctl_perf_event_sample_rate) 8115 return -EINVAL; 8116 } else { 8117 if (attr.sample_period & (1ULL << 63)) 8118 return -EINVAL; 8119 } 8120 8121 /* 8122 * In cgroup mode, the pid argument is used to pass the fd 8123 * opened to the cgroup directory in cgroupfs. The cpu argument 8124 * designates the cpu on which to monitor threads from that 8125 * cgroup. 
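 * There is no glibc wrapper for this syscall; userspace invokes it via
 * syscall(__NR_perf_event_open, ...). A minimal, purely illustrative
 * cgroup-mode call (the cgroup path is hypothetical) would look like:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	int fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
 *			 PERF_FLAG_PID_CGROUP);
 *
 * where cpu must name a real CPU, since pid == -1 or cpu == -1 is rejected
 * just below.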
8126 */ 8127 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 8128 return -EINVAL; 8129 8130 if (flags & PERF_FLAG_FD_CLOEXEC) 8131 f_flags |= O_CLOEXEC; 8132 8133 event_fd = get_unused_fd_flags(f_flags); 8134 if (event_fd < 0) 8135 return event_fd; 8136 8137 if (group_fd != -1) { 8138 err = perf_fget_light(group_fd, &group); 8139 if (err) 8140 goto err_fd; 8141 group_leader = group.file->private_data; 8142 if (flags & PERF_FLAG_FD_OUTPUT) 8143 output_event = group_leader; 8144 if (flags & PERF_FLAG_FD_NO_GROUP) 8145 group_leader = NULL; 8146 } 8147 8148 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 8149 task = find_lively_task_by_vpid(pid); 8150 if (IS_ERR(task)) { 8151 err = PTR_ERR(task); 8152 goto err_group_fd; 8153 } 8154 } 8155 8156 if (task && group_leader && 8157 group_leader->attr.inherit != attr.inherit) { 8158 err = -EINVAL; 8159 goto err_task; 8160 } 8161 8162 get_online_cpus(); 8163 8164 if (flags & PERF_FLAG_PID_CGROUP) 8165 cgroup_fd = pid; 8166 8167 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 8168 NULL, NULL, cgroup_fd); 8169 if (IS_ERR(event)) { 8170 err = PTR_ERR(event); 8171 goto err_cpus; 8172 } 8173 8174 if (is_sampling_event(event)) { 8175 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 8176 err = -ENOTSUPP; 8177 goto err_alloc; 8178 } 8179 } 8180 8181 account_event(event); 8182 8183 /* 8184 * Special case software events and allow them to be part of 8185 * any hardware group. 8186 */ 8187 pmu = event->pmu; 8188 8189 if (attr.use_clockid) { 8190 err = perf_event_set_clock(event, attr.clockid); 8191 if (err) 8192 goto err_alloc; 8193 } 8194 8195 if (group_leader && 8196 (is_software_event(event) != is_software_event(group_leader))) { 8197 if (is_software_event(event)) { 8198 /* 8199 * If event and group_leader are not both a software 8200 * event, and event is, then group leader is not. 8201 * 8202 * Allow the addition of software events to !software 8203 * groups, this is safe because software events never 8204 * fail to schedule. 8205 */ 8206 pmu = group_leader->pmu; 8207 } else if (is_software_event(group_leader) && 8208 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8209 /* 8210 * In case the group is a pure software group, and we 8211 * try to add a hardware event, move the whole group to 8212 * the hardware context. 8213 */ 8214 move_group = 1; 8215 } 8216 } 8217 8218 /* 8219 * Get the target context (task or percpu): 8220 */ 8221 ctx = find_get_context(pmu, task, event); 8222 if (IS_ERR(ctx)) { 8223 err = PTR_ERR(ctx); 8224 goto err_alloc; 8225 } 8226 8227 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8228 err = -EBUSY; 8229 goto err_context; 8230 } 8231 8232 if (task) { 8233 put_task_struct(task); 8234 task = NULL; 8235 } 8236 8237 /* 8238 * Look up the group leader (we will attach this event to it): 8239 */ 8240 if (group_leader) { 8241 err = -EINVAL; 8242 8243 /* 8244 * Do not allow a recursive hierarchy (this new sibling 8245 * becoming part of another group-sibling): 8246 */ 8247 if (group_leader->group_leader != group_leader) 8248 goto err_context; 8249 8250 /* All events in a group should have the same clock */ 8251 if (group_leader->clock != event->clock) 8252 goto err_context; 8253 8254 /* 8255 * Do not allow to attach to a group in a different 8256 * task or CPU context: 8257 */ 8258 if (move_group) { 8259 /* 8260 * Make sure we're both on the same task, or both 8261 * per-cpu events. 
8262 */ 8263 if (group_leader->ctx->task != ctx->task) 8264 goto err_context; 8265 8266 /* 8267 * Make sure we're both events for the same CPU; 8268 * grouping events for different CPUs is broken; since 8269 * you can never concurrently schedule them anyhow. 8270 */ 8271 if (group_leader->cpu != event->cpu) 8272 goto err_context; 8273 } else { 8274 if (group_leader->ctx != ctx) 8275 goto err_context; 8276 } 8277 8278 /* 8279 * Only a group leader can be exclusive or pinned 8280 */ 8281 if (attr.exclusive || attr.pinned) 8282 goto err_context; 8283 } 8284 8285 if (output_event) { 8286 err = perf_event_set_output(event, output_event); 8287 if (err) 8288 goto err_context; 8289 } 8290 8291 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 8292 f_flags); 8293 if (IS_ERR(event_file)) { 8294 err = PTR_ERR(event_file); 8295 goto err_context; 8296 } 8297 8298 if (move_group) { 8299 gctx = group_leader->ctx; 8300 8301 /* 8302 * See perf_event_ctx_lock() for comments on the details 8303 * of swizzling perf_event::ctx. 8304 */ 8305 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8306 8307 perf_remove_from_context(group_leader, false); 8308 8309 list_for_each_entry(sibling, &group_leader->sibling_list, 8310 group_entry) { 8311 perf_remove_from_context(sibling, false); 8312 put_ctx(gctx); 8313 } 8314 } else { 8315 mutex_lock(&ctx->mutex); 8316 } 8317 8318 WARN_ON_ONCE(ctx->parent_ctx); 8319 8320 if (move_group) { 8321 /* 8322 * Wait for everybody to stop referencing the events through 8323 * the old lists, before installing them on the new lists. 8324 */ 8325 synchronize_rcu(); 8326 8327 /* 8328 * Install the group siblings before the group leader. 8329 * 8330 * Because a group leader will try and install the entire group 8331 * (through the sibling list, which is still intact), we can 8332 * end up with siblings installed in the wrong context. 8333 * 8334 * By installing siblings first we NO-OP because they're not 8335 * reachable through the group lists. 8336 */ 8337 list_for_each_entry(sibling, &group_leader->sibling_list, 8338 group_entry) { 8339 perf_event__state_init(sibling); 8340 perf_install_in_context(ctx, sibling, sibling->cpu); 8341 get_ctx(ctx); 8342 } 8343 8344 /* 8345 * Removing from the context ends up with a disabled 8346 * event. What we want here is the event in its initial 8347 * startup state, ready to be added into the new context. 8348 */ 8349 perf_event__state_init(group_leader); 8350 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8351 get_ctx(ctx); 8352 } 8353 8354 if (!exclusive_event_installable(event, ctx)) { 8355 err = -EBUSY; 8356 mutex_unlock(&ctx->mutex); 8357 fput(event_file); 8358 goto err_context; 8359 } 8360 8361 perf_install_in_context(ctx, event, event->cpu); 8362 perf_unpin_context(ctx); 8363 8364 if (move_group) { 8365 mutex_unlock(&gctx->mutex); 8366 put_ctx(gctx); 8367 } 8368 mutex_unlock(&ctx->mutex); 8369 8370 put_online_cpus(); 8371 8372 event->owner = current; 8373 8374 mutex_lock(&current->perf_event_mutex); 8375 list_add_tail(&event->owner_entry, &current->perf_event_list); 8376 mutex_unlock(&current->perf_event_mutex); 8377 8378 /* 8379 * Precalculate sample_data sizes 8380 */ 8381 perf_event__header_size(event); 8382 perf_event__id_header_size(event); 8383 8384 /* 8385 * Drop the reference on the group_event after placing the 8386 * new event on the sibling_list. This ensures destruction 8387 * of the group leader will find the pointer to itself in 8388 * perf_group_detach().
8389 */ 8390 fdput(group); 8391 fd_install(event_fd, event_file); 8392 return event_fd; 8393 8394 err_context: 8395 perf_unpin_context(ctx); 8396 put_ctx(ctx); 8397 err_alloc: 8398 free_event(event); 8399 err_cpus: 8400 put_online_cpus(); 8401 err_task: 8402 if (task) 8403 put_task_struct(task); 8404 err_group_fd: 8405 fdput(group); 8406 err_fd: 8407 put_unused_fd(event_fd); 8408 return err; 8409 } 8410 8411 /** 8412 * perf_event_create_kernel_counter 8413 * 8414 * @attr: attributes of the counter to create 8415 * @cpu: cpu in which the counter is bound 8416 * @task: task to profile (NULL for percpu) 8417 */ 8418 struct perf_event * 8419 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8420 struct task_struct *task, 8421 perf_overflow_handler_t overflow_handler, 8422 void *context) 8423 { 8424 struct perf_event_context *ctx; 8425 struct perf_event *event; 8426 int err; 8427 8428 /* 8429 * Get the target context (task or percpu): 8430 */ 8431 8432 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8433 overflow_handler, context, -1); 8434 if (IS_ERR(event)) { 8435 err = PTR_ERR(event); 8436 goto err; 8437 } 8438 8439 /* Mark owner so we could distinguish it from user events. */ 8440 event->owner = EVENT_OWNER_KERNEL; 8441 8442 account_event(event); 8443 8444 ctx = find_get_context(event->pmu, task, event); 8445 if (IS_ERR(ctx)) { 8446 err = PTR_ERR(ctx); 8447 goto err_free; 8448 } 8449 8450 WARN_ON_ONCE(ctx->parent_ctx); 8451 mutex_lock(&ctx->mutex); 8452 if (!exclusive_event_installable(event, ctx)) { 8453 mutex_unlock(&ctx->mutex); 8454 perf_unpin_context(ctx); 8455 put_ctx(ctx); 8456 err = -EBUSY; 8457 goto err_free; 8458 } 8459 8460 perf_install_in_context(ctx, event, cpu); 8461 perf_unpin_context(ctx); 8462 mutex_unlock(&ctx->mutex); 8463 8464 return event; 8465 8466 err_free: 8467 free_event(event); 8468 err: 8469 return ERR_PTR(err); 8470 } 8471 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8472 8473 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8474 { 8475 struct perf_event_context *src_ctx; 8476 struct perf_event_context *dst_ctx; 8477 struct perf_event *event, *tmp; 8478 LIST_HEAD(events); 8479 8480 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8481 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8482 8483 /* 8484 * See perf_event_ctx_lock() for comments on the details 8485 * of swizzling perf_event::ctx. 8486 */ 8487 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); 8488 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 8489 event_entry) { 8490 perf_remove_from_context(event, false); 8491 unaccount_event_cpu(event, src_cpu); 8492 put_ctx(src_ctx); 8493 list_add(&event->migrate_entry, &events); 8494 } 8495 8496 /* 8497 * Wait for the events to quiesce before re-instating them. 8498 */ 8499 synchronize_rcu(); 8500 8501 /* 8502 * Re-instate events in 2 passes. 8503 * 8504 * Skip over group leaders and only install siblings on this first 8505 * pass, siblings will not get enabled without a leader, however a 8506 * leader will enable its siblings, even if those are still on the old 8507 * context. 
/*
 * Move all of @pmu's events from @src_cpu's context over to @dst_cpu's
 * context, e.g. when @src_cpu is going offline.
 */
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	/*
	 * See perf_event_ctx_lock() for comments on the details
	 * of swizzling perf_event::ctx.
	 */
	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
		unaccount_event_cpu(event, src_cpu);
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}

	/*
	 * Wait for the events to quiesce before re-instating them.
	 */
	synchronize_rcu();

	/*
	 * Re-instate the events in two passes.
	 *
	 * Skip over group leaders and only install siblings on this first
	 * pass; siblings will not get enabled without a leader, but a
	 * leader will enable its siblings, even if those are still on the
	 * old context.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		if (event->group_leader == event)
			continue;

		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}

	/*
	 * Once all the siblings are set up properly, install the group
	 * leaders to make it go.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
	mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);

static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Make sure user/parent gets notified that we just
	 * lost one event.
	 */
	perf_event_wakeup(parent_event);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	/*
	 * Do not destroy the 'original' grouping; because of the context
	 * switch optimization the original events could've ended up in a
	 * random child task.
	 *
	 * If we were to destroy the original group, all group related
	 * operations would cease to function properly after this random
	 * child dies.
	 *
	 * Do destroy all inherited groups, we don't care about those
	 * and being thorough is better.
	 */
	perf_remove_from_context(child_event, !!child_event->parent);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	} else {
		child_event->state = PERF_EVENT_STATE_EXIT;
		perf_event_wakeup(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *next;
	struct perf_event_context *child_ctx, *clone_ctx = NULL;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;

	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	clone_ctx = unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	if (clone_ctx)
		put_ctx(clone_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       put_event()
	 *         mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of fail.
 *
 * Not all locks are strictly required, but take them anyway to be nice and
 * help out with the lockdep assertions.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Look up the perf_event behind a perf event file descriptor and take a
 * reference on it. The caller is responsible for dropping that reference
 * when it is done with the event.
 */
struct perf_event *perf_event_get(unsigned int fd)
{
	int err;
	struct fd f;
	struct perf_event *event;

	err = perf_fget_light(fd, &f);
	if (err)
		return ERR_PTR(err);

	event = f.file->private_data;
	atomic_long_inc(&event->refcount);
	fdput(f);

	return event;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	if (!event)
		return ERR_PTR(-EINVAL);

	return &event->attr;
}
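/*
 * Illustrative sketch, not part of the original file: how an in-kernel
 * consumer holding a perf event file descriptor might resolve it to a
 * struct perf_event and inspect its attributes. The "example_" name is
 * an assumption; the release via perf_event_release_kernel() mirrors what
 * existing in-kernel users do, but a real caller would keep the reference
 * for as long as it uses the event.
 */
static int __maybe_unused example_event_type_from_fd(unsigned int fd)
{
	const struct perf_event_attr *attr;
	struct perf_event *event;
	int type;

	event = perf_event_get(fd);		/* takes a reference */
	if (IS_ERR(event))
		return PTR_ERR(event);

	attr = perf_event_attrs(event);
	type = IS_ERR(attr) ? PTR_ERR(attr) : (int)attr->type;

	perf_event_release_kernel(event);	/* drop the reference */
	return type;
}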
/*
 * Inherit an event from parent task to child task.
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* Do not patch the jump label more than once per second. */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validate that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
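/*
 * Illustrative sketch, not part of the original file: how a PMU driver
 * might expose an event alias through sysfs using perf_event_sysfs_show()
 * above. The "example_" names and the event string are assumptions made
 * for demonstration; a real driver would hook the group into its
 * pmu::attr_groups.
 */
static struct perf_pmu_events_attr example_attr_cycles = {
	.attr		= __ATTR(example_cycles, 0444, perf_event_sysfs_show, NULL),
	.id		= 0,
	.event_str	= "event=0x3c",
};

static struct attribute *example_events_attrs[] = {
	&example_attr_cycles.attr.attr,
	NULL,
};

static const struct attribute_group example_events_group __maybe_unused = {
	.name	= "events",
	.attrs	= example_events_attrs,
};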
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */