/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}
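/*
 * Example (editor's sketch, not part of the original file): callers of
 * task_function_call() typically retry while the target task migrates,
 * because -EAGAIN only means "the task moved before the IPI landed".
 * __toggle_state() and the loop below are hypothetical:
 *
 *	static int __toggle_state(void *info)
 *	{
 *		struct perf_event *event = info;
 *		... runs on the CPU where the task is current ...
 *		return 0;
 *	}
 *
 *	do {
 *		ret = task_function_call(p, __toggle_state, event);
 *	} while (ret == -EAGAIN);
 */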
/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	target cpu to queue this function call on
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}
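/*
 * Example (editor's sketch, not part of the original file): the handler
 * above is wired to the "kernel.perf_event_max_sample_rate" sysctl from
 * kernel/sysctl.c, roughly like the entry below.  Field values here are
 * from memory and the "one" minimum is an assumption, not verified
 * against this tree:
 *
 *	{
 *		.procname	= "perf_event_max_sample_rate",
 *		.data		= &sysctl_perf_event_sample_rate,
 *		.maxlen		= sizeof(sysctl_perf_event_sample_rate),
 *		.mode		= 0644,
 *		.proc_handler	= perf_proc_update_handler,
 *		.extra1		= &one,
 *	},
 *
 * Writing the sysctl thus recomputes max_samples_per_tick,
 * perf_sample_period_ns and the allowed-ns budget in one place.
 */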
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __get_cpu_var(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES.  Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}
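/*
 * Example (editor's note): running_sample_length is an exponential
 * moving average with a 1/NR_ACCUMULATED_SAMPLES decay per sample, so
 * no sample count has to be kept.  A quick sketch of the fixed point
 * it converges to, assuming a steady stream of 10 us handlers:
 *
 *	u64 len = 0;
 *	for (i = 0; i < 1000; i++) {
 *		len -= len / 128;
 *		len += 10000;		(10 us, in ns)
 *	}
 *	len / 128 is now ~10000 ns, i.e. the true average
 *
 * At the fixed point len == 128 * sample_len, which is why the average
 * is read back as len / NR_ACCUMULATED_SAMPLES above.
 */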
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}
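/*
 * Example (editor's note): these helpers encode the lock order used
 * throughout this file: cpuctx->ctx.lock is always taken before the
 * task context's ctx->lock.  The canonical critical section, as in
 * perf_cgroup_switch() and __perf_install_in_context() below:
 *
 *	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 *	perf_pmu_disable(cpuctx->ctx.pmu);
 *	... schedule events in or out ...
 *	perf_pmu_enable(cpuctx->ctx.pmu);
 *	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 */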
#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw. Cgroup events were not scheduled
	 * out during the ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
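/*
 * Example (editor's sketch, userspace side): how an event ends up in
 * perf_cgroup_connect().  With PERF_FLAG_PID_CGROUP, the "pid" argument
 * of the perf_event_open() syscall is a file descriptor for a perf_event
 * cgroup directory, and the cpu argument must name a real CPU, since
 * cgroup events are per-CPU.  The mount path below is illustrative:
 *
 *	int cfd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	struct perf_event_attr attr = { .type = PERF_TYPE_HARDWARE, ... };
 *
 *	int pfd = syscall(__NR_perf_event_open, &attr, cfd, 0, -1,
 *			  PERF_FLAG_PID_CGROUP);
 */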
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);

	rotations = perf_rotate_context(cpuctx);

	/*
	 * arm timer if needed
	 */
	if (rotations) {
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
		ret = HRTIMER_RESTART;
	}

	return ret;
}

/* CPU is going down */
void perf_cpu_hrtimer_cancel(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	if (WARN_ON(cpu != smp_processor_id()))
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		if (pmu->task_ctx_nr == perf_sw_context)
			continue;

		hrtimer_cancel(&cpuctx->hrtimer);
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	int timer;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	timer = pmu->hrtimer_interval_ms;
	if (timer < 1)
		timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	hr->function = perf_cpu_hrtimer_handler;
}

static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	if (hrtimer_active(hr))
		return;

	if (!hrtimer_callback_running(hr))
		__hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
					 0, HRTIMER_MODE_REL_PINNED, 0);
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
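/*
 * Example (editor's note): perf_pmu_disable()/perf_pmu_enable() nest via
 * the per-cpu pmu_disable_count; only the outermost pair reaches the
 * hardware callbacks.  The usual pattern around state changes, as in
 * event_sched_out()/event_sched_in() below:
 *
 *	perf_pmu_disable(event->pmu);
 *	... flip event state, call event->pmu->add()/del() ...
 *	perf_pmu_enable(event->pmu);
 *
 * Nested sections are therefore cheap: inner pairs only bump the count.
 */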
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
	ctx->generation++;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}
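/*
 * Example (editor's note): contexts are freed via kfree_rcu(), so a
 * context found under rcu_read_lock() may already be on its way out.
 * The atomic_inc_not_zero() in get_ctx() is what makes pinning such a
 * context safe; the failure branch is handled explicitly in
 * perf_lock_task_context() below:
 *
 *	rcu_read_lock();
 *	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 *	if (ctx && !atomic_inc_not_zero(&ctx->refcount))
 *		ctx = NULL;	(already being freed, do not touch)
 *	rcu_read_unlock();
 */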
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
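/*
 * Example (editor's sketch): perf_pin_task_context() hands back a
 * context holding both a pin and a reference, so callers bracket their
 * use with perf_unpin_context() and put_ctx().  Error handling elided,
 * ctxn as used elsewhere in this file:
 *
 *	ctx = perf_pin_task_context(task, ctxn);
 *	if (ctx) {
 *		... ctx can be neither swapped nor freed here ...
 *		perf_unpin_context(ctx);
 *		put_ctx(ctx);
 *	}
 */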
/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	if (has_branch_stack(event))
		ctx->nr_branch_stack++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}
/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}
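/*
 * Worked example (editor's note): for
 *	read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 *		      PERF_FORMAT_TOTAL_TIME_ENABLED
 * on a leader with two siblings, perf_event__read_size() computes:
 *
 *	entry = 16	(value + id)
 *	nr    = 3	(leader + 2 siblings)
 *	size  = 8	(time_enabled)
 *	      + 8	(the nr field added for PERF_FORMAT_GROUP)
 *	      + 3 * 16	(one entry per group member)
 *	      = 64 bytes
 *
 * which matches the group read layout described for the read_format
 * field in include/uapi/linux/perf_event.h.
 */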
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	if (has_branch_stack(event))
		ctx->nr_branch_stack--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}
static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

struct remove_event {
	struct perf_event *event;
	bool detach_group;
};
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct remove_event *re = info;
	struct perf_event *event = re->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	if (re->detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	lockdep_assert_held(&ctx->lock);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

out:
	perf_pmu_enable(event->pmu);

	return ret;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_cpu_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_cpu_hrtimer_restart(cpuctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
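/*
 * Example (editor's note): group_sched_in() above is the canonical user
 * of the PMU transaction API.  The protocol, in sketch form:
 *
 *	pmu->start_txn(pmu);
 *	event_sched_in(leader), else goto cancel;
 *	for each sibling:
 *		event_sched_in(sibling), else undo partial group, goto cancel;
 *	if (!pmu->commit_txn(pmu))
 *		return 0;	(the whole group fit on the PMU)
 * cancel:
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 *
 * commit_txn() lets hardware drivers validate the whole group against
 * the counter constraints at once instead of one event at a time.
 */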
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;
	if (event->cpu != -1)
		event->cpu = cpu;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}
2031 */ 2032 if (!ctx->is_active) 2033 return -EINVAL; 2034 2035 raw_spin_lock(&ctx->lock); 2036 update_context_time(ctx); 2037 2038 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2039 goto unlock; 2040 2041 /* 2042 * set current task's cgroup time reference point 2043 */ 2044 perf_cgroup_set_timestamp(current, ctx); 2045 2046 __perf_event_mark_enabled(event); 2047 2048 if (!event_filter_match(event)) { 2049 if (is_cgroup_event(event)) 2050 perf_cgroup_defer_enabled(event); 2051 goto unlock; 2052 } 2053 2054 /* 2055 * If the event is in a group and isn't the group leader, 2056 * then don't put it on unless the group is on. 2057 */ 2058 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2059 goto unlock; 2060 2061 if (!group_can_go_on(event, cpuctx, 1)) { 2062 err = -EEXIST; 2063 } else { 2064 if (event == leader) 2065 err = group_sched_in(event, cpuctx, ctx); 2066 else 2067 err = event_sched_in(event, cpuctx, ctx); 2068 } 2069 2070 if (err) { 2071 /* 2072 * If this event can't go on and it's part of a 2073 * group, then the whole group has to come off. 2074 */ 2075 if (leader != event) { 2076 group_sched_out(leader, cpuctx, ctx); 2077 perf_cpu_hrtimer_restart(cpuctx); 2078 } 2079 if (leader->attr.pinned) { 2080 update_group_times(leader); 2081 leader->state = PERF_EVENT_STATE_ERROR; 2082 } 2083 } 2084 2085 unlock: 2086 raw_spin_unlock(&ctx->lock); 2087 2088 return 0; 2089 } 2090 2091 /* 2092 * Enable an event. 2093 * 2094 * If event->ctx is a cloned context, callers must make sure that 2095 * every task struct that event->ctx->task could possibly point to 2096 * remains valid. This condition is satisfied when called through 2097 * perf_event_for_each_child or perf_event_for_each as described 2098 * for perf_event_disable. 2099 */ 2100 void perf_event_enable(struct perf_event *event) 2101 { 2102 struct perf_event_context *ctx = event->ctx; 2103 struct task_struct *task = ctx->task; 2104 2105 if (!task) { 2106 /* 2107 * Enable the event on the cpu that it's on 2108 */ 2109 cpu_function_call(event->cpu, __perf_event_enable, event); 2110 return; 2111 } 2112 2113 raw_spin_lock_irq(&ctx->lock); 2114 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2115 goto out; 2116 2117 /* 2118 * If the event is in error state, clear that first. 2119 * That way, if we see the event in error state below, we 2120 * know that it has gone back into error state, as distinct 2121 * from the task having been scheduled away before the 2122 * cross-call arrived. 2123 */ 2124 if (event->state == PERF_EVENT_STATE_ERROR) 2125 event->state = PERF_EVENT_STATE_OFF; 2126 2127 retry: 2128 if (!ctx->is_active) { 2129 __perf_event_mark_enabled(event); 2130 goto out; 2131 } 2132 2133 raw_spin_unlock_irq(&ctx->lock); 2134 2135 if (!task_function_call(task, __perf_event_enable, event)) 2136 return; 2137 2138 raw_spin_lock_irq(&ctx->lock); 2139 2140 /* 2141 * If the context is active and the event is still off, 2142 * we need to retry the cross-call.
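 *
 * Illustrative interleaving (an expository addition, not from the
 * original source) of why the retry is needed:
 *
 *   perf_event_enable()                  target task
 *   ---------------------------------    ---------------------------
 *   sees ctx->is_active; drops lock
 *   sends the cross-call                 schedules out, so the IPI
 *                                        misses it and the event
 *                                        stays OFF
 *                                        schedules back in
 *   re-takes ctx->lock: ctx->is_active
 *   is set and the event is still OFF,
 *   so we retry the cross-call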
2143 */ 2144 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2145 /* 2146 * task could have been flipped by a concurrent 2147 * perf_event_context_sched_out() 2148 */ 2149 task = ctx->task; 2150 goto retry; 2151 } 2152 2153 out: 2154 raw_spin_unlock_irq(&ctx->lock); 2155 } 2156 EXPORT_SYMBOL_GPL(perf_event_enable); 2157 2158 int perf_event_refresh(struct perf_event *event, int refresh) 2159 { 2160 /* 2161 * not supported on inherited events 2162 */ 2163 if (event->attr.inherit || !is_sampling_event(event)) 2164 return -EINVAL; 2165 2166 atomic_add(refresh, &event->event_limit); 2167 perf_event_enable(event); 2168 2169 return 0; 2170 } 2171 EXPORT_SYMBOL_GPL(perf_event_refresh); 2172 2173 static void ctx_sched_out(struct perf_event_context *ctx, 2174 struct perf_cpu_context *cpuctx, 2175 enum event_type_t event_type) 2176 { 2177 struct perf_event *event; 2178 int is_active = ctx->is_active; 2179 2180 ctx->is_active &= ~event_type; 2181 if (likely(!ctx->nr_events)) 2182 return; 2183 2184 update_context_time(ctx); 2185 update_cgrp_time_from_cpuctx(cpuctx); 2186 if (!ctx->nr_active) 2187 return; 2188 2189 perf_pmu_disable(ctx->pmu); 2190 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2191 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2192 group_sched_out(event, cpuctx, ctx); 2193 } 2194 2195 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2196 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2197 group_sched_out(event, cpuctx, ctx); 2198 } 2199 perf_pmu_enable(ctx->pmu); 2200 } 2201 2202 /* 2203 * Test whether two contexts are equivalent, i.e. whether they have both been 2204 * cloned from the same version of the same context. 2205 * 2206 * Equivalence is measured using a generation number in the context that is 2207 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2208 * and list_del_event(). 2209 */ 2210 static int context_equiv(struct perf_event_context *ctx1, 2211 struct perf_event_context *ctx2) 2212 { 2213 /* Pinning disables the swap optimization */ 2214 if (ctx1->pin_count || ctx2->pin_count) 2215 return 0; 2216 2217 /* If ctx1 is the parent of ctx2 */ 2218 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2219 return 1; 2220 2221 /* If ctx2 is the parent of ctx1 */ 2222 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2223 return 1; 2224 2225 /* 2226 * If ctx1 and ctx2 have the same parent; we flatten the parent 2227 * hierarchy, see perf_event_init_context(). 2228 */ 2229 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2230 ctx1->parent_gen == ctx2->parent_gen) 2231 return 1; 2232 2233 /* Unmatched */ 2234 return 0; 2235 } 2236 2237 static void __perf_event_sync_stat(struct perf_event *event, 2238 struct perf_event *next_event) 2239 { 2240 u64 value; 2241 2242 if (!event->attr.inherit_stat) 2243 return; 2244 2245 /* 2246 * Update the event value, we cannot use perf_event_read() 2247 * because we're in the middle of a context switch and have IRQs 2248 * disabled, which upsets smp_call_function_single(), however 2249 * we know the event must be on the current CPU, therefore we 2250 * don't need to use it. 
2251 */ 2252 switch (event->state) { 2253 case PERF_EVENT_STATE_ACTIVE: 2254 event->pmu->read(event); 2255 /* fall-through */ 2256 2257 case PERF_EVENT_STATE_INACTIVE: 2258 update_event_times(event); 2259 break; 2260 2261 default: 2262 break; 2263 } 2264 2265 /* 2266 * In order to keep per-task stats reliable we need to flip the event 2267 * values when we flip the contexts. 2268 */ 2269 value = local64_read(&next_event->count); 2270 value = local64_xchg(&event->count, value); 2271 local64_set(&next_event->count, value); 2272 2273 swap(event->total_time_enabled, next_event->total_time_enabled); 2274 swap(event->total_time_running, next_event->total_time_running); 2275 2276 /* 2277 * Since we swizzled the values, update the user visible data too. 2278 */ 2279 perf_event_update_userpage(event); 2280 perf_event_update_userpage(next_event); 2281 } 2282 2283 static void perf_event_sync_stat(struct perf_event_context *ctx, 2284 struct perf_event_context *next_ctx) 2285 { 2286 struct perf_event *event, *next_event; 2287 2288 if (!ctx->nr_stat) 2289 return; 2290 2291 update_context_time(ctx); 2292 2293 event = list_first_entry(&ctx->event_list, 2294 struct perf_event, event_entry); 2295 2296 next_event = list_first_entry(&next_ctx->event_list, 2297 struct perf_event, event_entry); 2298 2299 while (&event->event_entry != &ctx->event_list && 2300 &next_event->event_entry != &next_ctx->event_list) { 2301 2302 __perf_event_sync_stat(event, next_event); 2303 2304 event = list_next_entry(event, event_entry); 2305 next_event = list_next_entry(next_event, event_entry); 2306 } 2307 } 2308 2309 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2310 struct task_struct *next) 2311 { 2312 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2313 struct perf_event_context *next_ctx; 2314 struct perf_event_context *parent, *next_parent; 2315 struct perf_cpu_context *cpuctx; 2316 int do_switch = 1; 2317 2318 if (likely(!ctx)) 2319 return; 2320 2321 cpuctx = __get_cpu_context(ctx); 2322 if (!cpuctx->task_ctx) 2323 return; 2324 2325 rcu_read_lock(); 2326 next_ctx = next->perf_event_ctxp[ctxn]; 2327 if (!next_ctx) 2328 goto unlock; 2329 2330 parent = rcu_dereference(ctx->parent_ctx); 2331 next_parent = rcu_dereference(next_ctx->parent_ctx); 2332 2333 /* If neither context has a parent context, they cannot be clones. */ 2334 if (!parent || !next_parent) 2335 goto unlock; 2336 2337 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2338 /* 2339 * Looks like the two contexts are clones, so we might be 2340 * able to optimize the context switch. We lock both 2341 * contexts and check that they are clones under the 2342 * lock (including re-checking that neither has been 2343 * uncloned in the meantime). It doesn't matter which 2344 * order we take the locks because no other cpu could 2345 * be trying to lock both of these tasks.
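 *
 * A concrete illustration (an expository addition): a perf-monitored
 * shell forks children A and B, each inheriting a clone of the
 * parent's context. On a switch from A to B, rather than unscheduling
 * all of A's events and rescheduling B's, the two equivalent contexts
 * are simply exchanged:
 *
 *   A->perf_event_ctxp[ctxn] = ctxB;   ctxB->task = A;
 *   B->perf_event_ctxp[ctxn] = ctxA;   ctxA->task = B;
 *
 * and the PMU keeps counting undisturbed; only the per-task counts
 * are flipped by perf_event_sync_stat().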
2346 */ 2347 raw_spin_lock(&ctx->lock); 2348 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2349 if (context_equiv(ctx, next_ctx)) { 2350 /* 2351 * XXX do we need a memory barrier of sorts 2352 * w.r.t. the rcu_dereference() of perf_event_ctxp? 2353 */ 2354 task->perf_event_ctxp[ctxn] = next_ctx; 2355 next->perf_event_ctxp[ctxn] = ctx; 2356 ctx->task = next; 2357 next_ctx->task = task; 2358 do_switch = 0; 2359 2360 perf_event_sync_stat(ctx, next_ctx); 2361 } 2362 raw_spin_unlock(&next_ctx->lock); 2363 raw_spin_unlock(&ctx->lock); 2364 } 2365 unlock: 2366 rcu_read_unlock(); 2367 2368 if (do_switch) { 2369 raw_spin_lock(&ctx->lock); 2370 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2371 cpuctx->task_ctx = NULL; 2372 raw_spin_unlock(&ctx->lock); 2373 } 2374 } 2375 2376 #define for_each_task_context_nr(ctxn) \ 2377 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2378 2379 /* 2380 * Called from scheduler to remove the events of the current task, 2381 * with interrupts disabled. 2382 * 2383 * We stop each event and update the event value in event->count. 2384 * 2385 * This does not protect us against NMI, but disable() 2386 * sets the disabled bit in the control field of event _before_ 2387 * accessing the event control register. If an NMI hits, then it will 2388 * not restart the event. 2389 */ 2390 void __perf_event_task_sched_out(struct task_struct *task, 2391 struct task_struct *next) 2392 { 2393 int ctxn; 2394 2395 for_each_task_context_nr(ctxn) 2396 perf_event_context_sched_out(task, ctxn, next); 2397 2398 /* 2399 * if cgroup events exist on this CPU, then we need 2400 * to check if we have to switch out PMU state. 2401 * cgroup events are system-wide mode only. 2402 */ 2403 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2404 perf_cgroup_sched_out(task, next); 2405 } 2406 2407 static void task_ctx_sched_out(struct perf_event_context *ctx) 2408 { 2409 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2410 2411 if (!cpuctx->task_ctx) 2412 return; 2413 2414 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2415 return; 2416 2417 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2418 cpuctx->task_ctx = NULL; 2419 } 2420 2421 /* 2422 * Called with IRQs disabled 2423 */ 2424 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2425 enum event_type_t event_type) 2426 { 2427 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2428 } 2429 2430 static void 2431 ctx_pinned_sched_in(struct perf_event_context *ctx, 2432 struct perf_cpu_context *cpuctx) 2433 { 2434 struct perf_event *event; 2435 2436 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2437 if (event->state <= PERF_EVENT_STATE_OFF) 2438 continue; 2439 if (!event_filter_match(event)) 2440 continue; 2441 2442 /* may need to reset tstamp_enabled */ 2443 if (is_cgroup_event(event)) 2444 perf_cgroup_mark_enabled(event, ctx); 2445 2446 if (group_can_go_on(event, cpuctx, 1)) 2447 group_sched_in(event, cpuctx, ctx); 2448 2449 /* 2450 * If this pinned group hasn't been scheduled, 2451 * put it in error state.
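 *
 * For example (an expository addition): with four pinned groups
 * competing for two hardware counters, the first two go on; the
 * other two fail group_sched_in(), remain INACTIVE, and are moved
 * to ERROR here. A later read() on them returns end-of-file (see
 * perf_read_hw()).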
2452 */ 2453 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2454 update_group_times(event); 2455 event->state = PERF_EVENT_STATE_ERROR; 2456 } 2457 } 2458 } 2459 2460 static void 2461 ctx_flexible_sched_in(struct perf_event_context *ctx, 2462 struct perf_cpu_context *cpuctx) 2463 { 2464 struct perf_event *event; 2465 int can_add_hw = 1; 2466 2467 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2468 /* Ignore events in OFF or ERROR state */ 2469 if (event->state <= PERF_EVENT_STATE_OFF) 2470 continue; 2471 /* 2472 * Listen to the 'cpu' scheduling filter constraint 2473 * of events: 2474 */ 2475 if (!event_filter_match(event)) 2476 continue; 2477 2478 /* may need to reset tstamp_enabled */ 2479 if (is_cgroup_event(event)) 2480 perf_cgroup_mark_enabled(event, ctx); 2481 2482 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2483 if (group_sched_in(event, cpuctx, ctx)) 2484 can_add_hw = 0; 2485 } 2486 } 2487 } 2488 2489 static void 2490 ctx_sched_in(struct perf_event_context *ctx, 2491 struct perf_cpu_context *cpuctx, 2492 enum event_type_t event_type, 2493 struct task_struct *task) 2494 { 2495 u64 now; 2496 int is_active = ctx->is_active; 2497 2498 ctx->is_active |= event_type; 2499 if (likely(!ctx->nr_events)) 2500 return; 2501 2502 now = perf_clock(); 2503 ctx->timestamp = now; 2504 perf_cgroup_set_timestamp(task, ctx); 2505 /* 2506 * First go through the list and put on any pinned groups 2507 * in order to give them the best chance of going on. 2508 */ 2509 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2510 ctx_pinned_sched_in(ctx, cpuctx); 2511 2512 /* Then walk through the lower prio flexible groups */ 2513 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2514 ctx_flexible_sched_in(ctx, cpuctx); 2515 } 2516 2517 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2518 enum event_type_t event_type, 2519 struct task_struct *task) 2520 { 2521 struct perf_event_context *ctx = &cpuctx->ctx; 2522 2523 ctx_sched_in(ctx, cpuctx, event_type, task); 2524 } 2525 2526 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2527 struct task_struct *task) 2528 { 2529 struct perf_cpu_context *cpuctx; 2530 2531 cpuctx = __get_cpu_context(ctx); 2532 if (cpuctx->task_ctx == ctx) 2533 return; 2534 2535 perf_ctx_lock(cpuctx, ctx); 2536 perf_pmu_disable(ctx->pmu); 2537 /* 2538 * We want to keep the following priority order: 2539 * cpu pinned (that don't need to move), task pinned, 2540 * cpu flexible, task flexible. 2541 */ 2542 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2543 2544 if (ctx->nr_events) 2545 cpuctx->task_ctx = ctx; 2546 2547 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2548 2549 perf_pmu_enable(ctx->pmu); 2550 perf_ctx_unlock(cpuctx, ctx); 2551 2552 /* 2553 * Since these rotations are per-cpu, we need to ensure the 2554 * cpu-context we got scheduled on is actually rotating. 2555 */ 2556 perf_pmu_rotate_start(ctx->pmu); 2557 } 2558 2559 /* 2560 * When sampling the branch stack in system-wide mode, it may be necessary 2561 * to flush the stack on context switch. This happens when the branch 2562 * stack does not tag its entries with the pid of the current task. 2563 * Otherwise it becomes impossible to associate a branch entry with a 2564 * task. This ambiguity is more likely to appear when the branch stack 2565 * supports priv level filtering and the user sets it to monitor only 2566 * at the user level (which could be a useful measurement in system-wide 2567 * mode).
In that case, the risk is high of having a branch stack with 2568 * branches from multiple tasks. Flushing may mean dropping the existing 2569 * entries or stashing them somewhere in the PMU specific code layer. 2570 * 2571 * This function provides the context switch callback to the lower code 2572 * layer. It is invoked ONLY when there is at least one system-wide context 2573 * with at least one active event using taken branch sampling. 2574 */ 2575 static void perf_branch_stack_sched_in(struct task_struct *prev, 2576 struct task_struct *task) 2577 { 2578 struct perf_cpu_context *cpuctx; 2579 struct pmu *pmu; 2580 unsigned long flags; 2581 2582 /* no need to flush branch stack if not changing task */ 2583 if (prev == task) 2584 return; 2585 2586 local_irq_save(flags); 2587 2588 rcu_read_lock(); 2589 2590 list_for_each_entry_rcu(pmu, &pmus, entry) { 2591 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2592 2593 /* 2594 * check if the context has at least one 2595 * event using PERF_SAMPLE_BRANCH_STACK 2596 */ 2597 if (cpuctx->ctx.nr_branch_stack > 0 2598 && pmu->flush_branch_stack) { 2599 2600 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2601 2602 perf_pmu_disable(pmu); 2603 2604 pmu->flush_branch_stack(); 2605 2606 perf_pmu_enable(pmu); 2607 2608 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2609 } 2610 } 2611 2612 rcu_read_unlock(); 2613 2614 local_irq_restore(flags); 2615 } 2616 2617 /* 2618 * Called from scheduler to add the events of the current task 2619 * with interrupts disabled. 2620 * 2621 * We restore the event value and then enable it. 2622 * 2623 * This does not protect us against NMI, but enable() 2624 * sets the enabled bit in the control field of event _before_ 2625 * accessing the event control register. If an NMI hits, then it will 2626 * keep the event running. 2627 */ 2628 void __perf_event_task_sched_in(struct task_struct *prev, 2629 struct task_struct *task) 2630 { 2631 struct perf_event_context *ctx; 2632 int ctxn; 2633 2634 for_each_task_context_nr(ctxn) { 2635 ctx = task->perf_event_ctxp[ctxn]; 2636 if (likely(!ctx)) 2637 continue; 2638 2639 perf_event_context_sched_in(ctx, task); 2640 } 2641 /* 2642 * if cgroup events exist on this CPU, then we need 2643 * to check if we have to switch in PMU state. 2644 * cgroup events are system-wide mode only. 2645 */ 2646 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2647 perf_cgroup_sched_in(prev, task); 2648 2649 /* check for system-wide branch_stack events */ 2650 if (atomic_read(&__get_cpu_var(perf_branch_stack_events))) 2651 perf_branch_stack_sched_in(prev, task); 2652 } 2653 2654 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2655 { 2656 u64 frequency = event->attr.sample_freq; 2657 u64 sec = NSEC_PER_SEC; 2658 u64 divisor, dividend; 2659 2660 int count_fls, nsec_fls, frequency_fls, sec_fls; 2661 2662 count_fls = fls64(count); 2663 nsec_fls = fls64(nsec); 2664 frequency_fls = fls64(frequency); 2665 sec_fls = 30; 2666 2667 /* 2668 * We got @count in @nsec, with a target of sample_freq HZ 2669 * the target period becomes: 2670 * 2671 * @count * 10^9 2672 * period = ------------------- 2673 * @nsec * sample_freq 2674 * 2675 */ 2676 2677 /* 2678 * Reduce accuracy by one bit such that @a and @b converge 2679 * to a similar magnitude.
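 *
 * Worked example for the formula above (an expository addition): with
 * @count = 2,000,000 events over @nsec = 500,000,000 ns and
 * sample_freq = 1000 Hz:
 *
 *   period = (2e6 * 1e9) / (5e8 * 1e3) = 4000
 *
 * i.e. one sample every 4000 events, 1000 times per second. The
 * REDUCE_FLS() dance below merely evaluates this expression without
 * overflowing 64 bits.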
2680 */ 2681 #define REDUCE_FLS(a, b) \ 2682 do { \ 2683 if (a##_fls > b##_fls) { \ 2684 a >>= 1; \ 2685 a##_fls--; \ 2686 } else { \ 2687 b >>= 1; \ 2688 b##_fls--; \ 2689 } \ 2690 } while (0) 2691 2692 /* 2693 * Reduce accuracy until either term fits in a u64, then proceed with 2694 * the other, so that finally we can do a u64/u64 division. 2695 */ 2696 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2697 REDUCE_FLS(nsec, frequency); 2698 REDUCE_FLS(sec, count); 2699 } 2700 2701 if (count_fls + sec_fls > 64) { 2702 divisor = nsec * frequency; 2703 2704 while (count_fls + sec_fls > 64) { 2705 REDUCE_FLS(count, sec); 2706 divisor >>= 1; 2707 } 2708 2709 dividend = count * sec; 2710 } else { 2711 dividend = count * sec; 2712 2713 while (nsec_fls + frequency_fls > 64) { 2714 REDUCE_FLS(nsec, frequency); 2715 dividend >>= 1; 2716 } 2717 2718 divisor = nsec * frequency; 2719 } 2720 2721 if (!divisor) 2722 return dividend; 2723 2724 return div64_u64(dividend, divisor); 2725 } 2726 2727 static DEFINE_PER_CPU(int, perf_throttled_count); 2728 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2729 2730 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2731 { 2732 struct hw_perf_event *hwc = &event->hw; 2733 s64 period, sample_period; 2734 s64 delta; 2735 2736 period = perf_calculate_period(event, nsec, count); 2737 2738 delta = (s64)(period - hwc->sample_period); 2739 delta = (delta + 7) / 8; /* low pass filter */ 2740 2741 sample_period = hwc->sample_period + delta; 2742 2743 if (!sample_period) 2744 sample_period = 1; 2745 2746 hwc->sample_period = sample_period; 2747 2748 if (local64_read(&hwc->period_left) > 8*sample_period) { 2749 if (disable) 2750 event->pmu->stop(event, PERF_EF_UPDATE); 2751 2752 local64_set(&hwc->period_left, 0); 2753 2754 if (disable) 2755 event->pmu->start(event, PERF_EF_RELOAD); 2756 } 2757 } 2758 2759 /* 2760 * Combine freq adjustment with unthrottling to avoid two passes over the 2761 * events. At the same time, make sure that having freq events does not change 2762 * the rate of unthrottling, as that would introduce bias.
2763 */ 2764 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2765 int needs_unthr) 2766 { 2767 struct perf_event *event; 2768 struct hw_perf_event *hwc; 2769 u64 now, period = TICK_NSEC; 2770 s64 delta; 2771 2772 /* 2773 * only need to iterate over all events iff: 2774 * - the context has events in frequency mode (needs freq adjust) 2775 * - there are events to unthrottle on this cpu 2776 */ 2777 if (!(ctx->nr_freq || needs_unthr)) 2778 return; 2779 2780 raw_spin_lock(&ctx->lock); 2781 perf_pmu_disable(ctx->pmu); 2782 2783 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2784 if (event->state != PERF_EVENT_STATE_ACTIVE) 2785 continue; 2786 2787 if (!event_filter_match(event)) 2788 continue; 2789 2790 perf_pmu_disable(event->pmu); 2791 2792 hwc = &event->hw; 2793 2794 if (hwc->interrupts == MAX_INTERRUPTS) { 2795 hwc->interrupts = 0; 2796 perf_log_throttle(event, 1); 2797 event->pmu->start(event, 0); 2798 } 2799 2800 if (!event->attr.freq || !event->attr.sample_freq) 2801 goto next; 2802 2803 /* 2804 * stop the event and update event->count 2805 */ 2806 event->pmu->stop(event, PERF_EF_UPDATE); 2807 2808 now = local64_read(&event->count); 2809 delta = now - hwc->freq_count_stamp; 2810 hwc->freq_count_stamp = now; 2811 2812 /* 2813 * restart the event; 2814 * reload only if the value has changed. 2815 * We have stopped the event, so tell that 2816 * to perf_adjust_period() to avoid stopping it 2817 * twice. 2818 */ 2819 if (delta > 0) 2820 perf_adjust_period(event, period, delta, false); 2821 2822 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2823 next: 2824 perf_pmu_enable(event->pmu); 2825 } 2826 2827 perf_pmu_enable(ctx->pmu); 2828 raw_spin_unlock(&ctx->lock); 2829 } 2830 2831 /* 2832 * Round-robin a context's events: 2833 */ 2834 static void rotate_ctx(struct perf_event_context *ctx) 2835 { 2836 /* 2837 * Rotate the first entry last of non-pinned groups. Rotation might be 2838 * disabled by the inheritance code. 2839 */ 2840 if (!ctx->rotate_disable) 2841 list_rotate_left(&ctx->flexible_groups); 2842 } 2843 2844 /* 2845 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized 2846 * because they're strictly cpu affine and rotate_start is called with IRQs 2847 * disabled, while rotate_context is called from IRQ context.
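 *
 * To illustrate the round-robin performed by rotate_ctx() above (an
 * expository addition): if the flexible list holds groups [A, B, C]
 * and only two fit on the PMU, one tick runs {A, B}; after
 * list_rotate_left() the list is [B, C, A], so the next tick runs
 * {B, C}, giving each group a share of the PMU over time.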
2848 */ 2849 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 2850 { 2851 struct perf_event_context *ctx = NULL; 2852 int rotate = 0, remove = 1; 2853 2854 if (cpuctx->ctx.nr_events) { 2855 remove = 0; 2856 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 2857 rotate = 1; 2858 } 2859 2860 ctx = cpuctx->task_ctx; 2861 if (ctx && ctx->nr_events) { 2862 remove = 0; 2863 if (ctx->nr_events != ctx->nr_active) 2864 rotate = 1; 2865 } 2866 2867 if (!rotate) 2868 goto done; 2869 2870 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2871 perf_pmu_disable(cpuctx->ctx.pmu); 2872 2873 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2874 if (ctx) 2875 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2876 2877 rotate_ctx(&cpuctx->ctx); 2878 if (ctx) 2879 rotate_ctx(ctx); 2880 2881 perf_event_sched_in(cpuctx, ctx, current); 2882 2883 perf_pmu_enable(cpuctx->ctx.pmu); 2884 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2885 done: 2886 if (remove) 2887 list_del_init(&cpuctx->rotation_list); 2888 2889 return rotate; 2890 } 2891 2892 #ifdef CONFIG_NO_HZ_FULL 2893 bool perf_event_can_stop_tick(void) 2894 { 2895 if (atomic_read(&nr_freq_events) || 2896 __this_cpu_read(perf_throttled_count)) 2897 return false; 2898 else 2899 return true; 2900 } 2901 #endif 2902 2903 void perf_event_task_tick(void) 2904 { 2905 struct list_head *head = &__get_cpu_var(rotation_list); 2906 struct perf_cpu_context *cpuctx, *tmp; 2907 struct perf_event_context *ctx; 2908 int throttled; 2909 2910 WARN_ON(!irqs_disabled()); 2911 2912 __this_cpu_inc(perf_throttled_seq); 2913 throttled = __this_cpu_xchg(perf_throttled_count, 0); 2914 2915 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 2916 ctx = &cpuctx->ctx; 2917 perf_adjust_freq_unthr_context(ctx, throttled); 2918 2919 ctx = cpuctx->task_ctx; 2920 if (ctx) 2921 perf_adjust_freq_unthr_context(ctx, throttled); 2922 } 2923 } 2924 2925 static int event_enable_on_exec(struct perf_event *event, 2926 struct perf_event_context *ctx) 2927 { 2928 if (!event->attr.enable_on_exec) 2929 return 0; 2930 2931 event->attr.enable_on_exec = 0; 2932 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2933 return 0; 2934 2935 __perf_event_mark_enabled(event); 2936 2937 return 1; 2938 } 2939 2940 /* 2941 * Enable all of a task's events that have been marked enable-on-exec. 2942 * This expects task == current. 2943 */ 2944 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 2945 { 2946 struct perf_event *event; 2947 unsigned long flags; 2948 int enabled = 0; 2949 int ret; 2950 2951 local_irq_save(flags); 2952 if (!ctx || !ctx->nr_events) 2953 goto out; 2954 2955 /* 2956 * We must ctxsw out cgroup events to avoid conflict 2957 * when invoking perf_event_context_sched_in() later on 2958 * in this function. Otherwise we end up trying to 2959 * ctxswin cgroup events which are already scheduled 2960 * in. 2961 */ 2962 perf_cgroup_sched_out(current, NULL); 2963 2964 raw_spin_lock(&ctx->lock); 2965 task_ctx_sched_out(ctx); 2966 2967 list_for_each_entry(event, &ctx->event_list, event_entry) { 2968 ret = event_enable_on_exec(event, ctx); 2969 if (ret) 2970 enabled = 1; 2971 } 2972 2973 /* 2974 * Unclone this context if we enabled any event.
2975 */ 2976 if (enabled) 2977 unclone_ctx(ctx); 2978 2979 raw_spin_unlock(&ctx->lock); 2980 2981 /* 2982 * Also calls ctxswin for cgroup events, if any: 2983 */ 2984 perf_event_context_sched_in(ctx, ctx->task); 2985 out: 2986 local_irq_restore(flags); 2987 } 2988 2989 void perf_event_exec(void) 2990 { 2991 struct perf_event_context *ctx; 2992 int ctxn; 2993 2994 rcu_read_lock(); 2995 for_each_task_context_nr(ctxn) { 2996 ctx = current->perf_event_ctxp[ctxn]; 2997 if (!ctx) 2998 continue; 2999 3000 perf_event_enable_on_exec(ctx); 3001 } 3002 rcu_read_unlock(); 3003 } 3004 3005 /* 3006 * Cross CPU call to read the hardware event 3007 */ 3008 static void __perf_event_read(void *info) 3009 { 3010 struct perf_event *event = info; 3011 struct perf_event_context *ctx = event->ctx; 3012 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3013 3014 /* 3015 * If this is a task context, we need to check whether it is 3016 * the current task context of this cpu. If not it has been 3017 * scheduled out before the smp call arrived. In that case 3018 * event->count would have been updated to a recent sample 3019 * when the event was scheduled out. 3020 */ 3021 if (ctx->task && cpuctx->task_ctx != ctx) 3022 return; 3023 3024 raw_spin_lock(&ctx->lock); 3025 if (ctx->is_active) { 3026 update_context_time(ctx); 3027 update_cgrp_time_from_event(event); 3028 } 3029 update_event_times(event); 3030 if (event->state == PERF_EVENT_STATE_ACTIVE) 3031 event->pmu->read(event); 3032 raw_spin_unlock(&ctx->lock); 3033 } 3034 3035 static inline u64 perf_event_count(struct perf_event *event) 3036 { 3037 return local64_read(&event->count) + atomic64_read(&event->child_count); 3038 } 3039 3040 static u64 perf_event_read(struct perf_event *event) 3041 { 3042 /* 3043 * If event is enabled and currently active on a CPU, update the 3044 * value in the event structure: 3045 */ 3046 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3047 smp_call_function_single(event->oncpu, 3048 __perf_event_read, event, 1); 3049 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3050 struct perf_event_context *ctx = event->ctx; 3051 unsigned long flags; 3052 3053 raw_spin_lock_irqsave(&ctx->lock, flags); 3054 /* 3055 * may read while context is not active 3056 * (e.g., thread is blocked), in that case 3057 * we cannot update context time 3058 */ 3059 if (ctx->is_active) { 3060 update_context_time(ctx); 3061 update_cgrp_time_from_event(event); 3062 } 3063 update_event_times(event); 3064 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3065 } 3066 3067 return perf_event_count(event); 3068 } 3069 3070 /* 3071 * Initialize the perf_event context in a task_struct: 3072 */ 3073 static void __perf_event_init_context(struct perf_event_context *ctx) 3074 { 3075 raw_spin_lock_init(&ctx->lock); 3076 mutex_init(&ctx->mutex); 3077 INIT_LIST_HEAD(&ctx->pinned_groups); 3078 INIT_LIST_HEAD(&ctx->flexible_groups); 3079 INIT_LIST_HEAD(&ctx->event_list); 3080 atomic_set(&ctx->refcount, 1); 3081 } 3082 3083 static struct perf_event_context * 3084 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3085 { 3086 struct perf_event_context *ctx; 3087 3088 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3089 if (!ctx) 3090 return NULL; 3091 3092 __perf_event_init_context(ctx); 3093 if (task) { 3094 ctx->task = task; 3095 get_task_struct(task); 3096 } 3097 ctx->pmu = pmu; 3098 3099 return ctx; 3100 } 3101 3102 static struct task_struct * 3103 find_lively_task_by_vpid(pid_t vpid) 3104 { 3105 struct task_struct *task; 3106 int err; 3107 3108 
rcu_read_lock(); 3109 if (!vpid) 3110 task = current; 3111 else 3112 task = find_task_by_vpid(vpid); 3113 if (task) 3114 get_task_struct(task); 3115 rcu_read_unlock(); 3116 3117 if (!task) 3118 return ERR_PTR(-ESRCH); 3119 3120 /* Reuse ptrace permission checks for now. */ 3121 err = -EACCES; 3122 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3123 goto errout; 3124 3125 return task; 3126 errout: 3127 put_task_struct(task); 3128 return ERR_PTR(err); 3129 3130 } 3131 3132 /* 3133 * Returns a matching context with refcount and pincount. 3134 */ 3135 static struct perf_event_context * 3136 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) 3137 { 3138 struct perf_event_context *ctx; 3139 struct perf_cpu_context *cpuctx; 3140 unsigned long flags; 3141 int ctxn, err; 3142 3143 if (!task) { 3144 /* Must be root to operate on a CPU event: */ 3145 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3146 return ERR_PTR(-EACCES); 3147 3148 /* 3149 * We could be clever and allow attaching an event to an 3150 * offline CPU and activate it when the CPU comes up, but 3151 * that's for later. 3152 */ 3153 if (!cpu_online(cpu)) 3154 return ERR_PTR(-ENODEV); 3155 3156 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3157 ctx = &cpuctx->ctx; 3158 get_ctx(ctx); 3159 ++ctx->pin_count; 3160 3161 return ctx; 3162 } 3163 3164 err = -EINVAL; 3165 ctxn = pmu->task_ctx_nr; 3166 if (ctxn < 0) 3167 goto errout; 3168 3169 retry: 3170 ctx = perf_lock_task_context(task, ctxn, &flags); 3171 if (ctx) { 3172 unclone_ctx(ctx); 3173 ++ctx->pin_count; 3174 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3175 } else { 3176 ctx = alloc_perf_context(pmu, task); 3177 err = -ENOMEM; 3178 if (!ctx) 3179 goto errout; 3180 3181 err = 0; 3182 mutex_lock(&task->perf_event_mutex); 3183 /* 3184 * If it has already passed perf_event_exit_task(), 3185 * we must see PF_EXITING; it takes this mutex too.
3186 */ 3187 if (task->flags & PF_EXITING) 3188 err = -ESRCH; 3189 else if (task->perf_event_ctxp[ctxn]) 3190 err = -EAGAIN; 3191 else { 3192 get_ctx(ctx); 3193 ++ctx->pin_count; 3194 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3195 } 3196 mutex_unlock(&task->perf_event_mutex); 3197 3198 if (unlikely(err)) { 3199 put_ctx(ctx); 3200 3201 if (err == -EAGAIN) 3202 goto retry; 3203 goto errout; 3204 } 3205 } 3206 3207 return ctx; 3208 3209 errout: 3210 return ERR_PTR(err); 3211 } 3212 3213 static void perf_event_free_filter(struct perf_event *event); 3214 3215 static void free_event_rcu(struct rcu_head *head) 3216 { 3217 struct perf_event *event; 3218 3219 event = container_of(head, struct perf_event, rcu_head); 3220 if (event->ns) 3221 put_pid_ns(event->ns); 3222 perf_event_free_filter(event); 3223 kfree(event); 3224 } 3225 3226 static void ring_buffer_put(struct ring_buffer *rb); 3227 static void ring_buffer_attach(struct perf_event *event, 3228 struct ring_buffer *rb); 3229 3230 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3231 { 3232 if (event->parent) 3233 return; 3234 3235 if (has_branch_stack(event)) { 3236 if (!(event->attach_state & PERF_ATTACH_TASK)) 3237 atomic_dec(&per_cpu(perf_branch_stack_events, cpu)); 3238 } 3239 if (is_cgroup_event(event)) 3240 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3241 } 3242 3243 static void unaccount_event(struct perf_event *event) 3244 { 3245 if (event->parent) 3246 return; 3247 3248 if (event->attach_state & PERF_ATTACH_TASK) 3249 static_key_slow_dec_deferred(&perf_sched_events); 3250 if (event->attr.mmap || event->attr.mmap_data) 3251 atomic_dec(&nr_mmap_events); 3252 if (event->attr.comm) 3253 atomic_dec(&nr_comm_events); 3254 if (event->attr.task) 3255 atomic_dec(&nr_task_events); 3256 if (event->attr.freq) 3257 atomic_dec(&nr_freq_events); 3258 if (is_cgroup_event(event)) 3259 static_key_slow_dec_deferred(&perf_sched_events); 3260 if (has_branch_stack(event)) 3261 static_key_slow_dec_deferred(&perf_sched_events); 3262 3263 unaccount_event_cpu(event, event->cpu); 3264 } 3265 3266 static void __free_event(struct perf_event *event) 3267 { 3268 if (!event->parent) { 3269 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3270 put_callchain_buffers(); 3271 } 3272 3273 if (event->destroy) 3274 event->destroy(event); 3275 3276 if (event->ctx) 3277 put_ctx(event->ctx); 3278 3279 if (event->pmu) 3280 module_put(event->pmu->module); 3281 3282 call_rcu(&event->rcu_head, free_event_rcu); 3283 } 3284 3285 static void _free_event(struct perf_event *event) 3286 { 3287 irq_work_sync(&event->pending); 3288 3289 unaccount_event(event); 3290 3291 if (event->rb) { 3292 /* 3293 * Can happen when we close an event with re-directed output. 3294 * 3295 * Since we have a 0 refcount, perf_mmap_close() will skip 3296 * over us; possibly making our ring_buffer_put() the last. 3297 */ 3298 mutex_lock(&event->mmap_mutex); 3299 ring_buffer_attach(event, NULL); 3300 mutex_unlock(&event->mmap_mutex); 3301 } 3302 3303 if (is_cgroup_event(event)) 3304 perf_detach_cgroup(event); 3305 3306 __free_event(event); 3307 } 3308 3309 /* 3310 * Used to free events which have a known refcount of 1, such as in error paths 3311 * where the event isn't exposed yet, and for inherited events.
3312 */ 3313 static void free_event(struct perf_event *event) 3314 { 3315 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3316 "unexpected event refcount: %ld; ptr=%p\n", 3317 atomic_long_read(&event->refcount), event)) { 3318 /* leak to avoid use-after-free */ 3319 return; 3320 } 3321 3322 _free_event(event); 3323 } 3324 3325 /* 3326 * Called when the last reference to the file is gone. 3327 */ 3328 static void put_event(struct perf_event *event) 3329 { 3330 struct perf_event_context *ctx = event->ctx; 3331 struct task_struct *owner; 3332 3333 if (!atomic_long_dec_and_test(&event->refcount)) 3334 return; 3335 3336 rcu_read_lock(); 3337 owner = ACCESS_ONCE(event->owner); 3338 /* 3339 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3340 * !owner it means the list deletion is complete and we can indeed 3341 * free this event, otherwise we need to serialize on 3342 * owner->perf_event_mutex. 3343 */ 3344 smp_read_barrier_depends(); 3345 if (owner) { 3346 /* 3347 * Since delayed_put_task_struct() also drops the last 3348 * task reference we can safely take a new reference 3349 * while holding the rcu_read_lock(). 3350 */ 3351 get_task_struct(owner); 3352 } 3353 rcu_read_unlock(); 3354 3355 if (owner) { 3356 mutex_lock(&owner->perf_event_mutex); 3357 /* 3358 * We have to re-check the event->owner field; if it is cleared 3359 * we raced with perf_event_exit_task(), acquiring the mutex 3360 * ensured they're done, and we can proceed with freeing the 3361 * event. 3362 */ 3363 if (event->owner) 3364 list_del_init(&event->owner_entry); 3365 mutex_unlock(&owner->perf_event_mutex); 3366 put_task_struct(owner); 3367 } 3368 3369 WARN_ON_ONCE(ctx->parent_ctx); 3370 /* 3371 * There are two ways this annotation is useful: 3372 * 3373 * 1) there is a lock recursion from perf_event_exit_task; 3374 * see the comment there. 3375 * 3376 * 2) there is a lock-inversion with mmap_sem through 3377 * perf_event_read_group(), which takes faults while 3378 * holding ctx->mutex; however, this is called after 3379 * the last filedesc died, so there is no possibility 3380 * of triggering the AB-BA case.
3381 */ 3382 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 3383 perf_remove_from_context(event, true); 3384 mutex_unlock(&ctx->mutex); 3385 3386 _free_event(event); 3387 } 3388 3389 int perf_event_release_kernel(struct perf_event *event) 3390 { 3391 put_event(event); 3392 return 0; 3393 } 3394 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3395 3396 static int perf_release(struct inode *inode, struct file *file) 3397 { 3398 put_event(file->private_data); 3399 return 0; 3400 } 3401 3402 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3403 { 3404 struct perf_event *child; 3405 u64 total = 0; 3406 3407 *enabled = 0; 3408 *running = 0; 3409 3410 mutex_lock(&event->child_mutex); 3411 total += perf_event_read(event); 3412 *enabled += event->total_time_enabled + 3413 atomic64_read(&event->child_total_time_enabled); 3414 *running += event->total_time_running + 3415 atomic64_read(&event->child_total_time_running); 3416 3417 list_for_each_entry(child, &event->child_list, child_list) { 3418 total += perf_event_read(child); 3419 *enabled += child->total_time_enabled; 3420 *running += child->total_time_running; 3421 } 3422 mutex_unlock(&event->child_mutex); 3423 3424 return total; 3425 } 3426 EXPORT_SYMBOL_GPL(perf_event_read_value); 3427 3428 static int perf_event_read_group(struct perf_event *event, 3429 u64 read_format, char __user *buf) 3430 { 3431 struct perf_event *leader = event->group_leader, *sub; 3432 int n = 0, size = 0, ret = -EFAULT; 3433 struct perf_event_context *ctx = leader->ctx; 3434 u64 values[5]; 3435 u64 count, enabled, running; 3436 3437 mutex_lock(&ctx->mutex); 3438 count = perf_event_read_value(leader, &enabled, &running); 3439 3440 values[n++] = 1 + leader->nr_siblings; 3441 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3442 values[n++] = enabled; 3443 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3444 values[n++] = running; 3445 values[n++] = count; 3446 if (read_format & PERF_FORMAT_ID) 3447 values[n++] = primary_event_id(leader); 3448 3449 size = n * sizeof(u64); 3450 3451 if (copy_to_user(buf, values, size)) 3452 goto unlock; 3453 3454 ret = size; 3455 3456 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3457 n = 0; 3458 3459 values[n++] = perf_event_read_value(sub, &enabled, &running); 3460 if (read_format & PERF_FORMAT_ID) 3461 values[n++] = primary_event_id(sub); 3462 3463 size = n * sizeof(u64); 3464 3465 if (copy_to_user(buf + ret, values, size)) { 3466 ret = -EFAULT; 3467 goto unlock; 3468 } 3469 3470 ret += size; 3471 } 3472 unlock: 3473 mutex_unlock(&ctx->mutex); 3474 3475 return ret; 3476 } 3477 3478 static int perf_event_read_one(struct perf_event *event, 3479 u64 read_format, char __user *buf) 3480 { 3481 u64 enabled, running; 3482 u64 values[4]; 3483 int n = 0; 3484 3485 values[n++] = perf_event_read_value(event, &enabled, &running); 3486 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3487 values[n++] = enabled; 3488 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3489 values[n++] = running; 3490 if (read_format & PERF_FORMAT_ID) 3491 values[n++] = primary_event_id(event); 3492 3493 if (copy_to_user(buf, values, n * sizeof(u64))) 3494 return -EFAULT; 3495 3496 return n * sizeof(u64); 3497 } 3498 3499 /* 3500 * Read the performance event - simple non blocking version for now 3501 */ 3502 static ssize_t 3503 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3504 { 3505 u64 read_format = event->attr.read_format; 3506 int ret; 3507 3508 /* 3509 * Return end-of-file for a read on 
an event that is in 3510 * error state (i.e. because it was pinned but it couldn't be 3511 * scheduled on to the CPU at some point). 3512 */ 3513 if (event->state == PERF_EVENT_STATE_ERROR) 3514 return 0; 3515 3516 if (count < event->read_size) 3517 return -ENOSPC; 3518 3519 WARN_ON_ONCE(event->ctx->parent_ctx); 3520 if (read_format & PERF_FORMAT_GROUP) 3521 ret = perf_event_read_group(event, read_format, buf); 3522 else 3523 ret = perf_event_read_one(event, read_format, buf); 3524 3525 return ret; 3526 } 3527 3528 static ssize_t 3529 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3530 { 3531 struct perf_event *event = file->private_data; 3532 3533 return perf_read_hw(event, buf, count); 3534 } 3535 3536 static unsigned int perf_poll(struct file *file, poll_table *wait) 3537 { 3538 struct perf_event *event = file->private_data; 3539 struct ring_buffer *rb; 3540 unsigned int events = POLLHUP; 3541 3542 /* 3543 * Pin the event->rb by taking event->mmap_mutex; otherwise 3544 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3545 */ 3546 mutex_lock(&event->mmap_mutex); 3547 rb = event->rb; 3548 if (rb) 3549 events = atomic_xchg(&rb->poll, 0); 3550 mutex_unlock(&event->mmap_mutex); 3551 3552 poll_wait(file, &event->waitq, wait); 3553 3554 return events; 3555 } 3556 3557 static void perf_event_reset(struct perf_event *event) 3558 { 3559 (void)perf_event_read(event); 3560 local64_set(&event->count, 0); 3561 perf_event_update_userpage(event); 3562 } 3563 3564 /* 3565 * Holding the top-level event's child_mutex means that any 3566 * descendant process that has inherited this event will block 3567 * in sync_child_event if it goes to exit, thus satisfying the 3568 * task existence requirements of perf_event_enable/disable.
3569 */ 3570 static void perf_event_for_each_child(struct perf_event *event, 3571 void (*func)(struct perf_event *)) 3572 { 3573 struct perf_event *child; 3574 3575 WARN_ON_ONCE(event->ctx->parent_ctx); 3576 mutex_lock(&event->child_mutex); 3577 func(event); 3578 list_for_each_entry(child, &event->child_list, child_list) 3579 func(child); 3580 mutex_unlock(&event->child_mutex); 3581 } 3582 3583 static void perf_event_for_each(struct perf_event *event, 3584 void (*func)(struct perf_event *)) 3585 { 3586 struct perf_event_context *ctx = event->ctx; 3587 struct perf_event *sibling; 3588 3589 WARN_ON_ONCE(ctx->parent_ctx); 3590 mutex_lock(&ctx->mutex); 3591 event = event->group_leader; 3592 3593 perf_event_for_each_child(event, func); 3594 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3595 perf_event_for_each_child(sibling, func); 3596 mutex_unlock(&ctx->mutex); 3597 } 3598 3599 static int perf_event_period(struct perf_event *event, u64 __user *arg) 3600 { 3601 struct perf_event_context *ctx = event->ctx; 3602 int ret = 0, active; 3603 u64 value; 3604 3605 if (!is_sampling_event(event)) 3606 return -EINVAL; 3607 3608 if (copy_from_user(&value, arg, sizeof(value))) 3609 return -EFAULT; 3610 3611 if (!value) 3612 return -EINVAL; 3613 3614 raw_spin_lock_irq(&ctx->lock); 3615 if (event->attr.freq) { 3616 if (value > sysctl_perf_event_sample_rate) { 3617 ret = -EINVAL; 3618 goto unlock; 3619 } 3620 3621 event->attr.sample_freq = value; 3622 } else { 3623 event->attr.sample_period = value; 3624 event->hw.sample_period = value; 3625 } 3626 3627 active = (event->state == PERF_EVENT_STATE_ACTIVE); 3628 if (active) { 3629 perf_pmu_disable(ctx->pmu); 3630 event->pmu->stop(event, PERF_EF_UPDATE); 3631 } 3632 3633 local64_set(&event->hw.period_left, 0); 3634 3635 if (active) { 3636 event->pmu->start(event, PERF_EF_RELOAD); 3637 perf_pmu_enable(ctx->pmu); 3638 } 3639 3640 unlock: 3641 raw_spin_unlock_irq(&ctx->lock); 3642 3643 return ret; 3644 } 3645 3646 static const struct file_operations perf_fops; 3647 3648 static inline int perf_fget_light(int fd, struct fd *p) 3649 { 3650 struct fd f = fdget(fd); 3651 if (!f.file) 3652 return -EBADF; 3653 3654 if (f.file->f_op != &perf_fops) { 3655 fdput(f); 3656 return -EBADF; 3657 } 3658 *p = f; 3659 return 0; 3660 } 3661 3662 static int perf_event_set_output(struct perf_event *event, 3663 struct perf_event *output_event); 3664 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 3665 3666 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3667 { 3668 struct perf_event *event = file->private_data; 3669 void (*func)(struct perf_event *); 3670 u32 flags = arg; 3671 3672 switch (cmd) { 3673 case PERF_EVENT_IOC_ENABLE: 3674 func = perf_event_enable; 3675 break; 3676 case PERF_EVENT_IOC_DISABLE: 3677 func = perf_event_disable; 3678 break; 3679 case PERF_EVENT_IOC_RESET: 3680 func = perf_event_reset; 3681 break; 3682 3683 case PERF_EVENT_IOC_REFRESH: 3684 return perf_event_refresh(event, arg); 3685 3686 case PERF_EVENT_IOC_PERIOD: 3687 return perf_event_period(event, (u64 __user *)arg); 3688 3689 case PERF_EVENT_IOC_ID: 3690 { 3691 u64 id = primary_event_id(event); 3692 3693 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 3694 return -EFAULT; 3695 return 0; 3696 } 3697 3698 case PERF_EVENT_IOC_SET_OUTPUT: 3699 { 3700 int ret; 3701 if (arg != -1) { 3702 struct perf_event *output_event; 3703 struct fd output; 3704 ret = perf_fget_light(arg, &output); 3705 if (ret) 3706 return ret; 3707 
output_event = output.file->private_data; 3708 ret = perf_event_set_output(event, output_event); 3709 fdput(output); 3710 } else { 3711 ret = perf_event_set_output(event, NULL); 3712 } 3713 return ret; 3714 } 3715 3716 case PERF_EVENT_IOC_SET_FILTER: 3717 return perf_event_set_filter(event, (void __user *)arg); 3718 3719 default: 3720 return -ENOTTY; 3721 } 3722 3723 if (flags & PERF_IOC_FLAG_GROUP) 3724 perf_event_for_each(event, func); 3725 else 3726 perf_event_for_each_child(event, func); 3727 3728 return 0; 3729 } 3730 3731 #ifdef CONFIG_COMPAT 3732 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 3733 unsigned long arg) 3734 { 3735 switch (_IOC_NR(cmd)) { 3736 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 3737 case _IOC_NR(PERF_EVENT_IOC_ID): 3738 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */ 3739 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 3740 cmd &= ~IOCSIZE_MASK; 3741 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 3742 } 3743 break; 3744 } 3745 return perf_ioctl(file, cmd, arg); 3746 } 3747 #else 3748 # define perf_compat_ioctl NULL 3749 #endif 3750 3751 int perf_event_task_enable(void) 3752 { 3753 struct perf_event *event; 3754 3755 mutex_lock(&current->perf_event_mutex); 3756 list_for_each_entry(event, &current->perf_event_list, owner_entry) 3757 perf_event_for_each_child(event, perf_event_enable); 3758 mutex_unlock(&current->perf_event_mutex); 3759 3760 return 0; 3761 } 3762 3763 int perf_event_task_disable(void) 3764 { 3765 struct perf_event *event; 3766 3767 mutex_lock(&current->perf_event_mutex); 3768 list_for_each_entry(event, &current->perf_event_list, owner_entry) 3769 perf_event_for_each_child(event, perf_event_disable); 3770 mutex_unlock(&current->perf_event_mutex); 3771 3772 return 0; 3773 } 3774 3775 static int perf_event_index(struct perf_event *event) 3776 { 3777 if (event->hw.state & PERF_HES_STOPPED) 3778 return 0; 3779 3780 if (event->state != PERF_EVENT_STATE_ACTIVE) 3781 return 0; 3782 3783 return event->pmu->event_idx(event); 3784 } 3785 3786 static void calc_timer_values(struct perf_event *event, 3787 u64 *now, 3788 u64 *enabled, 3789 u64 *running) 3790 { 3791 u64 ctx_time; 3792 3793 *now = perf_clock(); 3794 ctx_time = event->shadow_ctx_time + *now; 3795 *enabled = ctx_time - event->tstamp_enabled; 3796 *running = ctx_time - event->tstamp_running; 3797 } 3798 3799 static void perf_event_init_userpage(struct perf_event *event) 3800 { 3801 struct perf_event_mmap_page *userpg; 3802 struct ring_buffer *rb; 3803 3804 rcu_read_lock(); 3805 rb = rcu_dereference(event->rb); 3806 if (!rb) 3807 goto unlock; 3808 3809 userpg = rb->user_page; 3810 3811 /* Allow new userspace to detect that bit 0 is deprecated */ 3812 userpg->cap_bit0_is_deprecated = 1; 3813 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 3814 3815 unlock: 3816 rcu_read_unlock(); 3817 } 3818 3819 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 3820 { 3821 } 3822 3823 /* 3824 * Callers need to ensure there can be no nesting of this function, otherwise 3825 * the seqlock logic goes bad. We cannot serialize this because the arch 3826 * code calls this from NMI context.
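 *
 * A sketch of the matching user-space read side (an expository
 * addition, mirroring the protocol documented in
 * include/uapi/linux/perf_event.h; 'pc' is the mmap()ed
 * struct perf_event_mmap_page):
 *
 *   u32 seq;
 *   u64 index, offset;
 *
 *   do {
 *           seq = pc->lock;
 *           barrier();
 *           index  = pc->index;
 *           offset = pc->offset;
 *           barrier();
 *   } while (pc->lock != seq);
 *
 * The update side below bumps ->lock before and after writing, so a
 * reader that observes the same value twice has a consistent snapshot;
 * a nested update here would break that guarantee.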
3827 */ 3828 void perf_event_update_userpage(struct perf_event *event) 3829 { 3830 struct perf_event_mmap_page *userpg; 3831 struct ring_buffer *rb; 3832 u64 enabled, running, now; 3833 3834 rcu_read_lock(); 3835 rb = rcu_dereference(event->rb); 3836 if (!rb) 3837 goto unlock; 3838 3839 /* 3840 * compute total_time_enabled, total_time_running 3841 * based on snapshot values taken when the event 3842 * was last scheduled in. 3843 * 3844 * we cannot simply call update_context_time() 3845 * because of a locking issue, as we can be called in 3846 * NMI context 3847 */ 3848 calc_timer_values(event, &now, &enabled, &running); 3849 3850 userpg = rb->user_page; 3851 /* 3852 * Disable preemption so as to not let the corresponding user-space 3853 * spin too long if we get preempted. 3854 */ 3855 preempt_disable(); 3856 ++userpg->lock; 3857 barrier(); 3858 userpg->index = perf_event_index(event); 3859 userpg->offset = perf_event_count(event); 3860 if (userpg->index) 3861 userpg->offset -= local64_read(&event->hw.prev_count); 3862 3863 userpg->time_enabled = enabled + 3864 atomic64_read(&event->child_total_time_enabled); 3865 3866 userpg->time_running = running + 3867 atomic64_read(&event->child_total_time_running); 3868 3869 arch_perf_update_userpage(userpg, now); 3870 3871 barrier(); 3872 ++userpg->lock; 3873 preempt_enable(); 3874 unlock: 3875 rcu_read_unlock(); 3876 } 3877 3878 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 3879 { 3880 struct perf_event *event = vma->vm_file->private_data; 3881 struct ring_buffer *rb; 3882 int ret = VM_FAULT_SIGBUS; 3883 3884 if (vmf->flags & FAULT_FLAG_MKWRITE) { 3885 if (vmf->pgoff == 0) 3886 ret = 0; 3887 return ret; 3888 } 3889 3890 rcu_read_lock(); 3891 rb = rcu_dereference(event->rb); 3892 if (!rb) 3893 goto unlock; 3894 3895 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 3896 goto unlock; 3897 3898 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 3899 if (!vmf->page) 3900 goto unlock; 3901 3902 get_page(vmf->page); 3903 vmf->page->mapping = vma->vm_file->f_mapping; 3904 vmf->page->index = vmf->pgoff; 3905 3906 ret = 0; 3907 unlock: 3908 rcu_read_unlock(); 3909 3910 return ret; 3911 } 3912 3913 static void ring_buffer_attach(struct perf_event *event, 3914 struct ring_buffer *rb) 3915 { 3916 struct ring_buffer *old_rb = NULL; 3917 unsigned long flags; 3918 3919 if (event->rb) { 3920 /* 3921 * Should be impossible, we set this when removing 3922 * event->rb_entry and wait/clear when adding event->rb_entry. 3923 */ 3924 WARN_ON_ONCE(event->rcu_pending); 3925 3926 old_rb = event->rb; 3927 event->rcu_batches = get_state_synchronize_rcu(); 3928 event->rcu_pending = 1; 3929 3930 spin_lock_irqsave(&old_rb->event_lock, flags); 3931 list_del_rcu(&event->rb_entry); 3932 spin_unlock_irqrestore(&old_rb->event_lock, flags); 3933 } 3934 3935 if (event->rcu_pending && rb) { 3936 cond_synchronize_rcu(event->rcu_batches); 3937 event->rcu_pending = 0; 3938 } 3939 3940 if (rb) { 3941 spin_lock_irqsave(&rb->event_lock, flags); 3942 list_add_rcu(&event->rb_entry, &rb->event_list); 3943 spin_unlock_irqrestore(&rb->event_lock, flags); 3944 } 3945 3946 rcu_assign_pointer(event->rb, rb); 3947 3948 if (old_rb) { 3949 ring_buffer_put(old_rb); 3950 /* 3951 * Since we detached before setting the new rb (so that we 3952 * could attach the new rb), we could have missed a wakeup. 3953 * Provide it now.
3954 */ 3955 wake_up_all(&event->waitq); 3956 } 3957 } 3958 3959 static void ring_buffer_wakeup(struct perf_event *event) 3960 { 3961 struct ring_buffer *rb; 3962 3963 rcu_read_lock(); 3964 rb = rcu_dereference(event->rb); 3965 if (rb) { 3966 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 3967 wake_up_all(&event->waitq); 3968 } 3969 rcu_read_unlock(); 3970 } 3971 3972 static void rb_free_rcu(struct rcu_head *rcu_head) 3973 { 3974 struct ring_buffer *rb; 3975 3976 rb = container_of(rcu_head, struct ring_buffer, rcu_head); 3977 rb_free(rb); 3978 } 3979 3980 static struct ring_buffer *ring_buffer_get(struct perf_event *event) 3981 { 3982 struct ring_buffer *rb; 3983 3984 rcu_read_lock(); 3985 rb = rcu_dereference(event->rb); 3986 if (rb) { 3987 if (!atomic_inc_not_zero(&rb->refcount)) 3988 rb = NULL; 3989 } 3990 rcu_read_unlock(); 3991 3992 return rb; 3993 } 3994 3995 static void ring_buffer_put(struct ring_buffer *rb) 3996 { 3997 if (!atomic_dec_and_test(&rb->refcount)) 3998 return; 3999 4000 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4001 4002 call_rcu(&rb->rcu_head, rb_free_rcu); 4003 } 4004 4005 static void perf_mmap_open(struct vm_area_struct *vma) 4006 { 4007 struct perf_event *event = vma->vm_file->private_data; 4008 4009 atomic_inc(&event->mmap_count); 4010 atomic_inc(&event->rb->mmap_count); 4011 } 4012 4013 /* 4014 * A buffer can be mmap()ed multiple times; either directly through the same 4015 * event, or through other events by use of perf_event_set_output(). 4016 * 4017 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4018 * the buffer here, where we still have a VM context. This means we need 4019 * to detach all events redirecting to us. 4020 */ 4021 static void perf_mmap_close(struct vm_area_struct *vma) 4022 { 4023 struct perf_event *event = vma->vm_file->private_data; 4024 4025 struct ring_buffer *rb = ring_buffer_get(event); 4026 struct user_struct *mmap_user = rb->mmap_user; 4027 int mmap_locked = rb->mmap_locked; 4028 unsigned long size = perf_data_size(rb); 4029 4030 atomic_dec(&rb->mmap_count); 4031 4032 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4033 goto out_put; 4034 4035 ring_buffer_attach(event, NULL); 4036 mutex_unlock(&event->mmap_mutex); 4037 4038 /* If there are still other mmap()s of this buffer, we're done. */ 4039 if (atomic_read(&rb->mmap_count)) 4040 goto out_put; 4041 4042 /* 4043 * No other mmap()s, detach from all other events that might redirect 4044 * into the now unreachable buffer. Somewhat complicated by the 4045 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4046 */ 4047 again: 4048 rcu_read_lock(); 4049 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4050 if (!atomic_long_inc_not_zero(&event->refcount)) { 4051 /* 4052 * This event is en-route to free_event() which will 4053 * detach it and remove it from the list. 4054 */ 4055 continue; 4056 } 4057 rcu_read_unlock(); 4058 4059 mutex_lock(&event->mmap_mutex); 4060 /* 4061 * Check we didn't race with perf_event_set_output() which can 4062 * swizzle the rb from under us while we were waiting to 4063 * acquire mmap_mutex. 4064 * 4065 * If we find a different rb, ignore this event; the next 4066 * iteration will no longer find it on the list. We still 4067 * have to restart the iteration to make sure we're not now 4068 * iterating the wrong list.
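 *
 * Concretely (an expository addition): another event B had redirected
 * its output into our rb via PERF_EVENT_IOC_SET_OUTPUT. While we
 * slept on B's mmap_mutex, a concurrent perf_event_set_output()
 * pointed B at a different buffer; B then appears on that buffer's
 * event_list, not ours, which is why the walk below must restart
 * from scratch.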
4069 */ 4070 if (event->rb == rb) 4071 ring_buffer_attach(event, NULL); 4072 4073 mutex_unlock(&event->mmap_mutex); 4074 put_event(event); 4075 4076 /* 4077 * Restart the iteration; either we're on the wrong list or 4078 * we destroyed its integrity by doing a deletion. 4079 */ 4080 goto again; 4081 } 4082 rcu_read_unlock(); 4083 4084 /* 4085 * It could be that there are still a few 0-ref events on the list; they'll 4086 * get cleaned up by free_event() -- they'll also still have their 4087 * ref on the rb and will free it whenever they are done with it. 4088 * 4089 * Aside from that, this buffer is 'fully' detached and unmapped, 4090 * undo the VM accounting. 4091 */ 4092 4093 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4094 vma->vm_mm->pinned_vm -= mmap_locked; 4095 free_uid(mmap_user); 4096 4097 out_put: 4098 ring_buffer_put(rb); /* could be last */ 4099 } 4100 4101 static const struct vm_operations_struct perf_mmap_vmops = { 4102 .open = perf_mmap_open, 4103 .close = perf_mmap_close, 4104 .fault = perf_mmap_fault, 4105 .page_mkwrite = perf_mmap_fault, 4106 }; 4107 4108 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4109 { 4110 struct perf_event *event = file->private_data; 4111 unsigned long user_locked, user_lock_limit; 4112 struct user_struct *user = current_user(); 4113 unsigned long locked, lock_limit; 4114 struct ring_buffer *rb; 4115 unsigned long vma_size; 4116 unsigned long nr_pages; 4117 long user_extra, extra; 4118 int ret = 0, flags = 0; 4119 4120 /* 4121 * Don't allow mmap() of inherited per-task counters. This would 4122 * create a performance issue due to all children writing to the 4123 * same rb. 4124 */ 4125 if (event->cpu == -1 && event->attr.inherit) 4126 return -EINVAL; 4127 4128 if (!(vma->vm_flags & VM_SHARED)) 4129 return -EINVAL; 4130 4131 vma_size = vma->vm_end - vma->vm_start; 4132 nr_pages = (vma_size / PAGE_SIZE) - 1; 4133 4134 /* 4135 * If we have rb pages ensure they're a power-of-two number, so we 4136 * can do bitmasks instead of modulo. 4137 */ 4138 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4139 return -EINVAL; 4140 4141 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4142 return -EINVAL; 4143 4144 if (vma->vm_pgoff != 0) 4145 return -EINVAL; 4146 4147 WARN_ON_ONCE(event->ctx->parent_ctx); 4148 again: 4149 mutex_lock(&event->mmap_mutex); 4150 if (event->rb) { 4151 if (event->rb->nr_pages != nr_pages) { 4152 ret = -EINVAL; 4153 goto unlock; 4154 } 4155 4156 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4157 /* 4158 * Raced against perf_mmap_close() through 4159 * perf_event_set_output(). Try again, hope for better 4160 * luck.
4161 */ 4162 mutex_unlock(&event->mmap_mutex); 4163 goto again; 4164 } 4165 4166 goto unlock; 4167 } 4168 4169 user_extra = nr_pages + 1; 4170 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4171 4172 /* 4173 * Increase the limit linearly with more CPUs: 4174 */ 4175 user_lock_limit *= num_online_cpus(); 4176 4177 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4178 4179 extra = 0; 4180 if (user_locked > user_lock_limit) 4181 extra = user_locked - user_lock_limit; 4182 4183 lock_limit = rlimit(RLIMIT_MEMLOCK); 4184 lock_limit >>= PAGE_SHIFT; 4185 locked = vma->vm_mm->pinned_vm + extra; 4186 4187 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4188 !capable(CAP_IPC_LOCK)) { 4189 ret = -EPERM; 4190 goto unlock; 4191 } 4192 4193 WARN_ON(event->rb); 4194 4195 if (vma->vm_flags & VM_WRITE) 4196 flags |= RING_BUFFER_WRITABLE; 4197 4198 rb = rb_alloc(nr_pages, 4199 event->attr.watermark ? event->attr.wakeup_watermark : 0, 4200 event->cpu, flags); 4201 4202 if (!rb) { 4203 ret = -ENOMEM; 4204 goto unlock; 4205 } 4206 4207 atomic_set(&rb->mmap_count, 1); 4208 rb->mmap_locked = extra; 4209 rb->mmap_user = get_current_user(); 4210 4211 atomic_long_add(user_extra, &user->locked_vm); 4212 vma->vm_mm->pinned_vm += extra; 4213 4214 ring_buffer_attach(event, rb); 4215 4216 perf_event_init_userpage(event); 4217 perf_event_update_userpage(event); 4218 4219 unlock: 4220 if (!ret) 4221 atomic_inc(&event->mmap_count); 4222 mutex_unlock(&event->mmap_mutex); 4223 4224 /* 4225 * Since pinned accounting is per vm we cannot allow fork() to copy our 4226 * vma. 4227 */ 4228 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4229 vma->vm_ops = &perf_mmap_vmops; 4230 4231 return ret; 4232 } 4233 4234 static int perf_fasync(int fd, struct file *filp, int on) 4235 { 4236 struct inode *inode = file_inode(filp); 4237 struct perf_event *event = filp->private_data; 4238 int retval; 4239 4240 mutex_lock(&inode->i_mutex); 4241 retval = fasync_helper(fd, filp, on, &event->fasync); 4242 mutex_unlock(&inode->i_mutex); 4243 4244 if (retval < 0) 4245 return retval; 4246 4247 return 0; 4248 } 4249 4250 static const struct file_operations perf_fops = { 4251 .llseek = no_llseek, 4252 .release = perf_release, 4253 .read = perf_read, 4254 .poll = perf_poll, 4255 .unlocked_ioctl = perf_ioctl, 4256 .compat_ioctl = perf_compat_ioctl, 4257 .mmap = perf_mmap, 4258 .fasync = perf_fasync, 4259 }; 4260 4261 /* 4262 * Perf event wakeup 4263 * 4264 * If there's data, ensure we set the poll() state and publish everything 4265 * to user-space before waking everybody up. 4266 */ 4267 4268 void perf_event_wakeup(struct perf_event *event) 4269 { 4270 ring_buffer_wakeup(event); 4271 4272 if (event->pending_kill) { 4273 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4274 event->pending_kill = 0; 4275 } 4276 } 4277 4278 static void perf_pending_event(struct irq_work *entry) 4279 { 4280 struct perf_event *event = container_of(entry, 4281 struct perf_event, pending); 4282 4283 if (event->pending_disable) { 4284 event->pending_disable = 0; 4285 __perf_event_disable(event); 4286 } 4287 4288 if (event->pending_wakeup) { 4289 event->pending_wakeup = 0; 4290 perf_event_wakeup(event); 4291 } 4292 } 4293 4294 /* 4295 * We assume there is only KVM supporting the callbacks. 4296 * Later on, we might change it to a list if there is 4297 * another virtualization implementation supporting the callbacks. 
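 *
 * As an illustration only (a hedged sketch; the callback names below are
 * assumptions about the struct layout, not a guarantee of it), a hypervisor
 * module would hook in roughly like so:
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_guest_cbs);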
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

static void
perf_output_sample_regs(struct perf_output_handle *handle,
			struct pt_regs *regs, u64 mask)
{
	int bit;

	for_each_set_bit(bit, (const unsigned long *) &mask,
			 sizeof(mask) * BITS_PER_BYTE) {
		u64 val;

		val = perf_reg_value(regs, bit);
		perf_output_put(handle, val);
	}
}

static void perf_sample_regs_user(struct perf_regs_user *regs_user,
				  struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		regs_user->regs = regs;
		regs_user->abi  = perf_reg_abi(current);
	}
}

/*
 * Get remaining task size from user stack pointer.
 *
 * It'd be better to take the stack vma map and limit this more
 * precisely, but there's no way to get it safely under interrupt,
 * so we use TASK_SIZE as the limit.
 */
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
	unsigned long addr = perf_user_stack_pointer(regs);

	if (!addr || addr >= TASK_SIZE)
		return 0;

	return TASK_SIZE - addr;
}

static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
			struct pt_regs *regs)
{
	u64 task_size;

	/* No regs, no stack pointer, no dump. */
	if (!regs)
		return 0;

	/*
	 * Check that the requested stack size fits within:
	 *
	 * - TASK_SIZE
	 *   If it doesn't, we clamp the size to TASK_SIZE.
	 *
	 * - the remaining sample size
	 *   If it doesn't, we shrink the stack size to fit into the
	 *   remaining sample size.
	 */

	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
	stack_size = min(stack_size, (u16) task_size);

	/* Current header size plus static size and dynamic size. */
	header_size += 2 * sizeof(u64);

	/* Does the header plus stack dump still fit within the u16 limit? */
	if ((u16) (header_size + stack_size) < header_size) {
		/*
		 * If we overflow the maximum size for the sample,
		 * we shrink the stack dump size to fit.
		 */
		stack_size = USHRT_MAX - header_size - sizeof(u64);
		stack_size = round_up(stack_size, sizeof(u64));
	}

	return stack_size;
}

static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
			  struct pt_regs *regs)
{
	/* Case of a kernel thread, nothing to dump */
	if (!regs) {
		u64 size = 0;
		perf_output_put(handle, size);
	} else {
		unsigned long sp;
		unsigned int rem;
		u64 dyn_size;

		/*
		 * We dump:
		 * static size
		 *   - the size requested by the user, or the best one we can
		 *     fit into the sample max size
		 * data
		 *   - user stack dump data
		 * dynamic size
		 *   - the actual dumped size
		 */

		/* Static size. */
		perf_output_put(handle, dump_size);

		/* Data.
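		 * Copied from the user stack below; __output_copy_user() may
		 * copy less than dump_size if it faults, and 'rem' is
		 * whatever could not be read.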
*/ 4430 sp = perf_user_stack_pointer(regs); 4431 rem = __output_copy_user(handle, (void *) sp, dump_size); 4432 dyn_size = dump_size - rem; 4433 4434 perf_output_skip(handle, rem); 4435 4436 /* Dynamic size. */ 4437 perf_output_put(handle, dyn_size); 4438 } 4439 } 4440 4441 static void __perf_event_header__init_id(struct perf_event_header *header, 4442 struct perf_sample_data *data, 4443 struct perf_event *event) 4444 { 4445 u64 sample_type = event->attr.sample_type; 4446 4447 data->type = sample_type; 4448 header->size += event->id_header_size; 4449 4450 if (sample_type & PERF_SAMPLE_TID) { 4451 /* namespace issues */ 4452 data->tid_entry.pid = perf_event_pid(event, current); 4453 data->tid_entry.tid = perf_event_tid(event, current); 4454 } 4455 4456 if (sample_type & PERF_SAMPLE_TIME) 4457 data->time = perf_clock(); 4458 4459 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 4460 data->id = primary_event_id(event); 4461 4462 if (sample_type & PERF_SAMPLE_STREAM_ID) 4463 data->stream_id = event->id; 4464 4465 if (sample_type & PERF_SAMPLE_CPU) { 4466 data->cpu_entry.cpu = raw_smp_processor_id(); 4467 data->cpu_entry.reserved = 0; 4468 } 4469 } 4470 4471 void perf_event_header__init_id(struct perf_event_header *header, 4472 struct perf_sample_data *data, 4473 struct perf_event *event) 4474 { 4475 if (event->attr.sample_id_all) 4476 __perf_event_header__init_id(header, data, event); 4477 } 4478 4479 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 4480 struct perf_sample_data *data) 4481 { 4482 u64 sample_type = data->type; 4483 4484 if (sample_type & PERF_SAMPLE_TID) 4485 perf_output_put(handle, data->tid_entry); 4486 4487 if (sample_type & PERF_SAMPLE_TIME) 4488 perf_output_put(handle, data->time); 4489 4490 if (sample_type & PERF_SAMPLE_ID) 4491 perf_output_put(handle, data->id); 4492 4493 if (sample_type & PERF_SAMPLE_STREAM_ID) 4494 perf_output_put(handle, data->stream_id); 4495 4496 if (sample_type & PERF_SAMPLE_CPU) 4497 perf_output_put(handle, data->cpu_entry); 4498 4499 if (sample_type & PERF_SAMPLE_IDENTIFIER) 4500 perf_output_put(handle, data->id); 4501 } 4502 4503 void perf_event__output_id_sample(struct perf_event *event, 4504 struct perf_output_handle *handle, 4505 struct perf_sample_data *sample) 4506 { 4507 if (event->attr.sample_id_all) 4508 __perf_event__output_id_sample(handle, sample); 4509 } 4510 4511 static void perf_output_read_one(struct perf_output_handle *handle, 4512 struct perf_event *event, 4513 u64 enabled, u64 running) 4514 { 4515 u64 read_format = event->attr.read_format; 4516 u64 values[4]; 4517 int n = 0; 4518 4519 values[n++] = perf_event_count(event); 4520 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 4521 values[n++] = enabled + 4522 atomic64_read(&event->child_total_time_enabled); 4523 } 4524 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 4525 values[n++] = running + 4526 atomic64_read(&event->child_total_time_running); 4527 } 4528 if (read_format & PERF_FORMAT_ID) 4529 values[n++] = primary_event_id(event); 4530 4531 __output_copy(handle, values, n * sizeof(u64)); 4532 } 4533 4534 /* 4535 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
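 * (Inherited events keep per-child counts while the sibling list lives with
 * the parent, so it is unclear which values a single group read should
 * report.)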
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if ((sub != event) &&
		    (sub->state == PERF_EVENT_STATE_ACTIVE))
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;

	/*
	 * Compute total_time_enabled and total_time_running based on the
	 * snapshot values taken when the event was last scheduled in.
	 *
	 * We cannot simply call update_context_time() because of locking
	 * issues: we may be called from NMI context.
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &now, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			size += data->callchain->nr;
			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
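		/*
		 * Either way the layout is the same: one u64 count followed
		 * by that many u64 instruction pointers, so an absent
		 * callchain is encoded as nr == 0 and the record stays
		 * self-describing.
		 */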
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
				      data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (data->br_stack) {
			size_t size;

			size = data->br_stack->nr
			     * sizeof(struct perf_branch_entry);

			perf_output_put(handle, data->br_stack->nr);
			perf_output_copy(handle, data->br_stack->entries, size);
		} else {
			/*
			 * We always store at least the value of nr.
			 */
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		u64 abi = data->regs_user.abi;

		/*
		 * If there are no regs to dump, notice it through the first
		 * u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
		 */
		perf_output_put(handle, abi);

		if (abi) {
			u64 mask = event->attr.sample_regs_user;
			perf_output_sample_regs(handle,
						data->regs_user.regs,
						mask);
		}
	}

	if (sample_type & PERF_SAMPLE_STACK_USER) {
		perf_output_sample_ustack(handle,
					  data->stack_user_size,
					  data->regs_user.regs);
	}

	if (sample_type & PERF_SAMPLE_WEIGHT)
		perf_output_put(handle, data->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		perf_output_put(handle, data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		perf_output_put(handle, data->txn);

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(event, regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		int size = sizeof(u64); /* nr */
		if (data->br_stack) {
			size += data->br_stack->nr
			      * sizeof(struct perf_branch_entry);
		}
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		/* regs dump ABI info */
		int size = sizeof(u64);

		perf_sample_regs_user(&data->regs_user, regs);

		if (data->regs_user.regs) {
			u64 mask = event->attr.sample_regs_user;
			size += hweight64(mask) * sizeof(u64);
		}

		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_STACK_USER) {
		/*
		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
		 * processed as the last one, or an additional check must be
		 * added when a new sample type is introduced, because we
		 * could otherwise eat up the rest of the sample size.
		 */
		struct perf_regs_user *uregs = &data->regs_user;
		u16 stack_size = event->attr.sample_stack_user;
		u16 size = sizeof(u64);

		if (!uregs->abi)
			perf_sample_regs_user(uregs, regs);

		stack_size = perf_sample_ustack_size(stack_size, header->size,
						     uregs->regs);

		/*
		 * If there is something to dump, add space for the dump
		 * itself and for the field that tells the dynamic size,
		 * which is how much was actually dumped.
		 */
		if (stack_size)
			size += sizeof(u64) + stack_size;

		data->stack_user_size = stack_size;
		header->size += size;
	}
}

static void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
		      struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);

static void
perf_event_aux_ctx(struct perf_event_context *ctx,
		   perf_event_aux_output_cb output,
		   void *data)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state < PERF_EVENT_STATE_INACTIVE)
			continue;
		if (!event_filter_match(event))
			continue;
		output(event, data);
	}
}

static void
perf_event_aux(perf_event_aux_output_cb output, void *data,
	       struct perf_event_context *task_ctx)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			goto next;
		perf_event_aux_ctx(&cpuctx->ctx, output, data);
		if (task_ctx)
			goto
next; 4934 ctxn = pmu->task_ctx_nr; 4935 if (ctxn < 0) 4936 goto next; 4937 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4938 if (ctx) 4939 perf_event_aux_ctx(ctx, output, data); 4940 next: 4941 put_cpu_ptr(pmu->pmu_cpu_context); 4942 } 4943 4944 if (task_ctx) { 4945 preempt_disable(); 4946 perf_event_aux_ctx(task_ctx, output, data); 4947 preempt_enable(); 4948 } 4949 rcu_read_unlock(); 4950 } 4951 4952 /* 4953 * task tracking -- fork/exit 4954 * 4955 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 4956 */ 4957 4958 struct perf_task_event { 4959 struct task_struct *task; 4960 struct perf_event_context *task_ctx; 4961 4962 struct { 4963 struct perf_event_header header; 4964 4965 u32 pid; 4966 u32 ppid; 4967 u32 tid; 4968 u32 ptid; 4969 u64 time; 4970 } event_id; 4971 }; 4972 4973 static int perf_event_task_match(struct perf_event *event) 4974 { 4975 return event->attr.comm || event->attr.mmap || 4976 event->attr.mmap2 || event->attr.mmap_data || 4977 event->attr.task; 4978 } 4979 4980 static void perf_event_task_output(struct perf_event *event, 4981 void *data) 4982 { 4983 struct perf_task_event *task_event = data; 4984 struct perf_output_handle handle; 4985 struct perf_sample_data sample; 4986 struct task_struct *task = task_event->task; 4987 int ret, size = task_event->event_id.header.size; 4988 4989 if (!perf_event_task_match(event)) 4990 return; 4991 4992 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 4993 4994 ret = perf_output_begin(&handle, event, 4995 task_event->event_id.header.size); 4996 if (ret) 4997 goto out; 4998 4999 task_event->event_id.pid = perf_event_pid(event, task); 5000 task_event->event_id.ppid = perf_event_pid(event, current); 5001 5002 task_event->event_id.tid = perf_event_tid(event, task); 5003 task_event->event_id.ptid = perf_event_tid(event, current); 5004 5005 perf_output_put(&handle, task_event->event_id); 5006 5007 perf_event__output_id_sample(event, &handle, &sample); 5008 5009 perf_output_end(&handle); 5010 out: 5011 task_event->event_id.header.size = size; 5012 } 5013 5014 static void perf_event_task(struct task_struct *task, 5015 struct perf_event_context *task_ctx, 5016 int new) 5017 { 5018 struct perf_task_event task_event; 5019 5020 if (!atomic_read(&nr_comm_events) && 5021 !atomic_read(&nr_mmap_events) && 5022 !atomic_read(&nr_task_events)) 5023 return; 5024 5025 task_event = (struct perf_task_event){ 5026 .task = task, 5027 .task_ctx = task_ctx, 5028 .event_id = { 5029 .header = { 5030 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5031 .misc = 0, 5032 .size = sizeof(task_event.event_id), 5033 }, 5034 /* .pid */ 5035 /* .ppid */ 5036 /* .tid */ 5037 /* .ptid */ 5038 .time = perf_clock(), 5039 }, 5040 }; 5041 5042 perf_event_aux(perf_event_task_output, 5043 &task_event, 5044 task_ctx); 5045 } 5046 5047 void perf_event_fork(struct task_struct *task) 5048 { 5049 perf_event_task(task, NULL, 1); 5050 } 5051 5052 /* 5053 * comm tracking 5054 */ 5055 5056 struct perf_comm_event { 5057 struct task_struct *task; 5058 char *comm; 5059 int comm_size; 5060 5061 struct { 5062 struct perf_event_header header; 5063 5064 u32 pid; 5065 u32 tid; 5066 } event_id; 5067 }; 5068 5069 static int perf_event_comm_match(struct perf_event *event) 5070 { 5071 return event->attr.comm; 5072 } 5073 5074 static void perf_event_comm_output(struct perf_event *event, 5075 void *data) 5076 { 5077 struct perf_comm_event *comm_event = data; 5078 struct perf_output_handle handle; 5079 struct perf_sample_data sample; 5080 int size = comm_event->event_id.header.size; 5081 int ret; 5082 5083 if (!perf_event_comm_match(event)) 5084 return; 5085 5086 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5087 ret = perf_output_begin(&handle, event, 5088 comm_event->event_id.header.size); 5089 5090 if (ret) 5091 goto out; 5092 5093 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5094 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5095 5096 perf_output_put(&handle, comm_event->event_id); 5097 __output_copy(&handle, comm_event->comm, 5098 comm_event->comm_size); 5099 5100 perf_event__output_id_sample(event, &handle, &sample); 5101 5102 perf_output_end(&handle); 5103 out: 5104 comm_event->event_id.header.size = size; 5105 } 5106 5107 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5108 { 5109 char comm[TASK_COMM_LEN]; 5110 unsigned int size; 5111 5112 memset(comm, 0, sizeof(comm)); 5113 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5114 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5115 5116 comm_event->comm = comm; 5117 comm_event->comm_size = size; 5118 5119 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5120 5121 perf_event_aux(perf_event_comm_output, 5122 comm_event, 5123 NULL); 5124 } 5125 5126 void perf_event_comm(struct task_struct *task, bool exec) 5127 { 5128 struct perf_comm_event comm_event; 5129 5130 if (!atomic_read(&nr_comm_events)) 5131 return; 5132 5133 comm_event = (struct perf_comm_event){ 5134 .task = task, 5135 /* .comm */ 5136 /* .comm_size */ 5137 .event_id = { 5138 .header = { 5139 .type = PERF_RECORD_COMM, 5140 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 5141 /* .size */ 5142 }, 5143 /* .pid */ 5144 /* .tid */ 5145 }, 5146 }; 5147 5148 perf_event_comm_event(&comm_event); 5149 } 5150 5151 /* 5152 * mmap tracking 5153 */ 5154 5155 struct perf_mmap_event { 5156 struct vm_area_struct *vma; 5157 5158 const char *file_name; 5159 int file_size; 5160 int maj, min; 5161 u64 ino; 5162 u64 ino_generation; 5163 u32 prot, flags; 5164 5165 struct { 5166 struct perf_event_header header; 5167 5168 u32 pid; 5169 u32 tid; 5170 u64 start; 5171 u64 len; 5172 u64 pgoff; 5173 } event_id; 5174 }; 5175 5176 static int perf_event_mmap_match(struct perf_event *event, 5177 void *data) 5178 { 5179 struct perf_mmap_event *mmap_event = data; 5180 struct vm_area_struct *vma = mmap_event->vma; 5181 int executable = vma->vm_flags & VM_EXEC; 5182 5183 return (!executable && event->attr.mmap_data) || 5184 (executable && (event->attr.mmap || event->attr.mmap2)); 5185 } 5186 5187 static void perf_event_mmap_output(struct perf_event *event, 5188 void *data) 5189 { 5190 struct perf_mmap_event *mmap_event = data; 5191 struct perf_output_handle handle; 5192 struct perf_sample_data sample; 5193 int size = mmap_event->event_id.header.size; 5194 int ret; 5195 5196 if (!perf_event_mmap_match(event, data)) 5197 return; 5198 5199 if (event->attr.mmap2) { 5200 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5201 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5202 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5203 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5204 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5205 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5206 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5207 } 5208 5209 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5210 ret = perf_output_begin(&handle, event, 5211 mmap_event->event_id.header.size); 5212 if (ret) 5213 goto out; 5214 5215 mmap_event->event_id.pid = perf_event_pid(event, current); 5216 mmap_event->event_id.tid = perf_event_tid(event, current); 5217 5218 perf_output_put(&handle, mmap_event->event_id); 5219 5220 if (event->attr.mmap2) { 5221 perf_output_put(&handle, mmap_event->maj); 5222 perf_output_put(&handle, mmap_event->min); 5223 perf_output_put(&handle, mmap_event->ino); 5224 perf_output_put(&handle, mmap_event->ino_generation); 5225 perf_output_put(&handle, mmap_event->prot); 5226 perf_output_put(&handle, mmap_event->flags); 5227 } 5228 5229 __output_copy(&handle, mmap_event->file_name, 5230 mmap_event->file_size); 5231 5232 perf_event__output_id_sample(event, &handle, &sample); 5233 5234 perf_output_end(&handle); 5235 out: 5236 mmap_event->event_id.header.size = size; 5237 } 5238 5239 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5240 { 5241 struct vm_area_struct *vma = mmap_event->vma; 5242 struct file *file = vma->vm_file; 5243 int maj = 0, min = 0; 5244 u64 ino = 0, gen = 0; 5245 u32 prot = 0, flags = 0; 5246 unsigned int size; 5247 char tmp[16]; 5248 char *buf = NULL; 5249 char *name; 5250 5251 if (file) { 5252 struct inode *inode; 5253 dev_t dev; 5254 5255 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5256 if (!buf) { 5257 name = "//enomem"; 5258 goto cpy_name; 5259 } 5260 /* 5261 * d_path() works from the end of the rb backwards, so we 5262 * need to add enough zero bytes after the string to handle 5263 * the 64bit alignment we do later. 
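		 * That is why the d_path() call below is only given
		 * PATH_MAX - sizeof(u64) bytes: the slack left at the end of
		 * the buffer is what the zero padding can safely grow into.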
5264 */ 5265 name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); 5266 if (IS_ERR(name)) { 5267 name = "//toolong"; 5268 goto cpy_name; 5269 } 5270 inode = file_inode(vma->vm_file); 5271 dev = inode->i_sb->s_dev; 5272 ino = inode->i_ino; 5273 gen = inode->i_generation; 5274 maj = MAJOR(dev); 5275 min = MINOR(dev); 5276 5277 if (vma->vm_flags & VM_READ) 5278 prot |= PROT_READ; 5279 if (vma->vm_flags & VM_WRITE) 5280 prot |= PROT_WRITE; 5281 if (vma->vm_flags & VM_EXEC) 5282 prot |= PROT_EXEC; 5283 5284 if (vma->vm_flags & VM_MAYSHARE) 5285 flags = MAP_SHARED; 5286 else 5287 flags = MAP_PRIVATE; 5288 5289 if (vma->vm_flags & VM_DENYWRITE) 5290 flags |= MAP_DENYWRITE; 5291 if (vma->vm_flags & VM_MAYEXEC) 5292 flags |= MAP_EXECUTABLE; 5293 if (vma->vm_flags & VM_LOCKED) 5294 flags |= MAP_LOCKED; 5295 if (vma->vm_flags & VM_HUGETLB) 5296 flags |= MAP_HUGETLB; 5297 5298 goto got_name; 5299 } else { 5300 if (vma->vm_ops && vma->vm_ops->name) { 5301 name = (char *) vma->vm_ops->name(vma); 5302 if (name) 5303 goto cpy_name; 5304 } 5305 5306 name = (char *)arch_vma_name(vma); 5307 if (name) 5308 goto cpy_name; 5309 5310 if (vma->vm_start <= vma->vm_mm->start_brk && 5311 vma->vm_end >= vma->vm_mm->brk) { 5312 name = "[heap]"; 5313 goto cpy_name; 5314 } 5315 if (vma->vm_start <= vma->vm_mm->start_stack && 5316 vma->vm_end >= vma->vm_mm->start_stack) { 5317 name = "[stack]"; 5318 goto cpy_name; 5319 } 5320 5321 name = "//anon"; 5322 goto cpy_name; 5323 } 5324 5325 cpy_name: 5326 strlcpy(tmp, name, sizeof(tmp)); 5327 name = tmp; 5328 got_name: 5329 /* 5330 * Since our buffer works in 8 byte units we need to align our string 5331 * size to a multiple of 8. However, we must guarantee the tail end is 5332 * zero'd out to avoid leaking random bits to userspace. 
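	 * For example, "[heap]" needs strlen("[heap]") + 1 == 7 bytes; the
	 * loop below grows that to 8, writing one extra '\0', so the final
	 * u64 copied to the buffer contains no stale bytes.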
5333 */ 5334 size = strlen(name)+1; 5335 while (!IS_ALIGNED(size, sizeof(u64))) 5336 name[size++] = '\0'; 5337 5338 mmap_event->file_name = name; 5339 mmap_event->file_size = size; 5340 mmap_event->maj = maj; 5341 mmap_event->min = min; 5342 mmap_event->ino = ino; 5343 mmap_event->ino_generation = gen; 5344 mmap_event->prot = prot; 5345 mmap_event->flags = flags; 5346 5347 if (!(vma->vm_flags & VM_EXEC)) 5348 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 5349 5350 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5351 5352 perf_event_aux(perf_event_mmap_output, 5353 mmap_event, 5354 NULL); 5355 5356 kfree(buf); 5357 } 5358 5359 void perf_event_mmap(struct vm_area_struct *vma) 5360 { 5361 struct perf_mmap_event mmap_event; 5362 5363 if (!atomic_read(&nr_mmap_events)) 5364 return; 5365 5366 mmap_event = (struct perf_mmap_event){ 5367 .vma = vma, 5368 /* .file_name */ 5369 /* .file_size */ 5370 .event_id = { 5371 .header = { 5372 .type = PERF_RECORD_MMAP, 5373 .misc = PERF_RECORD_MISC_USER, 5374 /* .size */ 5375 }, 5376 /* .pid */ 5377 /* .tid */ 5378 .start = vma->vm_start, 5379 .len = vma->vm_end - vma->vm_start, 5380 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 5381 }, 5382 /* .maj (attr_mmap2 only) */ 5383 /* .min (attr_mmap2 only) */ 5384 /* .ino (attr_mmap2 only) */ 5385 /* .ino_generation (attr_mmap2 only) */ 5386 /* .prot (attr_mmap2 only) */ 5387 /* .flags (attr_mmap2 only) */ 5388 }; 5389 5390 perf_event_mmap_event(&mmap_event); 5391 } 5392 5393 /* 5394 * IRQ throttle logging 5395 */ 5396 5397 static void perf_log_throttle(struct perf_event *event, int enable) 5398 { 5399 struct perf_output_handle handle; 5400 struct perf_sample_data sample; 5401 int ret; 5402 5403 struct { 5404 struct perf_event_header header; 5405 u64 time; 5406 u64 id; 5407 u64 stream_id; 5408 } throttle_event = { 5409 .header = { 5410 .type = PERF_RECORD_THROTTLE, 5411 .misc = 0, 5412 .size = sizeof(throttle_event), 5413 }, 5414 .time = perf_clock(), 5415 .id = primary_event_id(event), 5416 .stream_id = event->id, 5417 }; 5418 5419 if (enable) 5420 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 5421 5422 perf_event_header__init_id(&throttle_event.header, &sample, event); 5423 5424 ret = perf_output_begin(&handle, event, 5425 throttle_event.header.size); 5426 if (ret) 5427 return; 5428 5429 perf_output_put(&handle, throttle_event); 5430 perf_event__output_id_sample(event, &handle, &sample); 5431 perf_output_end(&handle); 5432 } 5433 5434 /* 5435 * Generic event overflow handling, sampling. 5436 */ 5437 5438 static int __perf_event_overflow(struct perf_event *event, 5439 int throttle, struct perf_sample_data *data, 5440 struct pt_regs *regs) 5441 { 5442 int events = atomic_read(&event->event_limit); 5443 struct hw_perf_event *hwc = &event->hw; 5444 u64 seq; 5445 int ret = 0; 5446 5447 /* 5448 * Non-sampling counters might still use the PMI to fold short 5449 * hardware counters, ignore those. 
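 * ("Fold" here: hardware counters narrower than 64 bit overflow early, and
 * the PMI is used to accumulate those partial counts into the full 64-bit
 * software value.)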
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			tick_nohz_full_kick();
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period, true);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}

int perf_event_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each context */
	int				recursion[PERF_NR_CONTEXTS];

	/* Keeps track of cpu being initialized/exited */
	bool				online;
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period value is
 * kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					  data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
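			 * (MAX_INTERRUPTS is the "throttled" marker set by
			 * __perf_event_overflow() above; no further overflows
			 * are raised until the event is unthrottled.)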
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
			      enum perf_type_id type,
			      u32 event_id,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release, which makes the protected version suitable here;
	 * the context lock guarantees that.
5691 */ 5692 hlist = rcu_dereference_protected(swhash->swevent_hlist, 5693 lockdep_is_held(&event->ctx->lock)); 5694 if (!hlist) 5695 return NULL; 5696 5697 return __find_swevent_head(hlist, type, event_id); 5698 } 5699 5700 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 5701 u64 nr, 5702 struct perf_sample_data *data, 5703 struct pt_regs *regs) 5704 { 5705 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5706 struct perf_event *event; 5707 struct hlist_head *head; 5708 5709 rcu_read_lock(); 5710 head = find_swevent_head_rcu(swhash, type, event_id); 5711 if (!head) 5712 goto end; 5713 5714 hlist_for_each_entry_rcu(event, head, hlist_entry) { 5715 if (perf_swevent_match(event, type, event_id, data, regs)) 5716 perf_swevent_event(event, nr, data, regs); 5717 } 5718 end: 5719 rcu_read_unlock(); 5720 } 5721 5722 int perf_swevent_get_recursion_context(void) 5723 { 5724 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5725 5726 return get_recursion_context(swhash->recursion); 5727 } 5728 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 5729 5730 inline void perf_swevent_put_recursion_context(int rctx) 5731 { 5732 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5733 5734 put_recursion_context(swhash->recursion, rctx); 5735 } 5736 5737 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 5738 { 5739 struct perf_sample_data data; 5740 int rctx; 5741 5742 preempt_disable_notrace(); 5743 rctx = perf_swevent_get_recursion_context(); 5744 if (rctx < 0) 5745 return; 5746 5747 perf_sample_data_init(&data, addr, 0); 5748 5749 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 5750 5751 perf_swevent_put_recursion_context(rctx); 5752 preempt_enable_notrace(); 5753 } 5754 5755 static void perf_swevent_read(struct perf_event *event) 5756 { 5757 } 5758 5759 static int perf_swevent_add(struct perf_event *event, int flags) 5760 { 5761 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5762 struct hw_perf_event *hwc = &event->hw; 5763 struct hlist_head *head; 5764 5765 if (is_sampling_event(event)) { 5766 hwc->last_period = hwc->sample_period; 5767 perf_swevent_set_period(event); 5768 } 5769 5770 hwc->state = !(flags & PERF_EF_START); 5771 5772 head = find_swevent_head(swhash, event); 5773 if (!head) { 5774 /* 5775 * We can race with cpu hotplug code. Do not 5776 * WARN if the cpu just got unplugged. 
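		 * (swhash->online is false once the cpu has gone down, so the
		 * WARN_ON_ONCE() below stays quiet for the hotplug race.)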
5777 */ 5778 WARN_ON_ONCE(swhash->online); 5779 return -EINVAL; 5780 } 5781 5782 hlist_add_head_rcu(&event->hlist_entry, head); 5783 5784 return 0; 5785 } 5786 5787 static void perf_swevent_del(struct perf_event *event, int flags) 5788 { 5789 hlist_del_rcu(&event->hlist_entry); 5790 } 5791 5792 static void perf_swevent_start(struct perf_event *event, int flags) 5793 { 5794 event->hw.state = 0; 5795 } 5796 5797 static void perf_swevent_stop(struct perf_event *event, int flags) 5798 { 5799 event->hw.state = PERF_HES_STOPPED; 5800 } 5801 5802 /* Deref the hlist from the update side */ 5803 static inline struct swevent_hlist * 5804 swevent_hlist_deref(struct swevent_htable *swhash) 5805 { 5806 return rcu_dereference_protected(swhash->swevent_hlist, 5807 lockdep_is_held(&swhash->hlist_mutex)); 5808 } 5809 5810 static void swevent_hlist_release(struct swevent_htable *swhash) 5811 { 5812 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 5813 5814 if (!hlist) 5815 return; 5816 5817 rcu_assign_pointer(swhash->swevent_hlist, NULL); 5818 kfree_rcu(hlist, rcu_head); 5819 } 5820 5821 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 5822 { 5823 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 5824 5825 mutex_lock(&swhash->hlist_mutex); 5826 5827 if (!--swhash->hlist_refcount) 5828 swevent_hlist_release(swhash); 5829 5830 mutex_unlock(&swhash->hlist_mutex); 5831 } 5832 5833 static void swevent_hlist_put(struct perf_event *event) 5834 { 5835 int cpu; 5836 5837 for_each_possible_cpu(cpu) 5838 swevent_hlist_put_cpu(event, cpu); 5839 } 5840 5841 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 5842 { 5843 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 5844 int err = 0; 5845 5846 mutex_lock(&swhash->hlist_mutex); 5847 5848 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 5849 struct swevent_hlist *hlist; 5850 5851 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 5852 if (!hlist) { 5853 err = -ENOMEM; 5854 goto exit; 5855 } 5856 rcu_assign_pointer(swhash->swevent_hlist, hlist); 5857 } 5858 swhash->hlist_refcount++; 5859 exit: 5860 mutex_unlock(&swhash->hlist_mutex); 5861 5862 return err; 5863 } 5864 5865 static int swevent_hlist_get(struct perf_event *event) 5866 { 5867 int err; 5868 int cpu, failed_cpu; 5869 5870 get_online_cpus(); 5871 for_each_possible_cpu(cpu) { 5872 err = swevent_hlist_get_cpu(event, cpu); 5873 if (err) { 5874 failed_cpu = cpu; 5875 goto fail; 5876 } 5877 } 5878 put_online_cpus(); 5879 5880 return 0; 5881 fail: 5882 for_each_possible_cpu(cpu) { 5883 if (cpu == failed_cpu) 5884 break; 5885 swevent_hlist_put_cpu(event, cpu); 5886 } 5887 5888 put_online_cpus(); 5889 return err; 5890 } 5891 5892 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 5893 5894 static void sw_perf_event_destroy(struct perf_event *event) 5895 { 5896 u64 event_id = event->attr.config; 5897 5898 WARN_ON(event->parent); 5899 5900 static_key_slow_dec(&perf_swevent_enabled[event_id]); 5901 swevent_hlist_put(event); 5902 } 5903 5904 static int perf_swevent_init(struct perf_event *event) 5905 { 5906 u64 event_id = event->attr.config; 5907 5908 if (event->attr.type != PERF_TYPE_SOFTWARE) 5909 return -ENOENT; 5910 5911 /* 5912 * no branch sampling for software events 5913 */ 5914 if (has_branch_stack(event)) 5915 return -EOPNOTSUPP; 5916 5917 switch (event_id) { 5918 case PERF_COUNT_SW_CPU_CLOCK: 5919 case PERF_COUNT_SW_TASK_CLOCK: 5920 return -ENOENT; 5921 5922 default: 5923 break; 5924 } 5925 5926 if (event_id >= PERF_COUNT_SW_MAX) 5927 
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		static_key_slow_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static int perf_swevent_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
};

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx,
		   struct task_struct *task)
{
	struct perf_sample_data data;
	struct perf_event *event;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr, 0);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	/*
	 * If a target task was specified, also iterate its context and
	 * deliver this event there too.
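	 * (This is used by e.g. the scheduler tracepoints, which fire in one
	 * task's context but are also of interest to the other task
	 * involved.)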
6014 */ 6015 if (task && task != current) { 6016 struct perf_event_context *ctx; 6017 struct trace_entry *entry = record; 6018 6019 rcu_read_lock(); 6020 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6021 if (!ctx) 6022 goto unlock; 6023 6024 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6025 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6026 continue; 6027 if (event->attr.config != entry->type) 6028 continue; 6029 if (perf_tp_event_match(event, &data, regs)) 6030 perf_swevent_event(event, count, &data, regs); 6031 } 6032 unlock: 6033 rcu_read_unlock(); 6034 } 6035 6036 perf_swevent_put_recursion_context(rctx); 6037 } 6038 EXPORT_SYMBOL_GPL(perf_tp_event); 6039 6040 static void tp_perf_event_destroy(struct perf_event *event) 6041 { 6042 perf_trace_destroy(event); 6043 } 6044 6045 static int perf_tp_event_init(struct perf_event *event) 6046 { 6047 int err; 6048 6049 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6050 return -ENOENT; 6051 6052 /* 6053 * no branch sampling for tracepoint events 6054 */ 6055 if (has_branch_stack(event)) 6056 return -EOPNOTSUPP; 6057 6058 err = perf_trace_init(event); 6059 if (err) 6060 return err; 6061 6062 event->destroy = tp_perf_event_destroy; 6063 6064 return 0; 6065 } 6066 6067 static struct pmu perf_tracepoint = { 6068 .task_ctx_nr = perf_sw_context, 6069 6070 .event_init = perf_tp_event_init, 6071 .add = perf_trace_add, 6072 .del = perf_trace_del, 6073 .start = perf_swevent_start, 6074 .stop = perf_swevent_stop, 6075 .read = perf_swevent_read, 6076 6077 .event_idx = perf_swevent_event_idx, 6078 }; 6079 6080 static inline void perf_tp_register(void) 6081 { 6082 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 6083 } 6084 6085 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6086 { 6087 char *filter_str; 6088 int ret; 6089 6090 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6091 return -EINVAL; 6092 6093 filter_str = strndup_user(arg, PAGE_SIZE); 6094 if (IS_ERR(filter_str)) 6095 return PTR_ERR(filter_str); 6096 6097 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 6098 6099 kfree(filter_str); 6100 return ret; 6101 } 6102 6103 static void perf_event_free_filter(struct perf_event *event) 6104 { 6105 ftrace_profile_free_filter(event); 6106 } 6107 6108 #else 6109 6110 static inline void perf_tp_register(void) 6111 { 6112 } 6113 6114 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6115 { 6116 return -ENOENT; 6117 } 6118 6119 static void perf_event_free_filter(struct perf_event *event) 6120 { 6121 } 6122 6123 #endif /* CONFIG_EVENT_TRACING */ 6124 6125 #ifdef CONFIG_HAVE_HW_BREAKPOINT 6126 void perf_bp_event(struct perf_event *bp, void *data) 6127 { 6128 struct perf_sample_data sample; 6129 struct pt_regs *regs = data; 6130 6131 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 6132 6133 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 6134 perf_swevent_event(bp, 1, &sample, regs); 6135 } 6136 #endif 6137 6138 /* 6139 * hrtimer based swevent callback 6140 */ 6141 6142 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 6143 { 6144 enum hrtimer_restart ret = HRTIMER_RESTART; 6145 struct perf_sample_data data; 6146 struct pt_regs *regs; 6147 struct perf_event *event; 6148 u64 period; 6149 6150 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 6151 6152 if (event->state != PERF_EVENT_STATE_ACTIVE) 6153 return HRTIMER_NORESTART; 6154 6155 event->pmu->read(event); 6156 6157 
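	/*
	 * Update the count first: the overflow path below can emit a sample,
	 * and a PERF_SAMPLE_READ sample reports event->count.
	 */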
perf_sample_data_init(&data, 0, event->hw.last_period); 6158 regs = get_irq_regs(); 6159 6160 if (regs && !perf_exclude_event(event, regs)) { 6161 if (!(event->attr.exclude_idle && is_idle_task(current))) 6162 if (__perf_event_overflow(event, 1, &data, regs)) 6163 ret = HRTIMER_NORESTART; 6164 } 6165 6166 period = max_t(u64, 10000, event->hw.sample_period); 6167 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 6168 6169 return ret; 6170 } 6171 6172 static void perf_swevent_start_hrtimer(struct perf_event *event) 6173 { 6174 struct hw_perf_event *hwc = &event->hw; 6175 s64 period; 6176 6177 if (!is_sampling_event(event)) 6178 return; 6179 6180 period = local64_read(&hwc->period_left); 6181 if (period) { 6182 if (period < 0) 6183 period = 10000; 6184 6185 local64_set(&hwc->period_left, 0); 6186 } else { 6187 period = max_t(u64, 10000, hwc->sample_period); 6188 } 6189 __hrtimer_start_range_ns(&hwc->hrtimer, 6190 ns_to_ktime(period), 0, 6191 HRTIMER_MODE_REL_PINNED, 0); 6192 } 6193 6194 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 6195 { 6196 struct hw_perf_event *hwc = &event->hw; 6197 6198 if (is_sampling_event(event)) { 6199 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 6200 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 6201 6202 hrtimer_cancel(&hwc->hrtimer); 6203 } 6204 } 6205 6206 static void perf_swevent_init_hrtimer(struct perf_event *event) 6207 { 6208 struct hw_perf_event *hwc = &event->hw; 6209 6210 if (!is_sampling_event(event)) 6211 return; 6212 6213 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6214 hwc->hrtimer.function = perf_swevent_hrtimer; 6215 6216 /* 6217 * Since hrtimers have a fixed rate, we can do a static freq->period 6218 * mapping and avoid the whole period adjust feedback stuff. 
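	 *
	 * E.g. attr.sample_freq == 1000 simply becomes a fixed period of
	 * NSEC_PER_SEC / 1000 == 1000000 ns, computed once below.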
6219 */ 6220 if (event->attr.freq) { 6221 long freq = event->attr.sample_freq; 6222 6223 event->attr.sample_period = NSEC_PER_SEC / freq; 6224 hwc->sample_period = event->attr.sample_period; 6225 local64_set(&hwc->period_left, hwc->sample_period); 6226 hwc->last_period = hwc->sample_period; 6227 event->attr.freq = 0; 6228 } 6229 } 6230 6231 /* 6232 * Software event: cpu wall time clock 6233 */ 6234 6235 static void cpu_clock_event_update(struct perf_event *event) 6236 { 6237 s64 prev; 6238 u64 now; 6239 6240 now = local_clock(); 6241 prev = local64_xchg(&event->hw.prev_count, now); 6242 local64_add(now - prev, &event->count); 6243 } 6244 6245 static void cpu_clock_event_start(struct perf_event *event, int flags) 6246 { 6247 local64_set(&event->hw.prev_count, local_clock()); 6248 perf_swevent_start_hrtimer(event); 6249 } 6250 6251 static void cpu_clock_event_stop(struct perf_event *event, int flags) 6252 { 6253 perf_swevent_cancel_hrtimer(event); 6254 cpu_clock_event_update(event); 6255 } 6256 6257 static int cpu_clock_event_add(struct perf_event *event, int flags) 6258 { 6259 if (flags & PERF_EF_START) 6260 cpu_clock_event_start(event, flags); 6261 6262 return 0; 6263 } 6264 6265 static void cpu_clock_event_del(struct perf_event *event, int flags) 6266 { 6267 cpu_clock_event_stop(event, flags); 6268 } 6269 6270 static void cpu_clock_event_read(struct perf_event *event) 6271 { 6272 cpu_clock_event_update(event); 6273 } 6274 6275 static int cpu_clock_event_init(struct perf_event *event) 6276 { 6277 if (event->attr.type != PERF_TYPE_SOFTWARE) 6278 return -ENOENT; 6279 6280 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 6281 return -ENOENT; 6282 6283 /* 6284 * no branch sampling for software events 6285 */ 6286 if (has_branch_stack(event)) 6287 return -EOPNOTSUPP; 6288 6289 perf_swevent_init_hrtimer(event); 6290 6291 return 0; 6292 } 6293 6294 static struct pmu perf_cpu_clock = { 6295 .task_ctx_nr = perf_sw_context, 6296 6297 .event_init = cpu_clock_event_init, 6298 .add = cpu_clock_event_add, 6299 .del = cpu_clock_event_del, 6300 .start = cpu_clock_event_start, 6301 .stop = cpu_clock_event_stop, 6302 .read = cpu_clock_event_read, 6303 6304 .event_idx = perf_swevent_event_idx, 6305 }; 6306 6307 /* 6308 * Software event: task time clock 6309 */ 6310 6311 static void task_clock_event_update(struct perf_event *event, u64 now) 6312 { 6313 u64 prev; 6314 s64 delta; 6315 6316 prev = local64_xchg(&event->hw.prev_count, now); 6317 delta = now - prev; 6318 local64_add(delta, &event->count); 6319 } 6320 6321 static void task_clock_event_start(struct perf_event *event, int flags) 6322 { 6323 local64_set(&event->hw.prev_count, event->ctx->time); 6324 perf_swevent_start_hrtimer(event); 6325 } 6326 6327 static void task_clock_event_stop(struct perf_event *event, int flags) 6328 { 6329 perf_swevent_cancel_hrtimer(event); 6330 task_clock_event_update(event, event->ctx->time); 6331 } 6332 6333 static int task_clock_event_add(struct perf_event *event, int flags) 6334 { 6335 if (flags & PERF_EF_START) 6336 task_clock_event_start(event, flags); 6337 6338 return 0; 6339 } 6340 6341 static void task_clock_event_del(struct perf_event *event, int flags) 6342 { 6343 task_clock_event_stop(event, PERF_EF_UPDATE); 6344 } 6345 6346 static void task_clock_event_read(struct perf_event *event) 6347 { 6348 u64 now = perf_clock(); 6349 u64 delta = now - event->ctx->timestamp; 6350 u64 time = event->ctx->time + delta; 6351 6352 task_clock_event_update(event, time); 6353 } 6354 6355 static int task_clock_event_init(struct 
static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
};

static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}

static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int perf_event_idx_default(struct perf_event *event)
{
	return event->hw.idx + 1;
}

/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
{
	struct pmu *pmu;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}

	return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->unique_pmu == old_pmu)
			cpuctx->unique_pmu = pmu;
	}
}
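/*
 * Illustration (not part of the original flow): every software pmu in
 * this file uses task_ctx_nr == perf_sw_context, so find_pmu_context()
 * lets them share a single percpu cpu-context:
 *
 *	perf_pmu_register(&perf_cpu_clock, NULL, -1);	// allocates it
 *	perf_pmu_register(&perf_task_clock, NULL, -1);	// reuses it
 *
 * free_pmu_context() below therefore only frees the percpu allocation
 * once no remaining pmu on the list still points at it.
 */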
static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * Like a real lame refcount.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}
static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
static DEVICE_ATTR_RO(type);

static ssize_t
perf_event_mux_interval_ms_show(struct device *dev,
				struct device_attribute *attr,
				char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
}

static ssize_t
perf_event_mux_interval_ms_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	int timer, cpu, ret;

	ret = kstrtoint(buf, 0, &timer);
	if (ret)
		return ret;

	if (timer < 1)
		return -EINVAL;

	/* same value, nothing to do */
	if (timer == pmu->hrtimer_interval_ms)
		return count;

	pmu->hrtimer_interval_ms = timer;

	/* update all cpuctx for this PMU */
	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

		if (hrtimer_active(&cpuctx->hrtimer))
			hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
	}

	return count;
}
static DEVICE_ATTR_RW(perf_event_mux_interval_ms);

static struct attribute *pmu_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_perf_event_mux_interval_ms.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pmu_dev);

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_groups	= pmu_dev_groups,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	pmu->dev->groups = pmu->attr_groups;
	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}

static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
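/*
 * The attributes above appear under /sys/bus/event_source/devices/.
 * Shell sketch (device names depend on which pmus are registered):
 *
 *	# cat /sys/bus/event_source/devices/software/type
 *	1
 *	# echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 *
 * The store handler rejects intervals below 1ms and re-arms any active
 * rotation hrtimer so the new interval takes effect immediately.
 */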
int perf_pmu_register(struct pmu *pmu, const char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
		if (type < 0) {
			ret = type;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	ret = -ENOMEM;
	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;

		__perf_cpu_hrtimer_init(cpuctx, cpu);

		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->unique_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}
EXPORT_SYMBOL_GPL(perf_pmu_register);
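/*
 * Registration sketch for a hypothetical driver (my_pmu and its
 * callbacks are illustrative, not defined in this file):
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,	// per-cpu only
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Passing type == -1 requests a dynamic type id from the idr above,
 * which userspace can read back from the sysfs "type" attribute.
 */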
void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);

struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
	int ret;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu) {
		if (!try_module_get(pmu->module)) {
			pmu = ERR_PTR(-ENODEV);
			goto unlock;
		}
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
		goto unlock;
	}

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		if (!try_module_get(pmu->module)) {
			pmu = ERR_PTR(-ENODEV);
			goto unlock;
		}
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}

static void account_event_cpu(struct perf_event *event, int cpu)
{
	if (event->parent)
		return;

	if (has_branch_stack(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
	}
	if (is_cgroup_event(event))
		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
}

static void account_event(struct perf_event *event)
{
	if (event->parent)
		return;

	if (event->attach_state & PERF_ATTACH_TASK)
		static_key_slow_inc(&perf_sched_events.key);
	if (event->attr.mmap || event->attr.mmap_data)
		atomic_inc(&nr_mmap_events);
	if (event->attr.comm)
		atomic_inc(&nr_comm_events);
	if (event->attr.task)
		atomic_inc(&nr_task_events);
	if (event->attr.freq) {
		if (atomic_inc_return(&nr_freq_events) == 1)
			tick_nohz_full_kick_all();
	}
	if (has_branch_stack(event))
		static_key_slow_inc(&perf_sched_events.key);
	if (is_cgroup_event(event))
		static_key_slow_inc(&perf_sched_events.key);

	account_event_cpu(event, event->cpu);
}
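/*
 * Accounting sketch (illustrative): for a task-bound cgroup event with
 * attr.comm set, the bookkeeping above bumps perf_sched_events twice
 * (task attach + cgroup) and nr_comm_events once, and account_event_cpu()
 * marks the target cpu as having cgroup events; the unaccount path on
 * event teardown mirrors each of these steps.
 */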
/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 void *context)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err = -EINVAL;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	INIT_LIST_HEAD(&event->rb_entry);
	INIT_LIST_HEAD(&event->active_entry);
	INIT_HLIST_NODE(&event->hlist_entry);

	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	atomic_long_set(&event->refcount, 1);
	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(task_active_pid_ns(current));
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;

		if (attr->type == PERF_TYPE_TRACEPOINT)
			event->hw.tp_target = task;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		else if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	if (!overflow_handler && parent_event) {
		overflow_handler = parent_event->overflow_handler;
		context = parent_event->overflow_handler_context;
	}

	event->overflow_handler	= overflow_handler;
	event->overflow_handler_context = context;

	perf_event__state_init(event);

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto err_ns;

	pmu = perf_init_event(event);
	if (!pmu)
		goto err_ns;
	else if (IS_ERR(pmu)) {
		err = PTR_ERR(pmu);
		goto err_ns;
	}

	if (!event->parent) {
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err)
				goto err_pmu;
		}
	}

	return event;

err_pmu:
	if (event->destroy)
		event->destroy(event);
	module_put(pmu->module);
err_ns:
	if (event->ns)
		put_pid_ns(event->ns);
	kfree(event);

	return ERR_PTR(err);
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
		u64 mask = attr->branch_sample_type;

		/* only using defined bits */
		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
			return -EINVAL;

		/* at least one branch bit must be set */
		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
			return -EINVAL;

		/* propagate priv level, when not set for branch */
		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {

			/* exclude_kernel checked on syscall entry */
			if (!attr->exclude_kernel)
				mask |= PERF_SAMPLE_BRANCH_KERNEL;

			if (!attr->exclude_user)
				mask |= PERF_SAMPLE_BRANCH_USER;

			if (!attr->exclude_hv)
				mask |= PERF_SAMPLE_BRANCH_HV;
			/*
			 * adjust user setting (for HW filter setup)
			 */
			attr->branch_sample_type = mask;
		}
		/* privileged levels capture (kernel, hv): check permissions */
		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
		ret = perf_reg_validate(attr->sample_regs_user);
		if (ret)
			return ret;
	}

	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
		if (!arch_perf_have_user_stack_dump())
			return -ENOSYS;

		/*
		 * We have __u32 type for the size, but so far
		 * we can only use __u16 as maximum due to the
		 * __u16 sample size limit.
		 */
		if (attr->sample_stack_user >= USHRT_MAX)
			ret = -EINVAL;
		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
			ret = -EINVAL;
	}

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
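/*
 * ABI sketch of the size handshake above (illustrative): old userspace
 * passing attr.size == PERF_ATTR_SIZE_VER0 gets the tail of *attr
 * zeroed by the memset(); newer userspace with a larger struct is
 * accepted only while all bytes past sizeof(*attr) are zero. Otherwise
 * the kernel writes its own sizeof(*attr) back into uattr->size and
 * fails with -E2BIG, so the caller can shrink the structure and retry.
 */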
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct ring_buffer *rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu rb, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;
	}

	ring_buffer_attach(event, rb);

	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

out:
	return ret;
}
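/*
 * Redirection sketch (illustrative): userspace can multiplex several
 * events into one mmap()ed buffer either via the open flag or the
 * ioctl,
 *
 *	fd2 = syscall(__NR_perf_event_open, &attr2, pid, cpu, fd1,
 *		      PERF_FLAG_FD_OUTPUT);
 * or
 *	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);
 *
 * both of which end up here; the checks above keep the pairing on one
 * cpu/task so samples stay ordered within the buffer.
 */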
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct fd group = {NULL, 0};
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int err;
	int f_flags = O_RDWR;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	} else {
		if (attr.sample_period & (1ULL << 63))
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	if (flags & PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;

	event_fd = get_unused_fd_flags(f_flags);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		err = perf_fget_light(group_fd, &group);
		if (err)
			goto err_fd;
		group_leader = group.file->private_data;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	if (task && group_leader &&
	    group_leader->attr.inherit != attr.inherit) {
		err = -EINVAL;
		goto err_task;
	}

	get_online_cpus();

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_cpus;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err) {
			__free_event(event);
			goto err_cpus;
		}
	}

	if (is_sampling_event(event)) {
		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
			err = -ENOTSUPP;
			goto err_alloc;
		}
	}

	account_event(event);

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}
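	/*
	 * Grouping sketch (illustrative): a hardware leader may carry
	 * software siblings, e.g. a "cycles" leader with a "page-faults"
	 * sibling stays on the hardware pmu. Only the reverse case, a
	 * pure software group gaining its first hardware member, takes
	 * the move_group path so the whole group migrates to the
	 * hardware context below.
	 */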
	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, event->cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
					f_flags);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader, false);

		/*
		 * Removing from the context ends up with a disabled
		 * event. What we want here is the event in its initial
		 * startup state, ready to be added into the new context.
		 */
		perf_event__state_init(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling, false);
			perf_event__state_init(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		synchronize_rcu();
		perf_install_in_context(ctx, group_leader, event->cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, event->cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, event->cpu);
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	put_online_cpus();

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fdput(group);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_cpus:
	put_online_cpus();
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fdput(group);
err_fd:
	put_unused_fd(event_fd);
	return err;
}

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	account_event(event);

	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	mutex_lock(&src_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
		unaccount_event_cpu(event, src_cpu);
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}
	mutex_unlock(&src_ctx->mutex);

	synchronize_rcu();

	mutex_lock(&dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
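/*
 * In-kernel usage sketch (illustrative; my_overflow is hypothetical,
 * the NMI watchdog does something similar with a hardware event):
 *
 *	static void my_overflow(struct perf_event *event,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		// runs from the pmu interrupt / hrtimer
 *	}
 *
 *	attr = (struct perf_event_attr){
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_CPU_CLOCK,
 *		.size		= sizeof(attr),
 *		.sample_period	= NSEC_PER_SEC / 10,
 *	};
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *
 * perf_pmu_migrate_context() is the hotplug counterpart: uncore-style
 * drivers use it to move the surviving events off a dying cpu.
 */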
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	/*
	 * Do not destroy the 'original' grouping; because of the context
	 * switch optimization the original events could've ended up in a
	 * random child task.
	 *
	 * If we were to destroy the original group, all group related
	 * operations would cease to function properly after this random
	 * child dies.
	 *
	 * Do destroy all inherited groups, we don't care about those
	 * and being thorough is better.
	 */
	perf_remove_from_context(child_event, !!child_event->parent);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *next;
	struct perf_event_context *child_ctx, *parent_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;

	/*
	 * In order to avoid freeing: child_ctx->parent_ctx->task
	 * under perf_event_context::lock, grab another reference.
	 */
	parent_ctx = child_ctx->parent_ctx;
	if (parent_ctx)
		get_ctx(parent_ctx);

	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Now that we no longer hold perf_event_context::lock, drop
	 * our extra child_ctx->parent_ctx reference.
	 */
	if (parent_ctx)
		put_ctx(parent_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *  __perf_event_exit_task()
	 *    sync_child_event()
	 *      put_event()
	 *        mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
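/*
 * Inheritance sketch (illustrative): with attr.inherit set, a counter
 * opened on a shell follows every fork(), and each exiting child's
 * count is folded back into the parent by sync_child_event() above:
 *
 *	attr.inherit = 1;
 *	fd = syscall(__NR_perf_event_open, &attr, shell_pid, -1, -1, 0);
 *	...				// the shell forks children freely
 *	read(fd, &count, sizeof(count));	// includes exited children
 */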
/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
					   NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;

	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = false };
	struct perf_event_context *ctx = __info;

	perf_pmu_rotate_stop(ctx->pmu);

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}
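/*
 * Cgroup-mode sketch (illustrative paths): userspace passes an open fd
 * of the cgroup directory as the "pid" argument:
 *
 *	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
 *	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
 *		     PERF_FLAG_PID_CGROUP);
 *
 * This is the path perf_cgroup_connect() handled earlier in
 * sys_perf_event_open().
 */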
static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */