// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/buildid.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on right CPU with IRQs disabled, we can test
		 * if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly. This will
 * retry due to any failures in smp_call_function_single(), such as if the
 * task_cpu() goes offline concurrently.
 *
 * returns @func return value or -ESRCH or -ENXIO when the process isn't running
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	for (;;) {
		ret = smp_call_function_single(task_cpu(p), remote_function,
					       &data, 1);
		if (!ret)
			ret = data.ret;

		if (ret != -EAGAIN)
			break;

		cond_resched();
	}

	return ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively
 *    straightforward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	lockdep_assert_irqs_disabled();

	if (task) {
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;

			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	/* see ctx_resched() for details */
	EVENT_CPU = 0x8,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
static atomic_t nr_build_id_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
static struct kmem_cache *perf_event_cache;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}
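
/*
 * For example, with the defaults above (100000 samples/sec, i.e. a 10000 ns
 * sample period, and a 25% CPU time budget), update_perf_cpu_limits() yields
 * perf_sample_allowed_ns = 10000 * 25 / 100 = 2500 ns per sample.
 */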

static bool perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int perf_cpu = sysctl_perf_cpu_time_max_percent;
	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (write && (perf_cpu == 100 || perf_cpu == 0))
		return -EINVAL;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_INFO
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */
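
/*
 * For example, an event that spends 3 ms in INACTIVE and then 5 ms in ACTIVE
 * ends up with total_time_enabled == 8 ms and total_time_running == 5 ms:
 * the enabled clock accrues in both states, the running clock only while
 * ACTIVE (see __perf_update_times() below).
 */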

static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	if (leader->state <= PERF_EVENT_STATE_OFF)
		return leader->state;

	return event->state;
}

static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{
	enum perf_event_state state = __perf_effective_state(event);
	u64 delta = now - event->tstamp;

	*enabled = event->total_time_enabled;
	if (state >= PERF_EVENT_STATE_INACTIVE)
		*enabled += delta;

	*running = event->total_time_running;
	if (state >= PERF_EVENT_STATE_ACTIVE)
		*running += delta;
}

static void perf_event_update_time(struct perf_event *event)
{
	u64 now = perf_event_time(event);

	__perf_update_times(event, now, &event->total_time_enabled,
					&event->total_time_running);
	event->tstamp = now;
}

static void perf_event_update_sibling_time(struct perf_event *leader)
{
	struct perf_event *sibling;

	for_each_sibling_event(sibling, leader)
		perf_event_update_time(sibling);
}

static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{
	if (event->state == state)
		return;

	perf_event_update_time(event);
	/*
	 * If a group leader gets enabled/disabled all its siblings
	 * are affected too.
	 */
	if ((event->state < 0) ^ (state < 0))
		perf_event_update_sibling_time(event);

	WRITE_ONCE(event->state, state);
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive. An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups. If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp = cpuctx->cgrp;
	struct cgroup_subsys_state *css;

	if (cgrp) {
		for (css = &cgrp->css; css; css = css->parent) {
			cgrp = container_of(css, struct perf_cgroup, css);
			__update_cgrp_time(cgrp);
		}
	}
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current, event->ctx);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;
	struct cgroup_subsys_state *css;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task, ctx);

	for (css = &cgrp->css; css; css = css->parent) {
		cgrp = container_of(css, struct perf_cgroup, css);
		info = this_cpu_ptr(cgrp->info);
		info->timestamp = ctx->timestamp;
	}
}

static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *list;
	unsigned long flags;

	/*
	 * Disable interrupts and preemption to prevent this CPU's
	 * cgrp_cpuctx_entry from changing under us.
	 */
	local_irq_save(flags);

	list = this_cpu_ptr(&cgrp_cpuctx_list);
	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
		perf_pmu_disable(cpuctx->ctx.pmu);

		if (mode & PERF_CGROUP_SWOUT) {
			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
			/*
			 * must not be done before ctxswout due
			 * to event_filter_match() in event_sched_out()
			 */
			cpuctx->cgrp = NULL;
		}

		if (mode & PERF_CGROUP_SWIN) {
			WARN_ON_ONCE(cpuctx->cgrp);
			/*
			 * set cgrp before ctxsw in to allow
			 * event_filter_match() to not have to pass
			 * task around
			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
			 * because cgroup events are only per-cpu
			 */
			cpuctx->cgrp = perf_cgroup_from_task(task,
							     &cpuctx->ctx);
			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
		}
		perf_pmu_enable(cpuctx->ctx.pmu);
		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
	}

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(prev, NULL);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw. Cgroup events were not scheduled
	 * out if the cgroup did not change.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);

	rcu_read_unlock();
}

static int perf_cgroup_ensure_storage(struct perf_event *event,
				struct cgroup_subsys_state *css)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event **storage;
	int cpu, heap_size, ret = 0;

	/*
	 * Allow storage to have sufficient space for an iterator for each
	 * possibly nested cgroup plus an iterator for events with no cgroup.
	 */
	for (heap_size = 1; css; css = css->parent)
		heap_size++;

	for_each_possible_cpu(cpu) {
		cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
		if (heap_size <= cpuctx->heap_size)
			continue;

		storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
				       GFP_KERNEL, cpu_to_node(cpu));
		if (!storage) {
			ret = -ENOMEM;
			break;
		}

		raw_spin_lock_irq(&cpuctx->ctx.lock);
		if (cpuctx->heap_size < heap_size) {
			swap(cpuctx->heap, storage);
			if (storage == cpuctx->heap_default)
				storage = NULL;
			cpuctx->heap_size = heap_size;
		}
		raw_spin_unlock_irq(&cpuctx->ctx.lock);

		kfree(storage);
	}

	return ret;
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	ret = perf_cgroup_ensure_storage(event, css);
	if (ret)
		goto out;

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	if (!is_cgroup_event(event))
		return;

	/*
	 * Because cgroup events are always per-cpu events,
	 * @ctx == &cpuctx->ctx.
	 */
	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

	/*
	 * Since setting cpuctx->cgrp is conditional on the current @cgrp
	 * matching the event's cgroup, we must do this for every new event,
	 * because if the first would mismatch, the second would not try again
	 * and we would leave cpuctx->cgrp unset.
	 */
	if (ctx->is_active && !cpuctx->cgrp) {
		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);

		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
			cpuctx->cgrp = cgrp;
	}

	if (ctx->nr_cgroups++)
		return;

	list_add(&cpuctx->cgrp_cpuctx_entry,
			per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
}
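
/*
 * Note that only the first cgroup event added on a CPU links the cpuctx into
 * cgrp_cpuctx_list (the ctx->nr_cgroups++ above returns 0 only then), and
 * only removing the last one unlinks it again in perf_cgroup_event_disable().
 */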

static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	if (!is_cgroup_event(event))
		return;

	/*
	 * Because cgroup events are always per-cpu events,
	 * @ctx == &cpuctx->ctx.
	 */
	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

	if (--ctx->nr_cgroups)
		return;

	if (ctx->is_active && cpuctx->cgrp)
		cpuctx->cgrp = NULL;

	list_del(&cpuctx->cgrp_cpuctx_entry);
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

static inline void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
}

static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	bool rotations;

	lockdep_assert_irqs_disabled();

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
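
/*
 * perf_pmu_disable() and perf_pmu_enable() nest via the per-CPU
 * pmu_disable_count; only the outermost pair reaches the PMU callbacks.
 * For example, disable(); disable(); enable(); enable(); results in a
 * single ->pmu_disable() on the first call and a single ->pmu_enable()
 * on the last.
 */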

static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	lockdep_assert_irqs_disabled();

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	lockdep_assert_irqs_disabled();

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	refcount_inc(&ctx->refcount);
}

static void *alloc_task_ctx_data(struct pmu *pmu)
{
	if (pmu->task_ctx_cache)
		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);

	return NULL;
}

static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
{
	if (pmu->task_ctx_cache && task_ctx_data)
		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	free_task_ctx_data(ctx->pmu, ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task && ctx->task != TASK_TOMBSTONE)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location.
 * This means that only external vectors (perf_fops, prctl) can perturb the
 * event while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    exec_update_lock
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_lock
 *	      perf_addr_filters_head::lock
 *
 *    cpu_hotplug_lock
 *      pmus_lock
 *	  cpuctx->mutex / perf_event_context::mutex
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!refcount_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
				enum pid_type type)
{
	u32 nr;
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	nr = __task_pid_nr_ns(p, type, event->ns);
	/* avoid -1 if it is idle thread or runs in another ns */
	if (!nr && !pid_alive(p))
		nr = -1;
	return nr;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, PIDTYPE_TGID);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, PIDTYPE_PID);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (ctx->task == TASK_TOMBSTONE ||
		    !refcount_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		} else {
			WARN_ON_ONCE(ctx->task != task);
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

static enum event_type_t get_event_type(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	enum event_type_t event_type;

	lockdep_assert_held(&ctx->lock);

	/*
	 * It's 'group type', really, because if our group leader is
	 * pinned, so are we.
	 */
	if (event->group_leader != event)
		event = event->group_leader;

	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
	if (!ctx->task)
		event_type |= EVENT_CPU;

	return event_type;
}

/*
 * Helper function to initialize event group nodes.
 */
static void init_event_group(struct perf_event *event)
{
	RB_CLEAR_NODE(&event->group_node);
	event->group_index = 0;
}

/*
 * Extract pinned or flexible groups from the context
 * based on event attrs bits.
 */
static struct perf_event_groups *
get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Helper function to initialize perf_event_groups trees.
 */
static void perf_event_groups_init(struct perf_event_groups *groups)
{
	groups->tree = RB_ROOT;
	groups->index = 0;
}

static inline struct cgroup *event_cgroup(const struct perf_event *event)
{
	struct cgroup *cgroup = NULL;

#ifdef CONFIG_CGROUP_PERF
	if (event->cgrp)
		cgroup = event->cgrp->css.cgroup;
#endif

	return cgroup;
}

/*
 * Compare function for event groups;
 *
 * Implements complex key that first sorts by CPU and then by virtual index
 * which provides ordering when rotating groups for the same CPU.
 */
static __always_inline int
perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup,
		      const u64 left_group_index, const struct perf_event *right)
{
	if (left_cpu < right->cpu)
		return -1;
	if (left_cpu > right->cpu)
		return 1;

#ifdef CONFIG_CGROUP_PERF
	{
		const struct cgroup *right_cgroup = event_cgroup(right);

		if (left_cgroup != right_cgroup) {
			if (!left_cgroup) {
				/*
				 * Left has no cgroup but right does, no
				 * cgroups come first.
				 */
				return -1;
			}
			if (!right_cgroup) {
				/*
				 * Right has no cgroup but left does, no
				 * cgroups come first.
				 */
				return 1;
			}
			/* Two dissimilar cgroups, order by id. */
			if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
				return -1;

			return 1;
		}
	}
#endif

	if (left_group_index < right->group_index)
		return -1;
	if (left_group_index > right->group_index)
		return 1;

	return 0;
}

#define __node_2_pe(node) \
	rb_entry((node), struct perf_event, group_node)

static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
{
	struct perf_event *e = __node_2_pe(a);
	return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,
				     __node_2_pe(b)) < 0;
}

struct __group_key {
	int cpu;
	struct cgroup *cgroup;
};

static inline int __group_cmp(const void *key, const struct rb_node *node)
{
	const struct __group_key *a = key;
	const struct perf_event *b = __node_2_pe(node);

	/* partial/subtree match: @cpu, @cgroup; ignore: @group_index */
	return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b);
}
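
/*
 * The resulting order is {cpu, cgroup, group_index}: for example, an event on
 * CPU 0 sorts before any event on CPU 1, and two CPU 0 events in the same
 * cgroup keep their insertion order via the monotonically growing
 * group_index.
 */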

/*
 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
 * key (see perf_event_groups_less). This places it last inside the CPU
 * subtree.
 */
static void
perf_event_groups_insert(struct perf_event_groups *groups,
			 struct perf_event *event)
{
	event->group_index = ++groups->index;

	rb_add(&event->group_node, &groups->tree, __group_less);
}

/*
 * Helper function to insert event into the pinned or flexible groups.
 */
static void
add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event_groups *groups;

	groups = get_event_groups(event, ctx);
	perf_event_groups_insert(groups, event);
}

/*
 * Delete a group from a tree.
 */
static void
perf_event_groups_delete(struct perf_event_groups *groups,
			 struct perf_event *event)
{
	WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
		     RB_EMPTY_ROOT(&groups->tree));

	rb_erase(&event->group_node, &groups->tree);
	init_event_group(event);
}

/*
 * Helper function to delete event from its groups.
 */
static void
del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event_groups *groups;

	groups = get_event_groups(event, ctx);
	perf_event_groups_delete(groups, event);
}

/*
 * Get the leftmost event in the cpu/cgroup subtree.
 */
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
			struct cgroup *cgrp)
{
	struct __group_key key = {
		.cpu = cpu,
		.cgroup = cgrp,
	};
	struct rb_node *node;

	node = rb_find_first(&key, &groups->tree, __group_cmp);
	if (node)
		return __node_2_pe(node);

	return NULL;
}

/*
 * Like rb_entry_next_safe() for the @cpu subtree.
 */
static struct perf_event *
perf_event_groups_next(struct perf_event *event)
{
	struct __group_key key = {
		.cpu = event->cpu,
		.cgroup = event_cgroup(event),
	};
	struct rb_node *next;

	next = rb_next_match(&key, &event->group_node, __group_cmp);
	if (next)
		return __node_2_pe(next);

	return NULL;
}

/*
 * Iterate through the whole groups tree.
 */
#define perf_event_groups_for_each(event, groups)			\
	for (event = rb_entry_safe(rb_first(&((groups)->tree)),	\
				typeof(*event), group_node); event;	\
		event = rb_entry_safe(rb_next(&event->group_node),	\
				typeof(*event), group_node))

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	lockdep_assert_held(&ctx->lock);

	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	event->tstamp = perf_event_time(event);

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		event->group_caps = event->event_caps;
		add_event_to_groups(event, ctx);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	if (event->state > PERF_EVENT_STATE_OFF)
		perf_cgroup_event_enable(event, ctx);

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}
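
/*
 * For example, with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID and two
 * siblings, __perf_event_read_size() computes entry = 16 (value + id),
 * nr = 3 and size = 8 (the nr field), so read_size = 8 + 16 * 3 = 56 bytes.
 */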

static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
	struct perf_sample_data *data;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
		size += sizeof(data->weight.full);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		size += sizeof(data->phys_addr);

	if (sample_type & PERF_SAMPLE_CGROUP)
		size += sizeof(data->cgroup);

	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
		size += sizeof(data->data_page_size);

	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
		size += sizeof(data->code_page_size);

	event->header_size = size;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__header_size(struct perf_event *event)
{
	__perf_event_read_size(event,
			       event->group_leader->nr_siblings);
	__perf_event_header_size(event, event->attr.sample_type);
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static bool perf_event_validate_size(struct perf_event *event)
{
	/*
	 * The values computed here will be over-written when we actually
	 * attach the event.
	 */
	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
	perf_event__id_header_size(event);

	/*
	 * Sum the lot; should not exceed the 64k limit we have on records.
	 * Conservative limit to allow for callchains and other variable fields.
	 */
	if (event->read_size + event->header_size +
	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
		return false;

	return true;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	lockdep_assert_held(&event->ctx->lock);

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	group_leader->group_caps &= event->event_caps;

	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	for_each_sibling_event(pos, group_leader)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		del_event_from_groups(event, ctx);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read().
The only way to get out 2013 * of error state is by explicit re-enabling 2014 * of the event 2015 */ 2016 if (event->state > PERF_EVENT_STATE_OFF) { 2017 perf_cgroup_event_disable(event, ctx); 2018 perf_event_set_state(event, PERF_EVENT_STATE_OFF); 2019 } 2020 2021 ctx->generation++; 2022 } 2023 2024 static int 2025 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) 2026 { 2027 if (!has_aux(aux_event)) 2028 return 0; 2029 2030 if (!event->pmu->aux_output_match) 2031 return 0; 2032 2033 return event->pmu->aux_output_match(aux_event); 2034 } 2035 2036 static void put_event(struct perf_event *event); 2037 static void event_sched_out(struct perf_event *event, 2038 struct perf_cpu_context *cpuctx, 2039 struct perf_event_context *ctx); 2040 2041 static void perf_put_aux_event(struct perf_event *event) 2042 { 2043 struct perf_event_context *ctx = event->ctx; 2044 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2045 struct perf_event *iter; 2046 2047 /* 2048 * If the event uses an aux_event, tear down the link. 2049 */ 2050 if (event->aux_event) { 2051 iter = event->aux_event; 2052 event->aux_event = NULL; 2053 put_event(iter); 2054 return; 2055 } 2056 2057 /* 2058 * If the event is an aux_event, tear down all links to 2059 * it from other events. 2060 */ 2061 for_each_sibling_event(iter, event->group_leader) { 2062 if (iter->aux_event != event) 2063 continue; 2064 2065 iter->aux_event = NULL; 2066 put_event(event); 2067 2068 /* 2069 * If it's ACTIVE, schedule it out and put it into ERROR 2070 * state so that we don't try to schedule it again. Note 2071 * that perf_event_enable() will clear the ERROR status. 2072 */ 2073 event_sched_out(iter, cpuctx, ctx); 2074 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); 2075 } 2076 } 2077 2078 static bool perf_need_aux_event(struct perf_event *event) 2079 { 2080 return !!event->attr.aux_output || !!event->attr.aux_sample_size; 2081 } 2082 2083 static int perf_get_aux_event(struct perf_event *event, 2084 struct perf_event *group_leader) 2085 { 2086 /* 2087 * Our group leader must be an aux event if we want to be 2088 * an aux_output. This way, the aux event will precede its 2089 * aux_output events in the group, and therefore will always 2090 * schedule first. 2091 */ 2092 if (!group_leader) 2093 return 0; 2094 2095 /* 2096 * aux_output and aux_sample_size are mutually exclusive. 2097 */ 2098 if (event->attr.aux_output && event->attr.aux_sample_size) 2099 return 0; 2100 2101 if (event->attr.aux_output && 2102 !perf_aux_output_match(event, group_leader)) 2103 return 0; 2104 2105 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) 2106 return 0; 2107 2108 if (!atomic_long_inc_not_zero(&group_leader->refcount)) 2109 return 0; 2110 2111 /* 2112 * Link aux_outputs to their aux event; this is undone in 2113 * perf_group_detach() by perf_put_aux_event(). When the 2114 * group is torn down, the aux_output events lose their 2115 * link to the aux_event and can't schedule any more. 2116 */ 2117 event->aux_event = group_leader; 2118 2119 return 1; 2120 } 2121 2122 static inline struct list_head *get_event_list(struct perf_event *event) 2123 { 2124 struct perf_event_context *ctx = event->ctx; 2125 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; 2126 } 2127 2128 /* 2129 * Events that have PERF_EV_CAP_SIBLING require being part of a group and 2130 * cannot exist on their own; schedule them out and move them into the ERROR 2131 * state.
Also see _perf_event_enable(), it will not be able to recover 2132 * this ERROR state. 2133 */ 2134 static inline void perf_remove_sibling_event(struct perf_event *event) 2135 { 2136 struct perf_event_context *ctx = event->ctx; 2137 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2138 2139 event_sched_out(event, cpuctx, ctx); 2140 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); 2141 } 2142 2143 static void perf_group_detach(struct perf_event *event) 2144 { 2145 struct perf_event *leader = event->group_leader; 2146 struct perf_event *sibling, *tmp; 2147 struct perf_event_context *ctx = event->ctx; 2148 2149 lockdep_assert_held(&ctx->lock); 2150 2151 /* 2152 * We can have double detach due to exit/hot-unplug + close. 2153 */ 2154 if (!(event->attach_state & PERF_ATTACH_GROUP)) 2155 return; 2156 2157 event->attach_state &= ~PERF_ATTACH_GROUP; 2158 2159 perf_put_aux_event(event); 2160 2161 /* 2162 * If this is a sibling, remove it from its group. 2163 */ 2164 if (leader != event) { 2165 list_del_init(&event->sibling_list); 2166 event->group_leader->nr_siblings--; 2167 goto out; 2168 } 2169 2170 /* 2171 * If this was a group event with sibling events then 2172 * upgrade the siblings to singleton events by adding them 2173 * to whatever list we are on. 2174 */ 2175 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { 2176 2177 if (sibling->event_caps & PERF_EV_CAP_SIBLING) 2178 perf_remove_sibling_event(sibling); 2179 2180 sibling->group_leader = sibling; 2181 list_del_init(&sibling->sibling_list); 2182 2183 /* Inherit group flags from the previous leader */ 2184 sibling->group_caps = event->group_caps; 2185 2186 if (!RB_EMPTY_NODE(&event->group_node)) { 2187 add_event_to_groups(sibling, event->ctx); 2188 2189 if (sibling->state == PERF_EVENT_STATE_ACTIVE) 2190 list_add_tail(&sibling->active_list, get_event_list(sibling)); 2191 } 2192 2193 WARN_ON_ONCE(sibling->ctx != event->ctx); 2194 } 2195 2196 out: 2197 for_each_sibling_event(tmp, leader) 2198 perf_event__header_size(tmp); 2199 2200 perf_event__header_size(leader); 2201 } 2202 2203 static void sync_child_event(struct perf_event *child_event); 2204 2205 static void perf_child_detach(struct perf_event *event) 2206 { 2207 struct perf_event *parent_event = event->parent; 2208 2209 if (!(event->attach_state & PERF_ATTACH_CHILD)) 2210 return; 2211 2212 event->attach_state &= ~PERF_ATTACH_CHILD; 2213 2214 if (WARN_ON_ONCE(!parent_event)) 2215 return; 2216 2217 lockdep_assert_held(&parent_event->child_mutex); 2218 2219 sync_child_event(event); 2220 list_del_init(&event->child_list); 2221 } 2222 2223 static bool is_orphaned_event(struct perf_event *event) 2224 { 2225 return event->state == PERF_EVENT_STATE_DEAD; 2226 } 2227 2228 static inline int __pmu_filter_match(struct perf_event *event) 2229 { 2230 struct pmu *pmu = event->pmu; 2231 return pmu->filter_match ? pmu->filter_match(event) : 1; 2232 } 2233 2234 /* 2235 * Check whether we should attempt to schedule an event group based on 2236 * PMU-specific filtering. 
An event group can consist of HW and SW events, 2237 * potentially with a SW leader, so we must check all the filters, to 2238 * determine whether a group is schedulable: 2239 */ 2240 static inline int pmu_filter_match(struct perf_event *event) 2241 { 2242 struct perf_event *sibling; 2243 2244 if (!__pmu_filter_match(event)) 2245 return 0; 2246 2247 for_each_sibling_event(sibling, event) { 2248 if (!__pmu_filter_match(sibling)) 2249 return 0; 2250 } 2251 2252 return 1; 2253 } 2254 2255 static inline int 2256 event_filter_match(struct perf_event *event) 2257 { 2258 return (event->cpu == -1 || event->cpu == smp_processor_id()) && 2259 perf_cgroup_match(event) && pmu_filter_match(event); 2260 } 2261 2262 static void 2263 event_sched_out(struct perf_event *event, 2264 struct perf_cpu_context *cpuctx, 2265 struct perf_event_context *ctx) 2266 { 2267 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE; 2268 2269 WARN_ON_ONCE(event->ctx != ctx); 2270 lockdep_assert_held(&ctx->lock); 2271 2272 if (event->state != PERF_EVENT_STATE_ACTIVE) 2273 return; 2274 2275 /* 2276 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but 2277 * we can schedule events _OUT_ individually through things like 2278 * __perf_remove_from_context(). 2279 */ 2280 list_del_init(&event->active_list); 2281 2282 perf_pmu_disable(event->pmu); 2283 2284 event->pmu->del(event, 0); 2285 event->oncpu = -1; 2286 2287 if (READ_ONCE(event->pending_disable) >= 0) { 2288 WRITE_ONCE(event->pending_disable, -1); 2289 perf_cgroup_event_disable(event, ctx); 2290 state = PERF_EVENT_STATE_OFF; 2291 } 2292 perf_event_set_state(event, state); 2293 2294 if (!is_software_event(event)) 2295 cpuctx->active_oncpu--; 2296 if (!--ctx->nr_active) 2297 perf_event_ctx_deactivate(ctx); 2298 if (event->attr.freq && event->attr.sample_freq) 2299 ctx->nr_freq--; 2300 if (event->attr.exclusive || !cpuctx->active_oncpu) 2301 cpuctx->exclusive = 0; 2302 2303 perf_pmu_enable(event->pmu); 2304 } 2305 2306 static void 2307 group_sched_out(struct perf_event *group_event, 2308 struct perf_cpu_context *cpuctx, 2309 struct perf_event_context *ctx) 2310 { 2311 struct perf_event *event; 2312 2313 if (group_event->state != PERF_EVENT_STATE_ACTIVE) 2314 return; 2315 2316 perf_pmu_disable(ctx->pmu); 2317 2318 event_sched_out(group_event, cpuctx, ctx); 2319 2320 /* 2321 * Schedule out siblings (if any): 2322 */ 2323 for_each_sibling_event(event, group_event) 2324 event_sched_out(event, cpuctx, ctx); 2325 2326 perf_pmu_enable(ctx->pmu); 2327 } 2328 2329 #define DETACH_GROUP 0x01UL 2330 #define DETACH_CHILD 0x02UL 2331 2332 /* 2333 * Cross CPU call to remove a performance event 2334 * 2335 * We disable the event on the hardware level first. After that we 2336 * remove it from the context list. 
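DETACH_GROUP additionally detaches the event from its group via perf_group_detach(), and DETACH_CHILD severs the link to its parent event via perf_child_detach(); both flags are handled below after the event has been scheduled out.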
2337 */ 2338 static void 2339 __perf_remove_from_context(struct perf_event *event, 2340 struct perf_cpu_context *cpuctx, 2341 struct perf_event_context *ctx, 2342 void *info) 2343 { 2344 unsigned long flags = (unsigned long)info; 2345 2346 if (ctx->is_active & EVENT_TIME) { 2347 update_context_time(ctx); 2348 update_cgrp_time_from_cpuctx(cpuctx); 2349 } 2350 2351 event_sched_out(event, cpuctx, ctx); 2352 if (flags & DETACH_GROUP) 2353 perf_group_detach(event); 2354 if (flags & DETACH_CHILD) 2355 perf_child_detach(event); 2356 list_del_event(event, ctx); 2357 2358 if (!ctx->nr_events && ctx->is_active) { 2359 ctx->is_active = 0; 2360 ctx->rotate_necessary = 0; 2361 if (ctx->task) { 2362 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 2363 cpuctx->task_ctx = NULL; 2364 } 2365 } 2366 } 2367 2368 /* 2369 * Remove the event from a task's (or a CPU's) list of events. 2370 * 2371 * If event->ctx is a cloned context, callers must make sure that 2372 * every task struct that event->ctx->task could possibly point to 2373 * remains valid. This is OK when called from perf_release since 2374 * that only calls us on the top-level context, which can't be a clone. 2375 * When called from perf_event_exit_task, it's OK because the 2376 * context has been detached from its task. 2377 */ 2378 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) 2379 { 2380 struct perf_event_context *ctx = event->ctx; 2381 2382 lockdep_assert_held(&ctx->mutex); 2383 2384 /* 2385 * Because of perf_event_exit_task(), perf_remove_from_context() ought 2386 * to work in the face of TASK_TOMBSTONE, unlike every other 2387 * event_function_call() user. 2388 */ 2389 raw_spin_lock_irq(&ctx->lock); 2390 if (!ctx->is_active) { 2391 __perf_remove_from_context(event, __get_cpu_context(ctx), 2392 ctx, (void *)flags); 2393 raw_spin_unlock_irq(&ctx->lock); 2394 return; 2395 } 2396 raw_spin_unlock_irq(&ctx->lock); 2397 2398 event_function_call(event, __perf_remove_from_context, (void *)flags); 2399 } 2400 2401 /* 2402 * Cross CPU call to disable a performance event 2403 */ 2404 static void __perf_event_disable(struct perf_event *event, 2405 struct perf_cpu_context *cpuctx, 2406 struct perf_event_context *ctx, 2407 void *info) 2408 { 2409 if (event->state < PERF_EVENT_STATE_INACTIVE) 2410 return; 2411 2412 if (ctx->is_active & EVENT_TIME) { 2413 update_context_time(ctx); 2414 update_cgrp_time_from_event(event); 2415 } 2416 2417 if (event == event->group_leader) 2418 group_sched_out(event, cpuctx, ctx); 2419 else 2420 event_sched_out(event, cpuctx, ctx); 2421 2422 perf_event_set_state(event, PERF_EVENT_STATE_OFF); 2423 perf_cgroup_event_disable(event, ctx); 2424 } 2425 2426 /* 2427 * Disable an event. 2428 * 2429 * If event->ctx is a cloned context, callers must make sure that 2430 * every task struct that event->ctx->task could possibly point to 2431 * remains valid. This condition is satisfied when called through 2432 * perf_event_for_each_child or perf_event_for_each because they 2433 * hold the top-level event's child_mutex, so any descendant that 2434 * goes to exit will block in perf_event_exit_event(). 2435 * 2436 * When called from perf_pending_event it's OK because event->ctx 2437 * is the current context on this CPU and preemption is disabled, 2438 * hence we can't get into perf_event_task_sched_out for this context. 
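For in-kernel users (e.g. counters created with perf_event_create_kernel_counter()), the exported perf_event_disable() and perf_event_enable() below are the intended interface; an illustrative pattern (not taken from this file) is to disable an event around reconfiguration and re-enable it afterwards.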
2439 */ 2440 static void _perf_event_disable(struct perf_event *event) 2441 { 2442 struct perf_event_context *ctx = event->ctx; 2443 2444 raw_spin_lock_irq(&ctx->lock); 2445 if (event->state <= PERF_EVENT_STATE_OFF) { 2446 raw_spin_unlock_irq(&ctx->lock); 2447 return; 2448 } 2449 raw_spin_unlock_irq(&ctx->lock); 2450 2451 event_function_call(event, __perf_event_disable, NULL); 2452 } 2453 2454 void perf_event_disable_local(struct perf_event *event) 2455 { 2456 event_function_local(event, __perf_event_disable, NULL); 2457 } 2458 2459 /* 2460 * Strictly speaking kernel users cannot create groups and therefore this 2461 * interface does not need the perf_event_ctx_lock() magic. 2462 */ 2463 void perf_event_disable(struct perf_event *event) 2464 { 2465 struct perf_event_context *ctx; 2466 2467 ctx = perf_event_ctx_lock(event); 2468 _perf_event_disable(event); 2469 perf_event_ctx_unlock(event, ctx); 2470 } 2471 EXPORT_SYMBOL_GPL(perf_event_disable); 2472 2473 void perf_event_disable_inatomic(struct perf_event *event) 2474 { 2475 WRITE_ONCE(event->pending_disable, smp_processor_id()); 2476 /* can fail, see perf_pending_event_disable() */ 2477 irq_work_queue(&event->pending); 2478 } 2479 2480 static void perf_set_shadow_time(struct perf_event *event, 2481 struct perf_event_context *ctx) 2482 { 2483 /* 2484 * use the correct time source for the time snapshot 2485 * 2486 * We could get by without this by leveraging the 2487 * fact that to get to this function, the caller 2488 * has most likely already called update_context_time() 2489 * and update_cgrp_time_xx() and thus both timestamp 2490 * are identical (or very close). Given that tstamp is, 2491 * already adjusted for cgroup, we could say that: 2492 * tstamp - ctx->timestamp 2493 * is equivalent to 2494 * tstamp - cgrp->timestamp. 2495 * 2496 * Then, in perf_output_read(), the calculation would 2497 * work with no changes because: 2498 * - event is guaranteed scheduled in 2499 * - no scheduled out in between 2500 * - thus the timestamp would be the same 2501 * 2502 * But this is a bit hairy. 2503 * 2504 * So instead, we have an explicit cgroup call to remain 2505 * within the time source all along. We believe it 2506 * is cleaner and simpler to understand. 2507 */ 2508 if (is_cgroup_event(event)) 2509 perf_cgroup_set_shadow_time(event, event->tstamp); 2510 else 2511 event->shadow_ctx_time = event->tstamp - ctx->timestamp; 2512 } 2513 2514 #define MAX_INTERRUPTS (~0ULL) 2515 2516 static void perf_log_throttle(struct perf_event *event, int enable); 2517 static void perf_log_itrace_start(struct perf_event *event); 2518 2519 static int 2520 event_sched_in(struct perf_event *event, 2521 struct perf_cpu_context *cpuctx, 2522 struct perf_event_context *ctx) 2523 { 2524 int ret = 0; 2525 2526 WARN_ON_ONCE(event->ctx != ctx); 2527 2528 lockdep_assert_held(&ctx->lock); 2529 2530 if (event->state <= PERF_EVENT_STATE_OFF) 2531 return 0; 2532 2533 WRITE_ONCE(event->oncpu, smp_processor_id()); 2534 /* 2535 * Order event::oncpu write to happen before the ACTIVE state is 2536 * visible. This allows perf_event_{stop,read}() to observe the correct 2537 * ->oncpu if it sees ACTIVE. 2538 */ 2539 smp_wmb(); 2540 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); 2541 2542 /* 2543 * Unthrottle events, since we scheduled we might have missed several 2544 * ticks already, also for a heavily scheduling task there is little 2545 * guarantee it'll get a tick in a timely manner. 
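(hw.interrupts == MAX_INTERRUPTS marks a throttled event; the tick-driven path in perf_adjust_freq_unthr_context() performs the same unthrottle.)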
2546 */ 2547 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { 2548 perf_log_throttle(event, 1); 2549 event->hw.interrupts = 0; 2550 } 2551 2552 perf_pmu_disable(event->pmu); 2553 2554 perf_set_shadow_time(event, ctx); 2555 2556 perf_log_itrace_start(event); 2557 2558 if (event->pmu->add(event, PERF_EF_START)) { 2559 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); 2560 event->oncpu = -1; 2561 ret = -EAGAIN; 2562 goto out; 2563 } 2564 2565 if (!is_software_event(event)) 2566 cpuctx->active_oncpu++; 2567 if (!ctx->nr_active++) 2568 perf_event_ctx_activate(ctx); 2569 if (event->attr.freq && event->attr.sample_freq) 2570 ctx->nr_freq++; 2571 2572 if (event->attr.exclusive) 2573 cpuctx->exclusive = 1; 2574 2575 out: 2576 perf_pmu_enable(event->pmu); 2577 2578 return ret; 2579 } 2580 2581 static int 2582 group_sched_in(struct perf_event *group_event, 2583 struct perf_cpu_context *cpuctx, 2584 struct perf_event_context *ctx) 2585 { 2586 struct perf_event *event, *partial_group = NULL; 2587 struct pmu *pmu = ctx->pmu; 2588 2589 if (group_event->state == PERF_EVENT_STATE_OFF) 2590 return 0; 2591 2592 pmu->start_txn(pmu, PERF_PMU_TXN_ADD); 2593 2594 if (event_sched_in(group_event, cpuctx, ctx)) 2595 goto error; 2596 2597 /* 2598 * Schedule in siblings as one group (if any): 2599 */ 2600 for_each_sibling_event(event, group_event) { 2601 if (event_sched_in(event, cpuctx, ctx)) { 2602 partial_group = event; 2603 goto group_error; 2604 } 2605 } 2606 2607 if (!pmu->commit_txn(pmu)) 2608 return 0; 2609 2610 group_error: 2611 /* 2612 * Groups can be scheduled in as one unit only, so undo any 2613 * partial group before returning: 2614 * The events up to the failed event are scheduled out normally. 2615 */ 2616 for_each_sibling_event(event, group_event) { 2617 if (event == partial_group) 2618 break; 2619 2620 event_sched_out(event, cpuctx, ctx); 2621 } 2622 event_sched_out(group_event, cpuctx, ctx); 2623 2624 error: 2625 pmu->cancel_txn(pmu); 2626 return -EAGAIN; 2627 } 2628 2629 /* 2630 * Work out whether we can put this event group on the CPU now. 2631 */ 2632 static int group_can_go_on(struct perf_event *event, 2633 struct perf_cpu_context *cpuctx, 2634 int can_add_hw) 2635 { 2636 /* 2637 * Groups consisting entirely of software events can always go on. 2638 */ 2639 if (event->group_caps & PERF_EV_CAP_SOFTWARE) 2640 return 1; 2641 /* 2642 * If an exclusive group is already on, no other hardware 2643 * events can go on. 2644 */ 2645 if (cpuctx->exclusive) 2646 return 0; 2647 /* 2648 * If this group is exclusive and there are already 2649 * events on the CPU, it can't go on. 2650 */ 2651 if (event->attr.exclusive && !list_empty(get_event_list(event))) 2652 return 0; 2653 /* 2654 * Otherwise, try to add it if all previous groups were able 2655 * to go on. 
2656 */ 2657 return can_add_hw; 2658 } 2659 2660 static void add_event_to_ctx(struct perf_event *event, 2661 struct perf_event_context *ctx) 2662 { 2663 list_add_event(event, ctx); 2664 perf_group_attach(event); 2665 } 2666 2667 static void ctx_sched_out(struct perf_event_context *ctx, 2668 struct perf_cpu_context *cpuctx, 2669 enum event_type_t event_type); 2670 static void 2671 ctx_sched_in(struct perf_event_context *ctx, 2672 struct perf_cpu_context *cpuctx, 2673 enum event_type_t event_type, 2674 struct task_struct *task); 2675 2676 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, 2677 struct perf_event_context *ctx, 2678 enum event_type_t event_type) 2679 { 2680 if (!cpuctx->task_ctx) 2681 return; 2682 2683 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2684 return; 2685 2686 ctx_sched_out(ctx, cpuctx, event_type); 2687 } 2688 2689 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2690 struct perf_event_context *ctx, 2691 struct task_struct *task) 2692 { 2693 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); 2694 if (ctx) 2695 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); 2696 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); 2697 if (ctx) 2698 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); 2699 } 2700 2701 /* 2702 * We want to maintain the following priority of scheduling: 2703 * - CPU pinned (EVENT_CPU | EVENT_PINNED) 2704 * - task pinned (EVENT_PINNED) 2705 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE) 2706 * - task flexible (EVENT_FLEXIBLE). 2707 * 2708 * In order to avoid unscheduling and scheduling back in everything every 2709 * time an event is added, only do it for the groups of equal priority and 2710 * below. 2711 * 2712 * This can be called after a batch operation on task events, in which case 2713 * event_type is a bit mask of the types of events involved. For CPU events, 2714 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE. 2715 */ 2716 static void ctx_resched(struct perf_cpu_context *cpuctx, 2717 struct perf_event_context *task_ctx, 2718 enum event_type_t event_type) 2719 { 2720 enum event_type_t ctx_event_type; 2721 bool cpu_event = !!(event_type & EVENT_CPU); 2722 2723 /* 2724 * If pinned groups are involved, flexible groups also need to be 2725 * scheduled out. 2726 */ 2727 if (event_type & EVENT_PINNED) 2728 event_type |= EVENT_FLEXIBLE; 2729 2730 ctx_event_type = event_type & EVENT_ALL; 2731 2732 perf_pmu_disable(cpuctx->ctx.pmu); 2733 if (task_ctx) 2734 task_ctx_sched_out(cpuctx, task_ctx, event_type); 2735 2736 /* 2737 * Decide which cpu ctx groups to schedule out based on the types 2738 * of events that caused rescheduling: 2739 * - EVENT_CPU: schedule out corresponding groups; 2740 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups; 2741 * - otherwise, do nothing more. 
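For example (illustrative): installing a pinned task event reschedules the task's pinned and flexible groups as well as the CPU flexible groups, whereas a flexible task event only reschedules the task's flexible groups.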
2742 */ 2743 if (cpu_event) 2744 cpu_ctx_sched_out(cpuctx, ctx_event_type); 2745 else if (ctx_event_type & EVENT_PINNED) 2746 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2747 2748 perf_event_sched_in(cpuctx, task_ctx, current); 2749 perf_pmu_enable(cpuctx->ctx.pmu); 2750 } 2751 2752 void perf_pmu_resched(struct pmu *pmu) 2753 { 2754 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2755 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2756 2757 perf_ctx_lock(cpuctx, task_ctx); 2758 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); 2759 perf_ctx_unlock(cpuctx, task_ctx); 2760 } 2761 2762 /* 2763 * Cross CPU call to install and enable a performance event 2764 * 2765 * Very similar to remote_function() + event_function() but cannot assume that 2766 * things like ctx->is_active and cpuctx->task_ctx are set. 2767 */ 2768 static int __perf_install_in_context(void *info) 2769 { 2770 struct perf_event *event = info; 2771 struct perf_event_context *ctx = event->ctx; 2772 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2773 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2774 bool reprogram = true; 2775 int ret = 0; 2776 2777 raw_spin_lock(&cpuctx->ctx.lock); 2778 if (ctx->task) { 2779 raw_spin_lock(&ctx->lock); 2780 task_ctx = ctx; 2781 2782 reprogram = (ctx->task == current); 2783 2784 /* 2785 * If the task is running, it must be running on this CPU, 2786 * otherwise we cannot reprogram things. 2787 * 2788 * If its not running, we don't care, ctx->lock will 2789 * serialize against it becoming runnable. 2790 */ 2791 if (task_curr(ctx->task) && !reprogram) { 2792 ret = -ESRCH; 2793 goto unlock; 2794 } 2795 2796 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); 2797 } else if (task_ctx) { 2798 raw_spin_lock(&task_ctx->lock); 2799 } 2800 2801 #ifdef CONFIG_CGROUP_PERF 2802 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { 2803 /* 2804 * If the current cgroup doesn't match the event's 2805 * cgroup, we should not try to schedule it. 2806 */ 2807 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); 2808 reprogram = cgroup_is_descendant(cgrp->css.cgroup, 2809 event->cgrp->css.cgroup); 2810 } 2811 #endif 2812 2813 if (reprogram) { 2814 ctx_sched_out(ctx, cpuctx, EVENT_TIME); 2815 add_event_to_ctx(event, ctx); 2816 ctx_resched(cpuctx, task_ctx, get_event_type(event)); 2817 } else { 2818 add_event_to_ctx(event, ctx); 2819 } 2820 2821 unlock: 2822 perf_ctx_unlock(cpuctx, task_ctx); 2823 2824 return ret; 2825 } 2826 2827 static bool exclusive_event_installable(struct perf_event *event, 2828 struct perf_event_context *ctx); 2829 2830 /* 2831 * Attach a performance event to a context. 2832 * 2833 * Very similar to event_function_call, see comment there. 2834 */ 2835 static void 2836 perf_install_in_context(struct perf_event_context *ctx, 2837 struct perf_event *event, 2838 int cpu) 2839 { 2840 struct task_struct *task = READ_ONCE(ctx->task); 2841 2842 lockdep_assert_held(&ctx->mutex); 2843 2844 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); 2845 2846 if (event->cpu != -1) 2847 event->cpu = cpu; 2848 2849 /* 2850 * Ensures that if we can observe event->ctx, both the event and ctx 2851 * will be 'complete'. See perf_iterate_sb_cpu(). 2852 */ 2853 smp_store_release(&event->ctx, ctx); 2854 2855 /* 2856 * perf_event_attr::disabled events will not run and can be initialized 2857 * without IPI. 
Except when this is the first event for the context, in 2858 * that case we need the magic of the IPI to set ctx->is_active. 2859 * 2860 * The IOC_ENABLE that is sure to follow the creation of a disabled 2861 * event will issue the IPI and reprogram the hardware. 2862 */ 2863 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) { 2864 raw_spin_lock_irq(&ctx->lock); 2865 if (ctx->task == TASK_TOMBSTONE) { 2866 raw_spin_unlock_irq(&ctx->lock); 2867 return; 2868 } 2869 add_event_to_ctx(event, ctx); 2870 raw_spin_unlock_irq(&ctx->lock); 2871 return; 2872 } 2873 2874 if (!task) { 2875 cpu_function_call(cpu, __perf_install_in_context, event); 2876 return; 2877 } 2878 2879 /* 2880 * Should not happen; we validate the ctx is still alive before calling. 2881 */ 2882 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) 2883 return; 2884 2885 /* 2886 * Installing events is tricky because we cannot rely on ctx->is_active 2887 * to be set in case this is the nr_events 0 -> 1 transition. 2888 * 2889 * Instead we use task_curr(), which tells us if the task is running. 2890 * However, since we use task_curr() outside of rq::lock, we can race 2891 * against the actual state. This means the result can be wrong. 2892 * 2893 * If we get a false positive, we retry; this is harmless. 2894 * 2895 * If we get a false negative, things are complicated. If we are after 2896 * perf_event_context_sched_in(), ctx::lock will serialize us, and the 2897 * value must be correct. If we're before, it doesn't matter since 2898 * perf_event_context_sched_in() will program the counter. 2899 * 2900 * However, this hinges on the remote context switch having observed 2901 * our task->perf_event_ctxp[] store, such that it will in fact take 2902 * ctx::lock in perf_event_context_sched_in(). 2903 * 2904 * We do this by task_function_call(): if the IPI fails to hit the task, 2905 * we know any future context switch of the task must see the 2906 * perf_event_ctxp[] store. 2907 */ 2908 2909 /* 2910 * This smp_mb() orders the task->perf_event_ctxp[] store with the 2911 * task_cpu() load, such that if the IPI then does not find the task 2912 * running, a future context switch of that task must observe the 2913 * store. 2914 */ 2915 smp_mb(); 2916 again: 2917 if (!task_function_call(task, __perf_install_in_context, event)) 2918 return; 2919 2920 raw_spin_lock_irq(&ctx->lock); 2921 task = ctx->task; 2922 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { 2923 /* 2924 * Cannot happen because we already checked above (which also 2925 * cannot happen), and we hold ctx->mutex, which serializes us 2926 * against perf_event_exit_task_context(). 2927 */ 2928 raw_spin_unlock_irq(&ctx->lock); 2929 return; 2930 } 2931 /* 2932 * If the task is not running, ctx->lock will avoid it becoming so, 2933 * thus we can safely install the event.
2934 */ 2935 if (task_curr(task)) { 2936 raw_spin_unlock_irq(&ctx->lock); 2937 goto again; 2938 } 2939 add_event_to_ctx(event, ctx); 2940 raw_spin_unlock_irq(&ctx->lock); 2941 } 2942 2943 /* 2944 * Cross CPU call to enable a performance event 2945 */ 2946 static void __perf_event_enable(struct perf_event *event, 2947 struct perf_cpu_context *cpuctx, 2948 struct perf_event_context *ctx, 2949 void *info) 2950 { 2951 struct perf_event *leader = event->group_leader; 2952 struct perf_event_context *task_ctx; 2953 2954 if (event->state >= PERF_EVENT_STATE_INACTIVE || 2955 event->state <= PERF_EVENT_STATE_ERROR) 2956 return; 2957 2958 if (ctx->is_active) 2959 ctx_sched_out(ctx, cpuctx, EVENT_TIME); 2960 2961 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); 2962 perf_cgroup_event_enable(event, ctx); 2963 2964 if (!ctx->is_active) 2965 return; 2966 2967 if (!event_filter_match(event)) { 2968 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); 2969 return; 2970 } 2971 2972 /* 2973 * If the event is in a group and isn't the group leader, 2974 * then don't put it on unless the group is on. 2975 */ 2976 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { 2977 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); 2978 return; 2979 } 2980 2981 task_ctx = cpuctx->task_ctx; 2982 if (ctx->task) 2983 WARN_ON_ONCE(task_ctx != ctx); 2984 2985 ctx_resched(cpuctx, task_ctx, get_event_type(event)); 2986 } 2987 2988 /* 2989 * Enable an event. 2990 * 2991 * If event->ctx is a cloned context, callers must make sure that 2992 * every task struct that event->ctx->task could possibly point to 2993 * remains valid. This condition is satisfied when called through 2994 * perf_event_for_each_child or perf_event_for_each as described 2995 * for perf_event_disable. 2996 */ 2997 static void _perf_event_enable(struct perf_event *event) 2998 { 2999 struct perf_event_context *ctx = event->ctx; 3000 3001 raw_spin_lock_irq(&ctx->lock); 3002 if (event->state >= PERF_EVENT_STATE_INACTIVE || 3003 event->state < PERF_EVENT_STATE_ERROR) { 3004 out: 3005 raw_spin_unlock_irq(&ctx->lock); 3006 return; 3007 } 3008 3009 /* 3010 * If the event is in error state, clear that first. 3011 * 3012 * That way, if we see the event in error state below, we know that it 3013 * has gone back into error state, as distinct from the task having 3014 * been scheduled away before the cross-call arrived. 3015 */ 3016 if (event->state == PERF_EVENT_STATE_ERROR) { 3017 /* 3018 * Detached SIBLING events cannot leave ERROR state. 
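Such events were moved to ERROR by perf_remove_sibling_event() when their group was torn down and, being their own leader now, can no longer be scheduled as part of a group.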
3019 */ 3020 if (event->event_caps & PERF_EV_CAP_SIBLING && 3021 event->group_leader == event) 3022 goto out; 3023 3024 event->state = PERF_EVENT_STATE_OFF; 3025 } 3026 raw_spin_unlock_irq(&ctx->lock); 3027 3028 event_function_call(event, __perf_event_enable, NULL); 3029 } 3030 3031 /* 3032 * See perf_event_disable(); 3033 */ 3034 void perf_event_enable(struct perf_event *event) 3035 { 3036 struct perf_event_context *ctx; 3037 3038 ctx = perf_event_ctx_lock(event); 3039 _perf_event_enable(event); 3040 perf_event_ctx_unlock(event, ctx); 3041 } 3042 EXPORT_SYMBOL_GPL(perf_event_enable); 3043 3044 struct stop_event_data { 3045 struct perf_event *event; 3046 unsigned int restart; 3047 }; 3048 3049 static int __perf_event_stop(void *info) 3050 { 3051 struct stop_event_data *sd = info; 3052 struct perf_event *event = sd->event; 3053 3054 /* if it's already INACTIVE, do nothing */ 3055 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) 3056 return 0; 3057 3058 /* matches smp_wmb() in event_sched_in() */ 3059 smp_rmb(); 3060 3061 /* 3062 * There is a window with interrupts enabled before we get here, 3063 * so we need to check again lest we try to stop another CPU's event. 3064 */ 3065 if (READ_ONCE(event->oncpu) != smp_processor_id()) 3066 return -EAGAIN; 3067 3068 event->pmu->stop(event, PERF_EF_UPDATE); 3069 3070 /* 3071 * May race with the actual stop (through perf_pmu_output_stop()), 3072 * but it is only used for events with AUX ring buffer, and such 3073 * events will refuse to restart because of rb::aux_mmap_count==0, 3074 * see comments in perf_aux_output_begin(). 3075 * 3076 * Since this is happening on an event-local CPU, no trace is lost 3077 * while restarting. 3078 */ 3079 if (sd->restart) 3080 event->pmu->start(event, 0); 3081 3082 return 0; 3083 } 3084 3085 static int perf_event_stop(struct perf_event *event, int restart) 3086 { 3087 struct stop_event_data sd = { 3088 .event = event, 3089 .restart = restart, 3090 }; 3091 int ret = 0; 3092 3093 do { 3094 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) 3095 return 0; 3096 3097 /* matches smp_wmb() in event_sched_in() */ 3098 smp_rmb(); 3099 3100 /* 3101 * We only want to restart ACTIVE events, so if the event goes 3102 * inactive here (event->oncpu==-1), there's nothing more to do; 3103 * fall through with ret==-ENXIO. 3104 */ 3105 ret = cpu_function_call(READ_ONCE(event->oncpu), 3106 __perf_event_stop, &sd); 3107 } while (ret == -EAGAIN); 3108 3109 return ret; 3110 } 3111 3112 /* 3113 * In order to contain the amount of racy and tricky in the address filter 3114 * configuration management, it is a two part process: 3115 * 3116 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below, 3117 * we update the addresses of corresponding vmas in 3118 * event::addr_filter_ranges array and bump the event::addr_filters_gen; 3119 * (p2) when an event is scheduled in (pmu::add), it calls 3120 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync() 3121 * if the generation has changed since the previous call. 3122 * 3123 * If (p1) happens while the event is active, we restart it to force (p2). 
3124 * 3125 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on 3126 * pre-existing mappings, called once when new filters arrive via SET_FILTER 3127 * ioctl; 3128 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly 3129 * registered mapping, called for every new mmap(), with mm::mmap_lock down 3130 * for reading; 3131 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process 3132 * of exec. 3133 */ 3134 void perf_event_addr_filters_sync(struct perf_event *event) 3135 { 3136 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); 3137 3138 if (!has_addr_filter(event)) 3139 return; 3140 3141 raw_spin_lock(&ifh->lock); 3142 if (event->addr_filters_gen != event->hw.addr_filters_gen) { 3143 event->pmu->addr_filters_sync(event); 3144 event->hw.addr_filters_gen = event->addr_filters_gen; 3145 } 3146 raw_spin_unlock(&ifh->lock); 3147 } 3148 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync); 3149 3150 static int _perf_event_refresh(struct perf_event *event, int refresh) 3151 { 3152 /* 3153 * not supported on inherited events 3154 */ 3155 if (event->attr.inherit || !is_sampling_event(event)) 3156 return -EINVAL; 3157 3158 atomic_add(refresh, &event->event_limit); 3159 _perf_event_enable(event); 3160 3161 return 0; 3162 } 3163 3164 /* 3165 * See perf_event_disable() 3166 */ 3167 int perf_event_refresh(struct perf_event *event, int refresh) 3168 { 3169 struct perf_event_context *ctx; 3170 int ret; 3171 3172 ctx = perf_event_ctx_lock(event); 3173 ret = _perf_event_refresh(event, refresh); 3174 perf_event_ctx_unlock(event, ctx); 3175 3176 return ret; 3177 } 3178 EXPORT_SYMBOL_GPL(perf_event_refresh); 3179 3180 static int perf_event_modify_breakpoint(struct perf_event *bp, 3181 struct perf_event_attr *attr) 3182 { 3183 int err; 3184 3185 _perf_event_disable(bp); 3186 3187 err = modify_user_hw_breakpoint_check(bp, attr, true); 3188 3189 if (!bp->attr.disabled) 3190 _perf_event_enable(bp); 3191 3192 return err; 3193 } 3194 3195 static int perf_event_modify_attr(struct perf_event *event, 3196 struct perf_event_attr *attr) 3197 { 3198 int (*func)(struct perf_event *, struct perf_event_attr *); 3199 struct perf_event *child; 3200 int err; 3201 3202 if (event->attr.type != attr->type) 3203 return -EINVAL; 3204 3205 switch (event->attr.type) { 3206 case PERF_TYPE_BREAKPOINT: 3207 func = perf_event_modify_breakpoint; 3208 break; 3209 default: 3210 /* Place holder for future additions. */ 3211 return -EOPNOTSUPP; 3212 } 3213 3214 WARN_ON_ONCE(event->ctx->parent_ctx); 3215 3216 mutex_lock(&event->child_mutex); 3217 err = func(event, attr); 3218 if (err) 3219 goto out; 3220 list_for_each_entry(child, &event->child_list, child_list) { 3221 err = func(child, attr); 3222 if (err) 3223 goto out; 3224 } 3225 out: 3226 mutex_unlock(&event->child_mutex); 3227 return err; 3228 } 3229 3230 static void ctx_sched_out(struct perf_event_context *ctx, 3231 struct perf_cpu_context *cpuctx, 3232 enum event_type_t event_type) 3233 { 3234 struct perf_event *event, *tmp; 3235 int is_active = ctx->is_active; 3236 3237 lockdep_assert_held(&ctx->lock); 3238 3239 if (likely(!ctx->nr_events)) { 3240 /* 3241 * See __perf_remove_from_context(). 
3242 */ 3243 WARN_ON_ONCE(ctx->is_active); 3244 if (ctx->task) 3245 WARN_ON_ONCE(cpuctx->task_ctx); 3246 return; 3247 } 3248 3249 ctx->is_active &= ~event_type; 3250 if (!(ctx->is_active & EVENT_ALL)) 3251 ctx->is_active = 0; 3252 3253 if (ctx->task) { 3254 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 3255 if (!ctx->is_active) 3256 cpuctx->task_ctx = NULL; 3257 } 3258 3259 /* 3260 * Always update time if it was set; not only when it changes. 3261 * Otherwise we can 'forget' to update time for any but the last 3262 * context we sched out. For example: 3263 * 3264 * ctx_sched_out(.event_type = EVENT_FLEXIBLE) 3265 * ctx_sched_out(.event_type = EVENT_PINNED) 3266 * 3267 * would only update time for the pinned events. 3268 */ 3269 if (is_active & EVENT_TIME) { 3270 /* update (and stop) ctx time */ 3271 update_context_time(ctx); 3272 update_cgrp_time_from_cpuctx(cpuctx); 3273 } 3274 3275 is_active ^= ctx->is_active; /* changed bits */ 3276 3277 if (!ctx->nr_active || !(is_active & EVENT_ALL)) 3278 return; 3279 3280 perf_pmu_disable(ctx->pmu); 3281 if (is_active & EVENT_PINNED) { 3282 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) 3283 group_sched_out(event, cpuctx, ctx); 3284 } 3285 3286 if (is_active & EVENT_FLEXIBLE) { 3287 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) 3288 group_sched_out(event, cpuctx, ctx); 3289 3290 /* 3291 * Since we cleared EVENT_FLEXIBLE, also clear 3292 * rotate_necessary, is will be reset by 3293 * ctx_flexible_sched_in() when needed. 3294 */ 3295 ctx->rotate_necessary = 0; 3296 } 3297 perf_pmu_enable(ctx->pmu); 3298 } 3299 3300 /* 3301 * Test whether two contexts are equivalent, i.e. whether they have both been 3302 * cloned from the same version of the same context. 3303 * 3304 * Equivalence is measured using a generation number in the context that is 3305 * incremented on each modification to it; see unclone_ctx(), list_add_event() 3306 * and list_del_event(). 3307 */ 3308 static int context_equiv(struct perf_event_context *ctx1, 3309 struct perf_event_context *ctx2) 3310 { 3311 lockdep_assert_held(&ctx1->lock); 3312 lockdep_assert_held(&ctx2->lock); 3313 3314 /* Pinning disables the swap optimization */ 3315 if (ctx1->pin_count || ctx2->pin_count) 3316 return 0; 3317 3318 /* If ctx1 is the parent of ctx2 */ 3319 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 3320 return 1; 3321 3322 /* If ctx2 is the parent of ctx1 */ 3323 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 3324 return 1; 3325 3326 /* 3327 * If ctx1 and ctx2 have the same parent; we flatten the parent 3328 * hierarchy, see perf_event_init_context(). 3329 */ 3330 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 3331 ctx1->parent_gen == ctx2->parent_gen) 3332 return 1; 3333 3334 /* Unmatched */ 3335 return 0; 3336 } 3337 3338 static void __perf_event_sync_stat(struct perf_event *event, 3339 struct perf_event *next_event) 3340 { 3341 u64 value; 3342 3343 if (!event->attr.inherit_stat) 3344 return; 3345 3346 /* 3347 * Update the event value, we cannot use perf_event_read() 3348 * because we're in the middle of a context switch and have IRQs 3349 * disabled, which upsets smp_call_function_single(), however 3350 * we know the event must be on the current CPU, therefore we 3351 * don't need to use it. 
3352 */ 3353 if (event->state == PERF_EVENT_STATE_ACTIVE) 3354 event->pmu->read(event); 3355 3356 perf_event_update_time(event); 3357 3358 /* 3359 * In order to keep per-task stats reliable we need to flip the event 3360 * values when we flip the contexts. 3361 */ 3362 value = local64_read(&next_event->count); 3363 value = local64_xchg(&event->count, value); 3364 local64_set(&next_event->count, value); 3365 3366 swap(event->total_time_enabled, next_event->total_time_enabled); 3367 swap(event->total_time_running, next_event->total_time_running); 3368 3369 /* 3370 * Since we swizzled the values, update the user visible data too. 3371 */ 3372 perf_event_update_userpage(event); 3373 perf_event_update_userpage(next_event); 3374 } 3375 3376 static void perf_event_sync_stat(struct perf_event_context *ctx, 3377 struct perf_event_context *next_ctx) 3378 { 3379 struct perf_event *event, *next_event; 3380 3381 if (!ctx->nr_stat) 3382 return; 3383 3384 update_context_time(ctx); 3385 3386 event = list_first_entry(&ctx->event_list, 3387 struct perf_event, event_entry); 3388 3389 next_event = list_first_entry(&next_ctx->event_list, 3390 struct perf_event, event_entry); 3391 3392 while (&event->event_entry != &ctx->event_list && 3393 &next_event->event_entry != &next_ctx->event_list) { 3394 3395 __perf_event_sync_stat(event, next_event); 3396 3397 event = list_next_entry(event, event_entry); 3398 next_event = list_next_entry(next_event, event_entry); 3399 } 3400 } 3401 3402 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 3403 struct task_struct *next) 3404 { 3405 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 3406 struct perf_event_context *next_ctx; 3407 struct perf_event_context *parent, *next_parent; 3408 struct perf_cpu_context *cpuctx; 3409 int do_switch = 1; 3410 struct pmu *pmu; 3411 3412 if (likely(!ctx)) 3413 return; 3414 3415 pmu = ctx->pmu; 3416 cpuctx = __get_cpu_context(ctx); 3417 if (!cpuctx->task_ctx) 3418 return; 3419 3420 rcu_read_lock(); 3421 next_ctx = next->perf_event_ctxp[ctxn]; 3422 if (!next_ctx) 3423 goto unlock; 3424 3425 parent = rcu_dereference(ctx->parent_ctx); 3426 next_parent = rcu_dereference(next_ctx->parent_ctx); 3427 3428 /* If neither context have a parent context; they cannot be clones. */ 3429 if (!parent && !next_parent) 3430 goto unlock; 3431 3432 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 3433 /* 3434 * Looks like the two contexts are clones, so we might be 3435 * able to optimize the context switch. We lock both 3436 * contexts and check that they are clones under the 3437 * lock (including re-checking that neither has been 3438 * uncloned in the meantime). It doesn't matter which 3439 * order we take the locks because no other cpu could 3440 * be trying to lock both of these tasks. 3441 */ 3442 raw_spin_lock(&ctx->lock); 3443 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 3444 if (context_equiv(ctx, next_ctx)) { 3445 3446 WRITE_ONCE(ctx->task, next); 3447 WRITE_ONCE(next_ctx->task, task); 3448 3449 perf_pmu_disable(pmu); 3450 3451 if (cpuctx->sched_cb_usage && pmu->sched_task) 3452 pmu->sched_task(ctx, false); 3453 3454 /* 3455 * PMU specific parts of task perf context can require 3456 * additional synchronization. 
As an example of such 3457 * synchronization see implementation details of Intel 3458 * LBR call stack data profiling; 3459 */ 3460 if (pmu->swap_task_ctx) 3461 pmu->swap_task_ctx(ctx, next_ctx); 3462 else 3463 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 3464 3465 perf_pmu_enable(pmu); 3466 3467 /* 3468 * RCU_INIT_POINTER here is safe because we've not 3469 * modified the ctx and the above modification of 3470 * ctx->task and ctx->task_ctx_data are immaterial 3471 * since those values are always verified under 3472 * ctx->lock which we're now holding. 3473 */ 3474 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); 3475 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); 3476 3477 do_switch = 0; 3478 3479 perf_event_sync_stat(ctx, next_ctx); 3480 } 3481 raw_spin_unlock(&next_ctx->lock); 3482 raw_spin_unlock(&ctx->lock); 3483 } 3484 unlock: 3485 rcu_read_unlock(); 3486 3487 if (do_switch) { 3488 raw_spin_lock(&ctx->lock); 3489 perf_pmu_disable(pmu); 3490 3491 if (cpuctx->sched_cb_usage && pmu->sched_task) 3492 pmu->sched_task(ctx, false); 3493 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); 3494 3495 perf_pmu_enable(pmu); 3496 raw_spin_unlock(&ctx->lock); 3497 } 3498 } 3499 3500 static DEFINE_PER_CPU(struct list_head, sched_cb_list); 3501 3502 void perf_sched_cb_dec(struct pmu *pmu) 3503 { 3504 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 3505 3506 this_cpu_dec(perf_sched_cb_usages); 3507 3508 if (!--cpuctx->sched_cb_usage) 3509 list_del(&cpuctx->sched_cb_entry); 3510 } 3511 3512 3513 void perf_sched_cb_inc(struct pmu *pmu) 3514 { 3515 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 3516 3517 if (!cpuctx->sched_cb_usage++) 3518 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); 3519 3520 this_cpu_inc(perf_sched_cb_usages); 3521 } 3522 3523 /* 3524 * This function provides the context switch callback to the lower code 3525 * layer. It is invoked ONLY when the context switch callback is enabled. 3526 * 3527 * This callback is relevant even to per-cpu events; for example multi event 3528 * PEBS requires this to provide PID/TID information. This requires we flush 3529 * all queued PEBS records before we context switch to a new task. 3530 */ 3531 static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in) 3532 { 3533 struct pmu *pmu; 3534 3535 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ 3536 3537 if (WARN_ON_ONCE(!pmu->sched_task)) 3538 return; 3539 3540 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3541 perf_pmu_disable(pmu); 3542 3543 pmu->sched_task(cpuctx->task_ctx, sched_in); 3544 3545 perf_pmu_enable(pmu); 3546 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3547 } 3548 3549 static void perf_pmu_sched_task(struct task_struct *prev, 3550 struct task_struct *next, 3551 bool sched_in) 3552 { 3553 struct perf_cpu_context *cpuctx; 3554 3555 if (prev == next) 3556 return; 3557 3558 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) { 3559 /* will be handled in perf_event_context_sched_in/out */ 3560 if (cpuctx->task_ctx) 3561 continue; 3562 3563 __perf_pmu_sched_task(cpuctx, sched_in); 3564 } 3565 } 3566 3567 static void perf_event_switch(struct task_struct *task, 3568 struct task_struct *next_prev, bool sched_in); 3569 3570 #define for_each_task_context_nr(ctxn) \ 3571 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 3572 3573 /* 3574 * Called from scheduler to remove the events of the current task, 3575 * with interrupts disabled. 
3576 * 3577 * We stop each event and update the event value in event->count. 3578 * 3579 * This does not protect us against NMI, but disable() 3580 * sets the disabled bit in the control field of event _before_ 3581 * accessing the event control register. If a NMI hits, then it will 3582 * not restart the event. 3583 */ 3584 void __perf_event_task_sched_out(struct task_struct *task, 3585 struct task_struct *next) 3586 { 3587 int ctxn; 3588 3589 if (__this_cpu_read(perf_sched_cb_usages)) 3590 perf_pmu_sched_task(task, next, false); 3591 3592 if (atomic_read(&nr_switch_events)) 3593 perf_event_switch(task, next, false); 3594 3595 for_each_task_context_nr(ctxn) 3596 perf_event_context_sched_out(task, ctxn, next); 3597 3598 /* 3599 * if cgroup events exist on this CPU, then we need 3600 * to check if we have to switch out PMU state. 3601 * cgroup event are system-wide mode only 3602 */ 3603 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 3604 perf_cgroup_sched_out(task, next); 3605 } 3606 3607 /* 3608 * Called with IRQs disabled 3609 */ 3610 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 3611 enum event_type_t event_type) 3612 { 3613 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 3614 } 3615 3616 static bool perf_less_group_idx(const void *l, const void *r) 3617 { 3618 const struct perf_event *le = *(const struct perf_event **)l; 3619 const struct perf_event *re = *(const struct perf_event **)r; 3620 3621 return le->group_index < re->group_index; 3622 } 3623 3624 static void swap_ptr(void *l, void *r) 3625 { 3626 void **lp = l, **rp = r; 3627 3628 swap(*lp, *rp); 3629 } 3630 3631 static const struct min_heap_callbacks perf_min_heap = { 3632 .elem_size = sizeof(struct perf_event *), 3633 .less = perf_less_group_idx, 3634 .swp = swap_ptr, 3635 }; 3636 3637 static void __heap_add(struct min_heap *heap, struct perf_event *event) 3638 { 3639 struct perf_event **itrs = heap->data; 3640 3641 if (event) { 3642 itrs[heap->nr] = event; 3643 heap->nr++; 3644 } 3645 } 3646 3647 static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, 3648 struct perf_event_groups *groups, int cpu, 3649 int (*func)(struct perf_event *, void *), 3650 void *data) 3651 { 3652 #ifdef CONFIG_CGROUP_PERF 3653 struct cgroup_subsys_state *css = NULL; 3654 #endif 3655 /* Space for per CPU and/or any CPU event iterators. */ 3656 struct perf_event *itrs[2]; 3657 struct min_heap event_heap; 3658 struct perf_event **evt; 3659 int ret; 3660 3661 if (cpuctx) { 3662 event_heap = (struct min_heap){ 3663 .data = cpuctx->heap, 3664 .nr = 0, 3665 .size = cpuctx->heap_size, 3666 }; 3667 3668 lockdep_assert_held(&cpuctx->ctx.lock); 3669 3670 #ifdef CONFIG_CGROUP_PERF 3671 if (cpuctx->cgrp) 3672 css = &cpuctx->cgrp->css; 3673 #endif 3674 } else { 3675 event_heap = (struct min_heap){ 3676 .data = itrs, 3677 .nr = 0, 3678 .size = ARRAY_SIZE(itrs), 3679 }; 3680 /* Events not within a CPU context may be on any CPU. 
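As an illustration of the merge: the heap orders the per-CPU iterator, the any-CPU (-1) iterator and any cgroup iterators by group_index, so events are visited in ascending group_index order across all of them.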
*/ 3681 __heap_add(&event_heap, perf_event_groups_first(groups, -1, NULL)); 3682 } 3683 evt = event_heap.data; 3684 3685 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, NULL)); 3686 3687 #ifdef CONFIG_CGROUP_PERF 3688 for (; css; css = css->parent) 3689 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, css->cgroup)); 3690 #endif 3691 3692 min_heapify_all(&event_heap, &perf_min_heap); 3693 3694 while (event_heap.nr) { 3695 ret = func(*evt, data); 3696 if (ret) 3697 return ret; 3698 3699 *evt = perf_event_groups_next(*evt); 3700 if (*evt) 3701 min_heapify(&event_heap, 0, &perf_min_heap); 3702 else 3703 min_heap_pop(&event_heap, &perf_min_heap); 3704 } 3705 3706 return 0; 3707 } 3708 3709 static int merge_sched_in(struct perf_event *event, void *data) 3710 { 3711 struct perf_event_context *ctx = event->ctx; 3712 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3713 int *can_add_hw = data; 3714 3715 if (event->state <= PERF_EVENT_STATE_OFF) 3716 return 0; 3717 3718 if (!event_filter_match(event)) 3719 return 0; 3720 3721 if (group_can_go_on(event, cpuctx, *can_add_hw)) { 3722 if (!group_sched_in(event, cpuctx, ctx)) 3723 list_add_tail(&event->active_list, get_event_list(event)); 3724 } 3725 3726 if (event->state == PERF_EVENT_STATE_INACTIVE) { 3727 if (event->attr.pinned) { 3728 perf_cgroup_event_disable(event, ctx); 3729 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); 3730 } 3731 3732 *can_add_hw = 0; 3733 ctx->rotate_necessary = 1; 3734 perf_mux_hrtimer_restart(cpuctx); 3735 } 3736 3737 return 0; 3738 } 3739 3740 static void 3741 ctx_pinned_sched_in(struct perf_event_context *ctx, 3742 struct perf_cpu_context *cpuctx) 3743 { 3744 int can_add_hw = 1; 3745 3746 if (ctx != &cpuctx->ctx) 3747 cpuctx = NULL; 3748 3749 visit_groups_merge(cpuctx, &ctx->pinned_groups, 3750 smp_processor_id(), 3751 merge_sched_in, &can_add_hw); 3752 } 3753 3754 static void 3755 ctx_flexible_sched_in(struct perf_event_context *ctx, 3756 struct perf_cpu_context *cpuctx) 3757 { 3758 int can_add_hw = 1; 3759 3760 if (ctx != &cpuctx->ctx) 3761 cpuctx = NULL; 3762 3763 visit_groups_merge(cpuctx, &ctx->flexible_groups, 3764 smp_processor_id(), 3765 merge_sched_in, &can_add_hw); 3766 } 3767 3768 static void 3769 ctx_sched_in(struct perf_event_context *ctx, 3770 struct perf_cpu_context *cpuctx, 3771 enum event_type_t event_type, 3772 struct task_struct *task) 3773 { 3774 int is_active = ctx->is_active; 3775 u64 now; 3776 3777 lockdep_assert_held(&ctx->lock); 3778 3779 if (likely(!ctx->nr_events)) 3780 return; 3781 3782 ctx->is_active |= (event_type | EVENT_TIME); 3783 if (ctx->task) { 3784 if (!is_active) 3785 cpuctx->task_ctx = ctx; 3786 else 3787 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 3788 } 3789 3790 is_active ^= ctx->is_active; /* changed bits */ 3791 3792 if (is_active & EVENT_TIME) { 3793 /* start ctx time */ 3794 now = perf_clock(); 3795 ctx->timestamp = now; 3796 perf_cgroup_set_timestamp(task, ctx); 3797 } 3798 3799 /* 3800 * First go through the list and put on any pinned groups 3801 * in order to give them the best chance of going on. 
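A pinned event that still cannot get onto the PMU is moved to ERROR state by merge_sched_in() above rather than being retried later.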
3802 */ 3803 if (is_active & EVENT_PINNED) 3804 ctx_pinned_sched_in(ctx, cpuctx); 3805 3806 /* Then walk through the lower prio flexible groups */ 3807 if (is_active & EVENT_FLEXIBLE) 3808 ctx_flexible_sched_in(ctx, cpuctx); 3809 } 3810 3811 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 3812 enum event_type_t event_type, 3813 struct task_struct *task) 3814 { 3815 struct perf_event_context *ctx = &cpuctx->ctx; 3816 3817 ctx_sched_in(ctx, cpuctx, event_type, task); 3818 } 3819 3820 static void perf_event_context_sched_in(struct perf_event_context *ctx, 3821 struct task_struct *task) 3822 { 3823 struct perf_cpu_context *cpuctx; 3824 struct pmu *pmu = ctx->pmu; 3825 3826 cpuctx = __get_cpu_context(ctx); 3827 if (cpuctx->task_ctx == ctx) { 3828 if (cpuctx->sched_cb_usage) 3829 __perf_pmu_sched_task(cpuctx, true); 3830 return; 3831 } 3832 3833 perf_ctx_lock(cpuctx, ctx); 3834 /* 3835 * We must check ctx->nr_events while holding ctx->lock, such 3836 * that we serialize against perf_install_in_context(). 3837 */ 3838 if (!ctx->nr_events) 3839 goto unlock; 3840 3841 perf_pmu_disable(pmu); 3842 /* 3843 * We want to keep the following priority order: 3844 * cpu pinned (that don't need to move), task pinned, 3845 * cpu flexible, task flexible. 3846 * 3847 * However, if task's ctx is not carrying any pinned 3848 * events, no need to flip the cpuctx's events around. 3849 */ 3850 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) 3851 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3852 perf_event_sched_in(cpuctx, ctx, task); 3853 3854 if (cpuctx->sched_cb_usage && pmu->sched_task) 3855 pmu->sched_task(cpuctx->task_ctx, true); 3856 3857 perf_pmu_enable(pmu); 3858 3859 unlock: 3860 perf_ctx_unlock(cpuctx, ctx); 3861 } 3862 3863 /* 3864 * Called from scheduler to add the events of the current task 3865 * with interrupts disabled. 3866 * 3867 * We restore the event value and then enable it. 3868 * 3869 * This does not protect us against NMI, but enable() 3870 * sets the enabled bit in the control field of event _before_ 3871 * accessing the event control register. If a NMI hits, then it will 3872 * keep the event running. 3873 */ 3874 void __perf_event_task_sched_in(struct task_struct *prev, 3875 struct task_struct *task) 3876 { 3877 struct perf_event_context *ctx; 3878 int ctxn; 3879 3880 /* 3881 * If cgroup events exist on this CPU, then we need to check if we have 3882 * to switch in PMU state; cgroup event are system-wide mode only. 3883 * 3884 * Since cgroup events are CPU events, we must schedule these in before 3885 * we schedule in the task events. 
3886 */ 3887 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 3888 perf_cgroup_sched_in(prev, task); 3889 3890 for_each_task_context_nr(ctxn) { 3891 ctx = task->perf_event_ctxp[ctxn]; 3892 if (likely(!ctx)) 3893 continue; 3894 3895 perf_event_context_sched_in(ctx, task); 3896 } 3897 3898 if (atomic_read(&nr_switch_events)) 3899 perf_event_switch(task, prev, true); 3900 3901 if (__this_cpu_read(perf_sched_cb_usages)) 3902 perf_pmu_sched_task(prev, task, true); 3903 } 3904 3905 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 3906 { 3907 u64 frequency = event->attr.sample_freq; 3908 u64 sec = NSEC_PER_SEC; 3909 u64 divisor, dividend; 3910 3911 int count_fls, nsec_fls, frequency_fls, sec_fls; 3912 3913 count_fls = fls64(count); 3914 nsec_fls = fls64(nsec); 3915 frequency_fls = fls64(frequency); 3916 sec_fls = 30; 3917 3918 /* 3919 * We got @count in @nsec, with a target of sample_freq HZ 3920 * the target period becomes: 3921 * 3922 * @count * 10^9 3923 * period = ------------------- 3924 * @nsec * sample_freq 3925 * 3926 */ 3927 3928 /* 3929 * Reduce accuracy by one bit such that @a and @b converge 3930 * to a similar magnitude. 3931 */ 3932 #define REDUCE_FLS(a, b) \ 3933 do { \ 3934 if (a##_fls > b##_fls) { \ 3935 a >>= 1; \ 3936 a##_fls--; \ 3937 } else { \ 3938 b >>= 1; \ 3939 b##_fls--; \ 3940 } \ 3941 } while (0) 3942 3943 /* 3944 * Reduce accuracy until either term fits in a u64, then proceed with 3945 * the other, so that finally we can do a u64/u64 division. 3946 */ 3947 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 3948 REDUCE_FLS(nsec, frequency); 3949 REDUCE_FLS(sec, count); 3950 } 3951 3952 if (count_fls + sec_fls > 64) { 3953 divisor = nsec * frequency; 3954 3955 while (count_fls + sec_fls > 64) { 3956 REDUCE_FLS(count, sec); 3957 divisor >>= 1; 3958 } 3959 3960 dividend = count * sec; 3961 } else { 3962 dividend = count * sec; 3963 3964 while (nsec_fls + frequency_fls > 64) { 3965 REDUCE_FLS(nsec, frequency); 3966 dividend >>= 1; 3967 } 3968 3969 divisor = nsec * frequency; 3970 } 3971 3972 if (!divisor) 3973 return dividend; 3974 3975 return div64_u64(dividend, divisor); 3976 } 3977 3978 static DEFINE_PER_CPU(int, perf_throttled_count); 3979 static DEFINE_PER_CPU(u64, perf_throttled_seq); 3980 3981 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 3982 { 3983 struct hw_perf_event *hwc = &event->hw; 3984 s64 period, sample_period; 3985 s64 delta; 3986 3987 period = perf_calculate_period(event, nsec, count); 3988 3989 delta = (s64)(period - hwc->sample_period); 3990 delta = (delta + 7) / 8; /* low pass filter */ 3991 3992 sample_period = hwc->sample_period + delta; 3993 3994 if (!sample_period) 3995 sample_period = 1; 3996 3997 hwc->sample_period = sample_period; 3998 3999 if (local64_read(&hwc->period_left) > 8*sample_period) { 4000 if (disable) 4001 event->pmu->stop(event, PERF_EF_UPDATE); 4002 4003 local64_set(&hwc->period_left, 0); 4004 4005 if (disable) 4006 event->pmu->start(event, PERF_EF_RELOAD); 4007 } 4008 } 4009 4010 /* 4011 * combine freq adjustment with unthrottling to avoid two passes over the 4012 * events. At the same time, make sure, having freq events does not change 4013 * the rate of unthrottling as that would introduce bias. 
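As a worked example of perf_calculate_period() above (illustrative numbers, not from the original source): 2,000,000 events counted over 10 ms with sample_freq = 1000 Hz gives period = 2e6 * 1e9 / (1e7 * 1000) = 200,000 events per sample.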
4014 */ 4015 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 4016 int needs_unthr) 4017 { 4018 struct perf_event *event; 4019 struct hw_perf_event *hwc; 4020 u64 now, period = TICK_NSEC; 4021 s64 delta; 4022 4023 /* 4024 * only need to iterate over all events iff: 4025 * - context has events in frequency mode (needs freq adjust) 4026 * - there are events to unthrottle on this cpu 4027 */ 4028 if (!(ctx->nr_freq || needs_unthr)) 4029 return; 4030 4031 raw_spin_lock(&ctx->lock); 4032 perf_pmu_disable(ctx->pmu); 4033 4034 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4035 if (event->state != PERF_EVENT_STATE_ACTIVE) 4036 continue; 4037 4038 if (!event_filter_match(event)) 4039 continue; 4040 4041 perf_pmu_disable(event->pmu); 4042 4043 hwc = &event->hw; 4044 4045 if (hwc->interrupts == MAX_INTERRUPTS) { 4046 hwc->interrupts = 0; 4047 perf_log_throttle(event, 1); 4048 event->pmu->start(event, 0); 4049 } 4050 4051 if (!event->attr.freq || !event->attr.sample_freq) 4052 goto next; 4053 4054 /* 4055 * stop the event and update event->count 4056 */ 4057 event->pmu->stop(event, PERF_EF_UPDATE); 4058 4059 now = local64_read(&event->count); 4060 delta = now - hwc->freq_count_stamp; 4061 hwc->freq_count_stamp = now; 4062 4063 /* 4064 * restart the event 4065 * reload only if value has changed 4066 * we have stopped the event so tell that 4067 * to perf_adjust_period() to avoid stopping it 4068 * twice. 4069 */ 4070 if (delta > 0) 4071 perf_adjust_period(event, period, delta, false); 4072 4073 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 4074 next: 4075 perf_pmu_enable(event->pmu); 4076 } 4077 4078 perf_pmu_enable(ctx->pmu); 4079 raw_spin_unlock(&ctx->lock); 4080 } 4081 4082 /* 4083 * Move @event to the tail of the @ctx's eligible events. 4084 */ 4085 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) 4086 { 4087 /* 4088 * Rotate the first entry last of non-pinned groups. Rotation might be 4089 * disabled by the inheritance code. 4090 */ 4091 if (ctx->rotate_disable) 4092 return; 4093 4094 perf_event_groups_delete(&ctx->flexible_groups, event); 4095 perf_event_groups_insert(&ctx->flexible_groups, event); 4096 } 4097 4098 /* pick an event from the flexible_groups to rotate */ 4099 static inline struct perf_event * 4100 ctx_event_to_rotate(struct perf_event_context *ctx) 4101 { 4102 struct perf_event *event; 4103 4104 /* pick the first active flexible event */ 4105 event = list_first_entry_or_null(&ctx->flexible_active, 4106 struct perf_event, active_list); 4107 4108 /* if no active flexible event, pick the first event */ 4109 if (!event) { 4110 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), 4111 typeof(*event), group_node); 4112 } 4113 4114 /* 4115 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in() 4116 * finds there are unschedulable events, it will set it again. 4117 */ 4118 ctx->rotate_necessary = 0; 4119 4120 return event; 4121 } 4122 4123 static bool perf_rotate_context(struct perf_cpu_context *cpuctx) 4124 { 4125 struct perf_event *cpu_event = NULL, *task_event = NULL; 4126 struct perf_event_context *task_ctx = NULL; 4127 int cpu_rotate, task_rotate; 4128 4129 /* 4130 * Since we run this from IRQ context, nobody can install new 4131 * events, thus the event count values are stable. 4132 */ 4133 4134 cpu_rotate = cpuctx->ctx.rotate_necessary; 4135 task_ctx = cpuctx->task_ctx; 4136 task_rotate = task_ctx ?
task_ctx->rotate_necessary : 0; 4137 4138 if (!(cpu_rotate || task_rotate)) 4139 return false; 4140 4141 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 4142 perf_pmu_disable(cpuctx->ctx.pmu); 4143 4144 if (task_rotate) 4145 task_event = ctx_event_to_rotate(task_ctx); 4146 if (cpu_rotate) 4147 cpu_event = ctx_event_to_rotate(&cpuctx->ctx); 4148 4149 /* 4150 * As per the order given at ctx_resched() first 'pop' task flexible 4151 * and then, if needed CPU flexible. 4152 */ 4153 if (task_event || (task_ctx && cpu_event)) 4154 ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE); 4155 if (cpu_event) 4156 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 4157 4158 if (task_event) 4159 rotate_ctx(task_ctx, task_event); 4160 if (cpu_event) 4161 rotate_ctx(&cpuctx->ctx, cpu_event); 4162 4163 perf_event_sched_in(cpuctx, task_ctx, current); 4164 4165 perf_pmu_enable(cpuctx->ctx.pmu); 4166 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 4167 4168 return true; 4169 } 4170 4171 void perf_event_task_tick(void) 4172 { 4173 struct list_head *head = this_cpu_ptr(&active_ctx_list); 4174 struct perf_event_context *ctx, *tmp; 4175 int throttled; 4176 4177 lockdep_assert_irqs_disabled(); 4178 4179 __this_cpu_inc(perf_throttled_seq); 4180 throttled = __this_cpu_xchg(perf_throttled_count, 0); 4181 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); 4182 4183 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 4184 perf_adjust_freq_unthr_context(ctx, throttled); 4185 } 4186 4187 static int event_enable_on_exec(struct perf_event *event, 4188 struct perf_event_context *ctx) 4189 { 4190 if (!event->attr.enable_on_exec) 4191 return 0; 4192 4193 event->attr.enable_on_exec = 0; 4194 if (event->state >= PERF_EVENT_STATE_INACTIVE) 4195 return 0; 4196 4197 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); 4198 4199 return 1; 4200 } 4201 4202 /* 4203 * Enable all of a task's events that have been marked enable-on-exec. 4204 * This expects task == current. 4205 */ 4206 static void perf_event_enable_on_exec(int ctxn) 4207 { 4208 struct perf_event_context *ctx, *clone_ctx = NULL; 4209 enum event_type_t event_type = 0; 4210 struct perf_cpu_context *cpuctx; 4211 struct perf_event *event; 4212 unsigned long flags; 4213 int enabled = 0; 4214 4215 local_irq_save(flags); 4216 ctx = current->perf_event_ctxp[ctxn]; 4217 if (!ctx || !ctx->nr_events) 4218 goto out; 4219 4220 cpuctx = __get_cpu_context(ctx); 4221 perf_ctx_lock(cpuctx, ctx); 4222 ctx_sched_out(ctx, cpuctx, EVENT_TIME); 4223 list_for_each_entry(event, &ctx->event_list, event_entry) { 4224 enabled |= event_enable_on_exec(event, ctx); 4225 event_type |= get_event_type(event); 4226 } 4227 4228 /* 4229 * Unclone and reschedule this context if we enabled any event. 4230 */ 4231 if (enabled) { 4232 clone_ctx = unclone_ctx(ctx); 4233 ctx_resched(cpuctx, ctx, event_type); 4234 } else { 4235 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); 4236 } 4237 perf_ctx_unlock(cpuctx, ctx); 4238 4239 out: 4240 local_irq_restore(flags); 4241 4242 if (clone_ctx) 4243 put_ctx(clone_ctx); 4244 } 4245 4246 static void perf_remove_from_owner(struct perf_event *event); 4247 static void perf_event_exit_event(struct perf_event *event, 4248 struct perf_event_context *ctx); 4249 4250 /* 4251 * Removes all events from the current task that have been marked 4252 * remove-on-exec, and feeds their values back to parent events. 
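 * Such events carry perf_event_attr.remove_on_exec (tested below), so they
 * are torn down at exec time instead of surviving into the new image.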
4253 */ 4254 static void perf_event_remove_on_exec(int ctxn) 4255 { 4256 struct perf_event_context *ctx, *clone_ctx = NULL; 4257 struct perf_event *event, *next; 4258 LIST_HEAD(free_list); 4259 unsigned long flags; 4260 bool modified = false; 4261 4262 ctx = perf_pin_task_context(current, ctxn); 4263 if (!ctx) 4264 return; 4265 4266 mutex_lock(&ctx->mutex); 4267 4268 if (WARN_ON_ONCE(ctx->task != current)) 4269 goto unlock; 4270 4271 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { 4272 if (!event->attr.remove_on_exec) 4273 continue; 4274 4275 if (!is_kernel_event(event)) 4276 perf_remove_from_owner(event); 4277 4278 modified = true; 4279 4280 perf_event_exit_event(event, ctx); 4281 } 4282 4283 raw_spin_lock_irqsave(&ctx->lock, flags); 4284 if (modified) 4285 clone_ctx = unclone_ctx(ctx); 4286 --ctx->pin_count; 4287 raw_spin_unlock_irqrestore(&ctx->lock, flags); 4288 4289 unlock: 4290 mutex_unlock(&ctx->mutex); 4291 4292 put_ctx(ctx); 4293 if (clone_ctx) 4294 put_ctx(clone_ctx); 4295 } 4296 4297 struct perf_read_data { 4298 struct perf_event *event; 4299 bool group; 4300 int ret; 4301 }; 4302 4303 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) 4304 { 4305 u16 local_pkg, event_pkg; 4306 4307 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { 4308 int local_cpu = smp_processor_id(); 4309 4310 event_pkg = topology_physical_package_id(event_cpu); 4311 local_pkg = topology_physical_package_id(local_cpu); 4312 4313 if (event_pkg == local_pkg) 4314 return local_cpu; 4315 } 4316 4317 return event_cpu; 4318 } 4319 4320 /* 4321 * Cross CPU call to read the hardware event 4322 */ 4323 static void __perf_event_read(void *info) 4324 { 4325 struct perf_read_data *data = info; 4326 struct perf_event *sub, *event = data->event; 4327 struct perf_event_context *ctx = event->ctx; 4328 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 4329 struct pmu *pmu = event->pmu; 4330 4331 /* 4332 * If this is a task context, we need to check whether it is 4333 * the current task context of this cpu. If not it has been 4334 * scheduled out before the smp call arrived. In that case 4335 * event->count would have been updated to a recent sample 4336 * when the event was scheduled out. 4337 */ 4338 if (ctx->task && cpuctx->task_ctx != ctx) 4339 return; 4340 4341 raw_spin_lock(&ctx->lock); 4342 if (ctx->is_active & EVENT_TIME) { 4343 update_context_time(ctx); 4344 update_cgrp_time_from_event(event); 4345 } 4346 4347 perf_event_update_time(event); 4348 if (data->group) 4349 perf_event_update_sibling_time(event); 4350 4351 if (event->state != PERF_EVENT_STATE_ACTIVE) 4352 goto unlock; 4353 4354 if (!data->group) { 4355 pmu->read(event); 4356 data->ret = 0; 4357 goto unlock; 4358 } 4359 4360 pmu->start_txn(pmu, PERF_PMU_TXN_READ); 4361 4362 pmu->read(event); 4363 4364 for_each_sibling_event(sub, event) { 4365 if (sub->state == PERF_EVENT_STATE_ACTIVE) { 4366 /* 4367 * Use sibling's PMU rather than @event's since 4368 * sibling could be on different (eg: software) PMU. 
4369 */ 4370 sub->pmu->read(sub); 4371 } 4372 } 4373 4374 data->ret = pmu->commit_txn(pmu); 4375 4376 unlock: 4377 raw_spin_unlock(&ctx->lock); 4378 } 4379 4380 static inline u64 perf_event_count(struct perf_event *event) 4381 { 4382 return local64_read(&event->count) + atomic64_read(&event->child_count); 4383 } 4384 4385 /* 4386 * NMI-safe method to read a local event, that is an event that 4387 * is: 4388 * - either for the current task, or for this CPU 4389 * - does not have inherit set, for inherited task events 4390 * will not be local and we cannot read them atomically 4391 * - must not have a pmu::count method 4392 */ 4393 int perf_event_read_local(struct perf_event *event, u64 *value, 4394 u64 *enabled, u64 *running) 4395 { 4396 unsigned long flags; 4397 int ret = 0; 4398 4399 /* 4400 * Disabling interrupts avoids all counter scheduling (context 4401 * switches, timer based rotation and IPIs). 4402 */ 4403 local_irq_save(flags); 4404 4405 /* 4406 * It must not be an event with inherit set, we cannot read 4407 * all child counters from atomic context. 4408 */ 4409 if (event->attr.inherit) { 4410 ret = -EOPNOTSUPP; 4411 goto out; 4412 } 4413 4414 /* If this is a per-task event, it must be for current */ 4415 if ((event->attach_state & PERF_ATTACH_TASK) && 4416 event->hw.target != current) { 4417 ret = -EINVAL; 4418 goto out; 4419 } 4420 4421 /* If this is a per-CPU event, it must be for this CPU */ 4422 if (!(event->attach_state & PERF_ATTACH_TASK) && 4423 event->cpu != smp_processor_id()) { 4424 ret = -EINVAL; 4425 goto out; 4426 } 4427 4428 /* If this is a pinned event it must be running on this CPU */ 4429 if (event->attr.pinned && event->oncpu != smp_processor_id()) { 4430 ret = -EBUSY; 4431 goto out; 4432 } 4433 4434 /* 4435 * If the event is currently on this CPU, it's either a per-task event, 4436 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise 4437 * oncpu == -1). 4438 */ 4439 if (event->oncpu == smp_processor_id()) 4440 event->pmu->read(event); 4441 4442 *value = local64_read(&event->count); 4443 if (enabled || running) { 4444 u64 now = event->shadow_ctx_time + perf_clock(); 4445 u64 __enabled, __running; 4446 4447 __perf_update_times(event, now, &__enabled, &__running); 4448 if (enabled) 4449 *enabled = __enabled; 4450 if (running) 4451 *running = __running; 4452 } 4453 out: 4454 local_irq_restore(flags); 4455 4456 return ret; 4457 } 4458 4459 static int perf_event_read(struct perf_event *event, bool group) 4460 { 4461 enum perf_event_state state = READ_ONCE(event->state); 4462 int event_cpu, ret = 0; 4463 4464 /* 4465 * If event is enabled and currently active on a CPU, update the 4466 * value in the event structure: 4467 */ 4468 again: 4469 if (state == PERF_EVENT_STATE_ACTIVE) { 4470 struct perf_read_data data; 4471 4472 /* 4473 * Orders the ->state and ->oncpu loads such that if we see 4474 * ACTIVE we must also see the right ->oncpu. 4475 * 4476 * Matches the smp_wmb() from event_sched_in(). 4477 */ 4478 smp_rmb(); 4479 4480 event_cpu = READ_ONCE(event->oncpu); 4481 if ((unsigned)event_cpu >= nr_cpu_ids) 4482 return 0; 4483 4484 data = (struct perf_read_data){ 4485 .event = event, 4486 .group = group, 4487 .ret = 0, 4488 }; 4489 4490 preempt_disable(); 4491 event_cpu = __perf_event_read_cpu(event, event_cpu); 4492 4493 /* 4494 * Purposely ignore the smp_call_function_single() return 4495 * value. 4496 * 4497 * If event_cpu isn't a valid CPU it means the event got 4498 * scheduled out and that will have updated the event count.
4499 * 4500 * Therefore, either way, we'll have an up-to-date event count 4501 * after this. 4502 */ 4503 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1); 4504 preempt_enable(); 4505 ret = data.ret; 4506 4507 } else if (state == PERF_EVENT_STATE_INACTIVE) { 4508 struct perf_event_context *ctx = event->ctx; 4509 unsigned long flags; 4510 4511 raw_spin_lock_irqsave(&ctx->lock, flags); 4512 state = event->state; 4513 if (state != PERF_EVENT_STATE_INACTIVE) { 4514 raw_spin_unlock_irqrestore(&ctx->lock, flags); 4515 goto again; 4516 } 4517 4518 /* 4519 * May read while context is not active (e.g., thread is 4520 * blocked), in that case we cannot update context time 4521 */ 4522 if (ctx->is_active & EVENT_TIME) { 4523 update_context_time(ctx); 4524 update_cgrp_time_from_event(event); 4525 } 4526 4527 perf_event_update_time(event); 4528 if (group) 4529 perf_event_update_sibling_time(event); 4530 raw_spin_unlock_irqrestore(&ctx->lock, flags); 4531 } 4532 4533 return ret; 4534 } 4535 4536 /* 4537 * Initialize the perf_event context in a task_struct: 4538 */ 4539 static void __perf_event_init_context(struct perf_event_context *ctx) 4540 { 4541 raw_spin_lock_init(&ctx->lock); 4542 mutex_init(&ctx->mutex); 4543 INIT_LIST_HEAD(&ctx->active_ctx_list); 4544 perf_event_groups_init(&ctx->pinned_groups); 4545 perf_event_groups_init(&ctx->flexible_groups); 4546 INIT_LIST_HEAD(&ctx->event_list); 4547 INIT_LIST_HEAD(&ctx->pinned_active); 4548 INIT_LIST_HEAD(&ctx->flexible_active); 4549 refcount_set(&ctx->refcount, 1); 4550 } 4551 4552 static struct perf_event_context * 4553 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 4554 { 4555 struct perf_event_context *ctx; 4556 4557 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 4558 if (!ctx) 4559 return NULL; 4560 4561 __perf_event_init_context(ctx); 4562 if (task) 4563 ctx->task = get_task_struct(task); 4564 ctx->pmu = pmu; 4565 4566 return ctx; 4567 } 4568 4569 static struct task_struct * 4570 find_lively_task_by_vpid(pid_t vpid) 4571 { 4572 struct task_struct *task; 4573 4574 rcu_read_lock(); 4575 if (!vpid) 4576 task = current; 4577 else 4578 task = find_task_by_vpid(vpid); 4579 if (task) 4580 get_task_struct(task); 4581 rcu_read_unlock(); 4582 4583 if (!task) 4584 return ERR_PTR(-ESRCH); 4585 4586 return task; 4587 } 4588 4589 /* 4590 * Returns a matching context with refcount and pincount. 
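 * The caller is expected to drop the pin count again (see
 * perf_unpin_context()) once it is done installing the new event.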
4591 */ 4592 static struct perf_event_context * 4593 find_get_context(struct pmu *pmu, struct task_struct *task, 4594 struct perf_event *event) 4595 { 4596 struct perf_event_context *ctx, *clone_ctx = NULL; 4597 struct perf_cpu_context *cpuctx; 4598 void *task_ctx_data = NULL; 4599 unsigned long flags; 4600 int ctxn, err; 4601 int cpu = event->cpu; 4602 4603 if (!task) { 4604 /* Must be root to operate on a CPU event: */ 4605 err = perf_allow_cpu(&event->attr); 4606 if (err) 4607 return ERR_PTR(err); 4608 4609 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 4610 ctx = &cpuctx->ctx; 4611 get_ctx(ctx); 4612 ++ctx->pin_count; 4613 4614 return ctx; 4615 } 4616 4617 err = -EINVAL; 4618 ctxn = pmu->task_ctx_nr; 4619 if (ctxn < 0) 4620 goto errout; 4621 4622 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 4623 task_ctx_data = alloc_task_ctx_data(pmu); 4624 if (!task_ctx_data) { 4625 err = -ENOMEM; 4626 goto errout; 4627 } 4628 } 4629 4630 retry: 4631 ctx = perf_lock_task_context(task, ctxn, &flags); 4632 if (ctx) { 4633 clone_ctx = unclone_ctx(ctx); 4634 ++ctx->pin_count; 4635 4636 if (task_ctx_data && !ctx->task_ctx_data) { 4637 ctx->task_ctx_data = task_ctx_data; 4638 task_ctx_data = NULL; 4639 } 4640 raw_spin_unlock_irqrestore(&ctx->lock, flags); 4641 4642 if (clone_ctx) 4643 put_ctx(clone_ctx); 4644 } else { 4645 ctx = alloc_perf_context(pmu, task); 4646 err = -ENOMEM; 4647 if (!ctx) 4648 goto errout; 4649 4650 if (task_ctx_data) { 4651 ctx->task_ctx_data = task_ctx_data; 4652 task_ctx_data = NULL; 4653 } 4654 4655 err = 0; 4656 mutex_lock(&task->perf_event_mutex); 4657 /* 4658 * If it has already passed perf_event_exit_task(). 4659 * we must see PF_EXITING, it takes this mutex too. 4660 */ 4661 if (task->flags & PF_EXITING) 4662 err = -ESRCH; 4663 else if (task->perf_event_ctxp[ctxn]) 4664 err = -EAGAIN; 4665 else { 4666 get_ctx(ctx); 4667 ++ctx->pin_count; 4668 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 4669 } 4670 mutex_unlock(&task->perf_event_mutex); 4671 4672 if (unlikely(err)) { 4673 put_ctx(ctx); 4674 4675 if (err == -EAGAIN) 4676 goto retry; 4677 goto errout; 4678 } 4679 } 4680 4681 free_task_ctx_data(pmu, task_ctx_data); 4682 return ctx; 4683 4684 errout: 4685 free_task_ctx_data(pmu, task_ctx_data); 4686 return ERR_PTR(err); 4687 } 4688 4689 static void perf_event_free_filter(struct perf_event *event); 4690 static void perf_event_free_bpf_prog(struct perf_event *event); 4691 4692 static void free_event_rcu(struct rcu_head *head) 4693 { 4694 struct perf_event *event; 4695 4696 event = container_of(head, struct perf_event, rcu_head); 4697 if (event->ns) 4698 put_pid_ns(event->ns); 4699 perf_event_free_filter(event); 4700 kmem_cache_free(perf_event_cache, event); 4701 } 4702 4703 static void ring_buffer_attach(struct perf_event *event, 4704 struct perf_buffer *rb); 4705 4706 static void detach_sb_event(struct perf_event *event) 4707 { 4708 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); 4709 4710 raw_spin_lock(&pel->lock); 4711 list_del_rcu(&event->sb_list); 4712 raw_spin_unlock(&pel->lock); 4713 } 4714 4715 static bool is_sb_event(struct perf_event *event) 4716 { 4717 struct perf_event_attr *attr = &event->attr; 4718 4719 if (event->parent) 4720 return false; 4721 4722 if (event->attach_state & PERF_ATTACH_TASK) 4723 return false; 4724 4725 if (attr->mmap || attr->mmap_data || attr->mmap2 || 4726 attr->comm || attr->comm_exec || 4727 attr->task || attr->ksymbol || 4728 attr->context_switch || attr->text_poke || 4729 attr->bpf_event) 4730 return 
true; 4731 return false; 4732 } 4733 4734 static void unaccount_pmu_sb_event(struct perf_event *event) 4735 { 4736 if (is_sb_event(event)) 4737 detach_sb_event(event); 4738 } 4739 4740 static void unaccount_event_cpu(struct perf_event *event, int cpu) 4741 { 4742 if (event->parent) 4743 return; 4744 4745 if (is_cgroup_event(event)) 4746 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 4747 } 4748 4749 #ifdef CONFIG_NO_HZ_FULL 4750 static DEFINE_SPINLOCK(nr_freq_lock); 4751 #endif 4752 4753 static void unaccount_freq_event_nohz(void) 4754 { 4755 #ifdef CONFIG_NO_HZ_FULL 4756 spin_lock(&nr_freq_lock); 4757 if (atomic_dec_and_test(&nr_freq_events)) 4758 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS); 4759 spin_unlock(&nr_freq_lock); 4760 #endif 4761 } 4762 4763 static void unaccount_freq_event(void) 4764 { 4765 if (tick_nohz_full_enabled()) 4766 unaccount_freq_event_nohz(); 4767 else 4768 atomic_dec(&nr_freq_events); 4769 } 4770 4771 static void unaccount_event(struct perf_event *event) 4772 { 4773 bool dec = false; 4774 4775 if (event->parent) 4776 return; 4777 4778 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) 4779 dec = true; 4780 if (event->attr.mmap || event->attr.mmap_data) 4781 atomic_dec(&nr_mmap_events); 4782 if (event->attr.build_id) 4783 atomic_dec(&nr_build_id_events); 4784 if (event->attr.comm) 4785 atomic_dec(&nr_comm_events); 4786 if (event->attr.namespaces) 4787 atomic_dec(&nr_namespaces_events); 4788 if (event->attr.cgroup) 4789 atomic_dec(&nr_cgroup_events); 4790 if (event->attr.task) 4791 atomic_dec(&nr_task_events); 4792 if (event->attr.freq) 4793 unaccount_freq_event(); 4794 if (event->attr.context_switch) { 4795 dec = true; 4796 atomic_dec(&nr_switch_events); 4797 } 4798 if (is_cgroup_event(event)) 4799 dec = true; 4800 if (has_branch_stack(event)) 4801 dec = true; 4802 if (event->attr.ksymbol) 4803 atomic_dec(&nr_ksymbol_events); 4804 if (event->attr.bpf_event) 4805 atomic_dec(&nr_bpf_events); 4806 if (event->attr.text_poke) 4807 atomic_dec(&nr_text_poke_events); 4808 4809 if (dec) { 4810 if (!atomic_add_unless(&perf_sched_count, -1, 1)) 4811 schedule_delayed_work(&perf_sched_work, HZ); 4812 } 4813 4814 unaccount_event_cpu(event, event->cpu); 4815 4816 unaccount_pmu_sb_event(event); 4817 } 4818 4819 static void perf_sched_delayed(struct work_struct *work) 4820 { 4821 mutex_lock(&perf_sched_mutex); 4822 if (atomic_dec_and_test(&perf_sched_count)) 4823 static_branch_disable(&perf_sched_events); 4824 mutex_unlock(&perf_sched_mutex); 4825 } 4826 4827 /* 4828 * The following implement mutual exclusion of events on "exclusive" pmus 4829 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 4830 * at a time, so we disallow creating events that might conflict, namely: 4831 * 4832 * 1) cpu-wide events in the presence of per-task events, 4833 * 2) per-task events in the presence of cpu-wide events, 4834 * 3) two matching events on the same context. 4835 * 4836 * The former two cases are handled in the allocation path (perf_event_alloc(), 4837 * _free_event()), the latter -- before the first perf_install_in_context(). 4838 */ 4839 static int exclusive_event_init(struct perf_event *event) 4840 { 4841 struct pmu *pmu = event->pmu; 4842 4843 if (!is_exclusive_pmu(pmu)) 4844 return 0; 4845 4846 /* 4847 * Prevent co-existence of per-task and cpu-wide events on the 4848 * same exclusive pmu. 4849 * 4850 * Negative pmu::exclusive_cnt means there are cpu-wide 4851 * events on this "exclusive" pmu, positive means there are 4852 * per-task events. 
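 *
 * For example: after two per-task events are created, exclusive_cnt == 2,
 * so a subsequent cpu-wide event fails atomic_dec_unless_positive() below
 * and its creation is rejected with -EBUSY.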
4853 * 4854 * Since this is called in perf_event_alloc() path, event::ctx 4855 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 4856 * to mean "per-task event", because unlike other attach states it 4857 * never gets cleared. 4858 */ 4859 if (event->attach_state & PERF_ATTACH_TASK) { 4860 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 4861 return -EBUSY; 4862 } else { 4863 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 4864 return -EBUSY; 4865 } 4866 4867 return 0; 4868 } 4869 4870 static void exclusive_event_destroy(struct perf_event *event) 4871 { 4872 struct pmu *pmu = event->pmu; 4873 4874 if (!is_exclusive_pmu(pmu)) 4875 return; 4876 4877 /* see comment in exclusive_event_init() */ 4878 if (event->attach_state & PERF_ATTACH_TASK) 4879 atomic_dec(&pmu->exclusive_cnt); 4880 else 4881 atomic_inc(&pmu->exclusive_cnt); 4882 } 4883 4884 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 4885 { 4886 if ((e1->pmu == e2->pmu) && 4887 (e1->cpu == e2->cpu || 4888 e1->cpu == -1 || 4889 e2->cpu == -1)) 4890 return true; 4891 return false; 4892 } 4893 4894 static bool exclusive_event_installable(struct perf_event *event, 4895 struct perf_event_context *ctx) 4896 { 4897 struct perf_event *iter_event; 4898 struct pmu *pmu = event->pmu; 4899 4900 lockdep_assert_held(&ctx->mutex); 4901 4902 if (!is_exclusive_pmu(pmu)) 4903 return true; 4904 4905 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 4906 if (exclusive_event_match(iter_event, event)) 4907 return false; 4908 } 4909 4910 return true; 4911 } 4912 4913 static void perf_addr_filters_splice(struct perf_event *event, 4914 struct list_head *head); 4915 4916 static void _free_event(struct perf_event *event) 4917 { 4918 irq_work_sync(&event->pending); 4919 4920 unaccount_event(event); 4921 4922 security_perf_event_free(event); 4923 4924 if (event->rb) { 4925 /* 4926 * Can happen when we close an event with re-directed output. 4927 * 4928 * Since we have a 0 refcount, perf_mmap_close() will skip 4929 * over us; possibly making our ring_buffer_put() the last. 4930 */ 4931 mutex_lock(&event->mmap_mutex); 4932 ring_buffer_attach(event, NULL); 4933 mutex_unlock(&event->mmap_mutex); 4934 } 4935 4936 if (is_cgroup_event(event)) 4937 perf_detach_cgroup(event); 4938 4939 if (!event->parent) { 4940 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 4941 put_callchain_buffers(); 4942 } 4943 4944 perf_event_free_bpf_prog(event); 4945 perf_addr_filters_splice(event, NULL); 4946 kfree(event->addr_filter_ranges); 4947 4948 if (event->destroy) 4949 event->destroy(event); 4950 4951 /* 4952 * Must be after ->destroy(), due to uprobe_perf_close() using 4953 * hw.target. 4954 */ 4955 if (event->hw.target) 4956 put_task_struct(event->hw.target); 4957 4958 /* 4959 * perf_event_free_task() relies on put_ctx() being 'last', in particular 4960 * all task references must be cleaned up. 4961 */ 4962 if (event->ctx) 4963 put_ctx(event->ctx); 4964 4965 exclusive_event_destroy(event); 4966 module_put(event->pmu->module); 4967 4968 call_rcu(&event->rcu_head, free_event_rcu); 4969 } 4970 4971 /* 4972 * Used to free events which have a known refcount of 1, such as in error paths 4973 * where the event isn't exposed yet and inherited events. 
4974 */ 4975 static void free_event(struct perf_event *event) 4976 { 4977 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 4978 "unexpected event refcount: %ld; ptr=%p\n", 4979 atomic_long_read(&event->refcount), event)) { 4980 /* leak to avoid use-after-free */ 4981 return; 4982 } 4983 4984 _free_event(event); 4985 } 4986 4987 /* 4988 * Remove user event from the owner task. 4989 */ 4990 static void perf_remove_from_owner(struct perf_event *event) 4991 { 4992 struct task_struct *owner; 4993 4994 rcu_read_lock(); 4995 /* 4996 * Matches the smp_store_release() in perf_event_exit_task(). If we 4997 * observe !owner it means the list deletion is complete and we can 4998 * indeed free this event, otherwise we need to serialize on 4999 * owner->perf_event_mutex. 5000 */ 5001 owner = READ_ONCE(event->owner); 5002 if (owner) { 5003 /* 5004 * Since delayed_put_task_struct() also drops the last 5005 * task reference we can safely take a new reference 5006 * while holding the rcu_read_lock(). 5007 */ 5008 get_task_struct(owner); 5009 } 5010 rcu_read_unlock(); 5011 5012 if (owner) { 5013 /* 5014 * If we're here through perf_event_exit_task() we're already 5015 * holding ctx->mutex which would be an inversion wrt. the 5016 * normal lock order. 5017 * 5018 * However we can safely take this lock because its the child 5019 * ctx->mutex. 5020 */ 5021 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 5022 5023 /* 5024 * We have to re-check the event->owner field, if it is cleared 5025 * we raced with perf_event_exit_task(), acquiring the mutex 5026 * ensured they're done, and we can proceed with freeing the 5027 * event. 5028 */ 5029 if (event->owner) { 5030 list_del_init(&event->owner_entry); 5031 smp_store_release(&event->owner, NULL); 5032 } 5033 mutex_unlock(&owner->perf_event_mutex); 5034 put_task_struct(owner); 5035 } 5036 } 5037 5038 static void put_event(struct perf_event *event) 5039 { 5040 if (!atomic_long_dec_and_test(&event->refcount)) 5041 return; 5042 5043 _free_event(event); 5044 } 5045 5046 /* 5047 * Kill an event dead; while event:refcount will preserve the event 5048 * object, it will not preserve its functionality. Once the last 'user' 5049 * gives up the object, we'll destroy the thing. 5050 */ 5051 int perf_event_release_kernel(struct perf_event *event) 5052 { 5053 struct perf_event_context *ctx = event->ctx; 5054 struct perf_event *child, *tmp; 5055 LIST_HEAD(free_list); 5056 5057 /* 5058 * If we got here through err_file: fput(event_file); we will not have 5059 * attached to a context yet. 5060 */ 5061 if (!ctx) { 5062 WARN_ON_ONCE(event->attach_state & 5063 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP)); 5064 goto no_ctx; 5065 } 5066 5067 if (!is_kernel_event(event)) 5068 perf_remove_from_owner(event); 5069 5070 ctx = perf_event_ctx_lock(event); 5071 WARN_ON_ONCE(ctx->parent_ctx); 5072 perf_remove_from_context(event, DETACH_GROUP); 5073 5074 raw_spin_lock_irq(&ctx->lock); 5075 /* 5076 * Mark this event as STATE_DEAD, there is no external reference to it 5077 * anymore. 5078 * 5079 * Anybody acquiring event->child_mutex after the below loop _must_ 5080 * also see this, most importantly inherit_event() which will avoid 5081 * placing more children on the list. 5082 * 5083 * Thus this guarantees that we will in fact observe and kill _ALL_ 5084 * child events. 
5085 */ 5086 event->state = PERF_EVENT_STATE_DEAD; 5087 raw_spin_unlock_irq(&ctx->lock); 5088 5089 perf_event_ctx_unlock(event, ctx); 5090 5091 again: 5092 mutex_lock(&event->child_mutex); 5093 list_for_each_entry(child, &event->child_list, child_list) { 5094 5095 /* 5096 * Cannot change, child events are not migrated, see the 5097 * comment with perf_event_ctx_lock_nested(). 5098 */ 5099 ctx = READ_ONCE(child->ctx); 5100 /* 5101 * Since child_mutex nests inside ctx::mutex, we must jump 5102 * through hoops. We start by grabbing a reference on the ctx. 5103 * 5104 * Since the event cannot get freed while we hold the 5105 * child_mutex, the context must also exist and have a !0 5106 * reference count. 5107 */ 5108 get_ctx(ctx); 5109 5110 /* 5111 * Now that we have a ctx ref, we can drop child_mutex, and 5112 * acquire ctx::mutex without fear of it going away. Then we 5113 * can re-acquire child_mutex. 5114 */ 5115 mutex_unlock(&event->child_mutex); 5116 mutex_lock(&ctx->mutex); 5117 mutex_lock(&event->child_mutex); 5118 5119 /* 5120 * Now that we hold ctx::mutex and child_mutex, revalidate our 5121 * state, if child is still the first entry, it didn't get freed 5122 * and we can continue doing so. 5123 */ 5124 tmp = list_first_entry_or_null(&event->child_list, 5125 struct perf_event, child_list); 5126 if (tmp == child) { 5127 perf_remove_from_context(child, DETACH_GROUP); 5128 list_move(&child->child_list, &free_list); 5129 /* 5130 * This matches the refcount bump in inherit_event(); 5131 * this can't be the last reference. 5132 */ 5133 put_event(event); 5134 } 5135 5136 mutex_unlock(&event->child_mutex); 5137 mutex_unlock(&ctx->mutex); 5138 put_ctx(ctx); 5139 goto again; 5140 } 5141 mutex_unlock(&event->child_mutex); 5142 5143 list_for_each_entry_safe(child, tmp, &free_list, child_list) { 5144 void *var = &child->ctx->refcount; 5145 5146 list_del(&child->child_list); 5147 free_event(child); 5148 5149 /* 5150 * Wake any perf_event_free_task() waiting for this event to be 5151 * freed. 5152 */ 5153 smp_mb(); /* pairs with wait_var_event() */ 5154 wake_up_var(var); 5155 } 5156 5157 no_ctx: 5158 put_event(event); /* Must be the 'last' reference */ 5159 return 0; 5160 } 5161 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 5162 5163 /* 5164 * Called when the last reference to the file is gone. 
5165 */ 5166 static int perf_release(struct inode *inode, struct file *file) 5167 { 5168 perf_event_release_kernel(file->private_data); 5169 return 0; 5170 } 5171 5172 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 5173 { 5174 struct perf_event *child; 5175 u64 total = 0; 5176 5177 *enabled = 0; 5178 *running = 0; 5179 5180 mutex_lock(&event->child_mutex); 5181 5182 (void)perf_event_read(event, false); 5183 total += perf_event_count(event); 5184 5185 *enabled += event->total_time_enabled + 5186 atomic64_read(&event->child_total_time_enabled); 5187 *running += event->total_time_running + 5188 atomic64_read(&event->child_total_time_running); 5189 5190 list_for_each_entry(child, &event->child_list, child_list) { 5191 (void)perf_event_read(child, false); 5192 total += perf_event_count(child); 5193 *enabled += child->total_time_enabled; 5194 *running += child->total_time_running; 5195 } 5196 mutex_unlock(&event->child_mutex); 5197 5198 return total; 5199 } 5200 5201 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 5202 { 5203 struct perf_event_context *ctx; 5204 u64 count; 5205 5206 ctx = perf_event_ctx_lock(event); 5207 count = __perf_event_read_value(event, enabled, running); 5208 perf_event_ctx_unlock(event, ctx); 5209 5210 return count; 5211 } 5212 EXPORT_SYMBOL_GPL(perf_event_read_value); 5213 5214 static int __perf_read_group_add(struct perf_event *leader, 5215 u64 read_format, u64 *values) 5216 { 5217 struct perf_event_context *ctx = leader->ctx; 5218 struct perf_event *sub; 5219 unsigned long flags; 5220 int n = 1; /* skip @nr */ 5221 int ret; 5222 5223 ret = perf_event_read(leader, true); 5224 if (ret) 5225 return ret; 5226 5227 raw_spin_lock_irqsave(&ctx->lock, flags); 5228 5229 /* 5230 * Since we co-schedule groups, {enabled,running} times of siblings 5231 * will be identical to those of the leader, so we only publish one 5232 * set. 5233 */ 5234 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 5235 values[n++] += leader->total_time_enabled + 5236 atomic64_read(&leader->child_total_time_enabled); 5237 } 5238 5239 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5240 values[n++] += leader->total_time_running + 5241 atomic64_read(&leader->child_total_time_running); 5242 } 5243 5244 /* 5245 * Write {count,id} tuples for every sibling. 5246 */ 5247 values[n++] += perf_event_count(leader); 5248 if (read_format & PERF_FORMAT_ID) 5249 values[n++] = primary_event_id(leader); 5250 5251 for_each_sibling_event(sub, leader) { 5252 values[n++] += perf_event_count(sub); 5253 if (read_format & PERF_FORMAT_ID) 5254 values[n++] = primary_event_id(sub); 5255 } 5256 5257 raw_spin_unlock_irqrestore(&ctx->lock, flags); 5258 return 0; 5259 } 5260 5261 static int perf_read_group(struct perf_event *event, 5262 u64 read_format, char __user *buf) 5263 { 5264 struct perf_event *leader = event->group_leader, *child; 5265 struct perf_event_context *ctx = leader->ctx; 5266 int ret; 5267 u64 *values; 5268 5269 lockdep_assert_held(&ctx->mutex); 5270 5271 values = kzalloc(event->read_size, GFP_KERNEL); 5272 if (!values) 5273 return -ENOMEM; 5274 5275 values[0] = 1 + leader->nr_siblings; 5276 5277 /* 5278 * By locking the child_mutex of the leader we effectively 5279 * lock the child list of all siblings.. XXX explain how. 
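 *
 * The buffer copied to user space below follows the PERF_FORMAT_GROUP
 * layout built by __perf_read_group_add(); e.g. with
 * PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING and
 * PERF_FORMAT_ID set, a leader with one sibling reads back as
 * { nr = 2, time_enabled, time_running, leader value, leader id,
 *   sibling value, sibling id }.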
5280 */ 5281 mutex_lock(&leader->child_mutex); 5282 5283 ret = __perf_read_group_add(leader, read_format, values); 5284 if (ret) 5285 goto unlock; 5286 5287 list_for_each_entry(child, &leader->child_list, child_list) { 5288 ret = __perf_read_group_add(child, read_format, values); 5289 if (ret) 5290 goto unlock; 5291 } 5292 5293 mutex_unlock(&leader->child_mutex); 5294 5295 ret = event->read_size; 5296 if (copy_to_user(buf, values, event->read_size)) 5297 ret = -EFAULT; 5298 goto out; 5299 5300 unlock: 5301 mutex_unlock(&leader->child_mutex); 5302 out: 5303 kfree(values); 5304 return ret; 5305 } 5306 5307 static int perf_read_one(struct perf_event *event, 5308 u64 read_format, char __user *buf) 5309 { 5310 u64 enabled, running; 5311 u64 values[4]; 5312 int n = 0; 5313 5314 values[n++] = __perf_event_read_value(event, &enabled, &running); 5315 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5316 values[n++] = enabled; 5317 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5318 values[n++] = running; 5319 if (read_format & PERF_FORMAT_ID) 5320 values[n++] = primary_event_id(event); 5321 5322 if (copy_to_user(buf, values, n * sizeof(u64))) 5323 return -EFAULT; 5324 5325 return n * sizeof(u64); 5326 } 5327 5328 static bool is_event_hup(struct perf_event *event) 5329 { 5330 bool no_children; 5331 5332 if (event->state > PERF_EVENT_STATE_EXIT) 5333 return false; 5334 5335 mutex_lock(&event->child_mutex); 5336 no_children = list_empty(&event->child_list); 5337 mutex_unlock(&event->child_mutex); 5338 return no_children; 5339 } 5340 5341 /* 5342 * Read the performance event - simple non blocking version for now 5343 */ 5344 static ssize_t 5345 __perf_read(struct perf_event *event, char __user *buf, size_t count) 5346 { 5347 u64 read_format = event->attr.read_format; 5348 int ret; 5349 5350 /* 5351 * Return end-of-file for a read on an event that is in 5352 * error state (i.e. because it was pinned but it couldn't be 5353 * scheduled on to the CPU at some point). 5354 */ 5355 if (event->state == PERF_EVENT_STATE_ERROR) 5356 return 0; 5357 5358 if (count < event->read_size) 5359 return -ENOSPC; 5360 5361 WARN_ON_ONCE(event->ctx->parent_ctx); 5362 if (read_format & PERF_FORMAT_GROUP) 5363 ret = perf_read_group(event, read_format, buf); 5364 else 5365 ret = perf_read_one(event, read_format, buf); 5366 5367 return ret; 5368 } 5369 5370 static ssize_t 5371 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 5372 { 5373 struct perf_event *event = file->private_data; 5374 struct perf_event_context *ctx; 5375 int ret; 5376 5377 ret = security_perf_event_read(event); 5378 if (ret) 5379 return ret; 5380 5381 ctx = perf_event_ctx_lock(event); 5382 ret = __perf_read(event, buf, count); 5383 perf_event_ctx_unlock(event, ctx); 5384 5385 return ret; 5386 } 5387 5388 static __poll_t perf_poll(struct file *file, poll_table *wait) 5389 { 5390 struct perf_event *event = file->private_data; 5391 struct perf_buffer *rb; 5392 __poll_t events = EPOLLHUP; 5393 5394 poll_wait(file, &event->waitq, wait); 5395 5396 if (is_event_hup(event)) 5397 return events; 5398 5399 /* 5400 * Pin the event->rb by taking event->mmap_mutex; otherwise 5401 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 
5402 */ 5403 mutex_lock(&event->mmap_mutex); 5404 rb = event->rb; 5405 if (rb) 5406 events = atomic_xchg(&rb->poll, 0); 5407 mutex_unlock(&event->mmap_mutex); 5408 return events; 5409 } 5410 5411 static void _perf_event_reset(struct perf_event *event) 5412 { 5413 (void)perf_event_read(event, false); 5414 local64_set(&event->count, 0); 5415 perf_event_update_userpage(event); 5416 } 5417 5418 /* Assume it's not an event with inherit set. */ 5419 u64 perf_event_pause(struct perf_event *event, bool reset) 5420 { 5421 struct perf_event_context *ctx; 5422 u64 count; 5423 5424 ctx = perf_event_ctx_lock(event); 5425 WARN_ON_ONCE(event->attr.inherit); 5426 _perf_event_disable(event); 5427 count = local64_read(&event->count); 5428 if (reset) 5429 local64_set(&event->count, 0); 5430 perf_event_ctx_unlock(event, ctx); 5431 5432 return count; 5433 } 5434 EXPORT_SYMBOL_GPL(perf_event_pause); 5435 5436 /* 5437 * Holding the top-level event's child_mutex means that any 5438 * descendant process that has inherited this event will block 5439 * in perf_event_exit_event() if it goes to exit, thus satisfying the 5440 * task existence requirements of perf_event_enable/disable. 5441 */ 5442 static void perf_event_for_each_child(struct perf_event *event, 5443 void (*func)(struct perf_event *)) 5444 { 5445 struct perf_event *child; 5446 5447 WARN_ON_ONCE(event->ctx->parent_ctx); 5448 5449 mutex_lock(&event->child_mutex); 5450 func(event); 5451 list_for_each_entry(child, &event->child_list, child_list) 5452 func(child); 5453 mutex_unlock(&event->child_mutex); 5454 } 5455 5456 static void perf_event_for_each(struct perf_event *event, 5457 void (*func)(struct perf_event *)) 5458 { 5459 struct perf_event_context *ctx = event->ctx; 5460 struct perf_event *sibling; 5461 5462 lockdep_assert_held(&ctx->mutex); 5463 5464 event = event->group_leader; 5465 5466 perf_event_for_each_child(event, func); 5467 for_each_sibling_event(sibling, event) 5468 perf_event_for_each_child(sibling, func); 5469 } 5470 5471 static void __perf_event_period(struct perf_event *event, 5472 struct perf_cpu_context *cpuctx, 5473 struct perf_event_context *ctx, 5474 void *info) 5475 { 5476 u64 value = *((u64 *)info); 5477 bool active; 5478 5479 if (event->attr.freq) { 5480 event->attr.sample_freq = value; 5481 } else { 5482 event->attr.sample_period = value; 5483 event->hw.sample_period = value; 5484 } 5485 5486 active = (event->state == PERF_EVENT_STATE_ACTIVE); 5487 if (active) { 5488 perf_pmu_disable(ctx->pmu); 5489 /* 5490 * We could be throttled; unthrottle now to avoid the tick 5491 * trying to unthrottle while we already re-started the event. 
5492 */ 5493 if (event->hw.interrupts == MAX_INTERRUPTS) { 5494 event->hw.interrupts = 0; 5495 perf_log_throttle(event, 1); 5496 } 5497 event->pmu->stop(event, PERF_EF_UPDATE); 5498 } 5499 5500 local64_set(&event->hw.period_left, 0); 5501 5502 if (active) { 5503 event->pmu->start(event, PERF_EF_RELOAD); 5504 perf_pmu_enable(ctx->pmu); 5505 } 5506 } 5507 5508 static int perf_event_check_period(struct perf_event *event, u64 value) 5509 { 5510 return event->pmu->check_period(event, value); 5511 } 5512 5513 static int _perf_event_period(struct perf_event *event, u64 value) 5514 { 5515 if (!is_sampling_event(event)) 5516 return -EINVAL; 5517 5518 if (!value) 5519 return -EINVAL; 5520 5521 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 5522 return -EINVAL; 5523 5524 if (perf_event_check_period(event, value)) 5525 return -EINVAL; 5526 5527 if (!event->attr.freq && (value & (1ULL << 63))) 5528 return -EINVAL; 5529 5530 event_function_call(event, __perf_event_period, &value); 5531 5532 return 0; 5533 } 5534 5535 int perf_event_period(struct perf_event *event, u64 value) 5536 { 5537 struct perf_event_context *ctx; 5538 int ret; 5539 5540 ctx = perf_event_ctx_lock(event); 5541 ret = _perf_event_period(event, value); 5542 perf_event_ctx_unlock(event, ctx); 5543 5544 return ret; 5545 } 5546 EXPORT_SYMBOL_GPL(perf_event_period); 5547 5548 static const struct file_operations perf_fops; 5549 5550 static inline int perf_fget_light(int fd, struct fd *p) 5551 { 5552 struct fd f = fdget(fd); 5553 if (!f.file) 5554 return -EBADF; 5555 5556 if (f.file->f_op != &perf_fops) { 5557 fdput(f); 5558 return -EBADF; 5559 } 5560 *p = f; 5561 return 0; 5562 } 5563 5564 static int perf_event_set_output(struct perf_event *event, 5565 struct perf_event *output_event); 5566 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 5567 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 5568 static int perf_copy_attr(struct perf_event_attr __user *uattr, 5569 struct perf_event_attr *attr); 5570 5571 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 5572 { 5573 void (*func)(struct perf_event *); 5574 u32 flags = arg; 5575 5576 switch (cmd) { 5577 case PERF_EVENT_IOC_ENABLE: 5578 func = _perf_event_enable; 5579 break; 5580 case PERF_EVENT_IOC_DISABLE: 5581 func = _perf_event_disable; 5582 break; 5583 case PERF_EVENT_IOC_RESET: 5584 func = _perf_event_reset; 5585 break; 5586 5587 case PERF_EVENT_IOC_REFRESH: 5588 return _perf_event_refresh(event, arg); 5589 5590 case PERF_EVENT_IOC_PERIOD: 5591 { 5592 u64 value; 5593 5594 if (copy_from_user(&value, (u64 __user *)arg, sizeof(value))) 5595 return -EFAULT; 5596 5597 return _perf_event_period(event, value); 5598 } 5599 case PERF_EVENT_IOC_ID: 5600 { 5601 u64 id = primary_event_id(event); 5602 5603 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 5604 return -EFAULT; 5605 return 0; 5606 } 5607 5608 case PERF_EVENT_IOC_SET_OUTPUT: 5609 { 5610 int ret; 5611 if (arg != -1) { 5612 struct perf_event *output_event; 5613 struct fd output; 5614 ret = perf_fget_light(arg, &output); 5615 if (ret) 5616 return ret; 5617 output_event = output.file->private_data; 5618 ret = perf_event_set_output(event, output_event); 5619 fdput(output); 5620 } else { 5621 ret = perf_event_set_output(event, NULL); 5622 } 5623 return ret; 5624 } 5625 5626 case PERF_EVENT_IOC_SET_FILTER: 5627 return perf_event_set_filter(event, (void __user *)arg); 5628 5629 case PERF_EVENT_IOC_SET_BPF: 5630 return 
perf_event_set_bpf_prog(event, arg); 5631 5632 case PERF_EVENT_IOC_PAUSE_OUTPUT: { 5633 struct perf_buffer *rb; 5634 5635 rcu_read_lock(); 5636 rb = rcu_dereference(event->rb); 5637 if (!rb || !rb->nr_pages) { 5638 rcu_read_unlock(); 5639 return -EINVAL; 5640 } 5641 rb_toggle_paused(rb, !!arg); 5642 rcu_read_unlock(); 5643 return 0; 5644 } 5645 5646 case PERF_EVENT_IOC_QUERY_BPF: 5647 return perf_event_query_prog_array(event, (void __user *)arg); 5648 5649 case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: { 5650 struct perf_event_attr new_attr; 5651 int err = perf_copy_attr((struct perf_event_attr __user *)arg, 5652 &new_attr); 5653 5654 if (err) 5655 return err; 5656 5657 return perf_event_modify_attr(event, &new_attr); 5658 } 5659 default: 5660 return -ENOTTY; 5661 } 5662 5663 if (flags & PERF_IOC_FLAG_GROUP) 5664 perf_event_for_each(event, func); 5665 else 5666 perf_event_for_each_child(event, func); 5667 5668 return 0; 5669 } 5670 5671 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 5672 { 5673 struct perf_event *event = file->private_data; 5674 struct perf_event_context *ctx; 5675 long ret; 5676 5677 /* Treat ioctl like writes as it is likely a mutating operation. */ 5678 ret = security_perf_event_write(event); 5679 if (ret) 5680 return ret; 5681 5682 ctx = perf_event_ctx_lock(event); 5683 ret = _perf_ioctl(event, cmd, arg); 5684 perf_event_ctx_unlock(event, ctx); 5685 5686 return ret; 5687 } 5688 5689 #ifdef CONFIG_COMPAT 5690 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 5691 unsigned long arg) 5692 { 5693 switch (_IOC_NR(cmd)) { 5694 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 5695 case _IOC_NR(PERF_EVENT_IOC_ID): 5696 case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF): 5697 case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES): 5698 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */ 5699 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 5700 cmd &= ~IOCSIZE_MASK; 5701 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 5702 } 5703 break; 5704 } 5705 return perf_ioctl(file, cmd, arg); 5706 } 5707 #else 5708 # define perf_compat_ioctl NULL 5709 #endif 5710 5711 int perf_event_task_enable(void) 5712 { 5713 struct perf_event_context *ctx; 5714 struct perf_event *event; 5715 5716 mutex_lock(&current->perf_event_mutex); 5717 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 5718 ctx = perf_event_ctx_lock(event); 5719 perf_event_for_each_child(event, _perf_event_enable); 5720 perf_event_ctx_unlock(event, ctx); 5721 } 5722 mutex_unlock(&current->perf_event_mutex); 5723 5724 return 0; 5725 } 5726 5727 int perf_event_task_disable(void) 5728 { 5729 struct perf_event_context *ctx; 5730 struct perf_event *event; 5731 5732 mutex_lock(&current->perf_event_mutex); 5733 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 5734 ctx = perf_event_ctx_lock(event); 5735 perf_event_for_each_child(event, _perf_event_disable); 5736 perf_event_ctx_unlock(event, ctx); 5737 } 5738 mutex_unlock(&current->perf_event_mutex); 5739 5740 return 0; 5741 } 5742 5743 static int perf_event_index(struct perf_event *event) 5744 { 5745 if (event->hw.state & PERF_HES_STOPPED) 5746 return 0; 5747 5748 if (event->state != PERF_EVENT_STATE_ACTIVE) 5749 return 0; 5750 5751 return event->pmu->event_idx(event); 5752 } 5753 5754 static void calc_timer_values(struct perf_event *event, 5755 u64 *now, 5756 u64 *enabled, 5757 u64 *running) 5758 { 5759 u64 ctx_time; 5760 5761 *now = perf_clock(); 5762 ctx_time = event->shadow_ctx_time + *now; 5763 __perf_update_times(event, ctx_time, enabled, running); 5764 } 5765 5766
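/*
 * The functions below maintain the perf_event_mmap_page that user space
 * mmap()s at offset 0 of the ring buffer. As a sketch of the reader side
 * (adapted from the include/uapi/linux/perf_event.h documentation, with
 * pc pointing at that first page; rdpmc() stands in for the x86
 * instruction and is not portable), a self-monitoring task pairs with the
 * lock/seqcount updates done in perf_event_update_userpage() like this:
 *
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (pc->cap_user_rdpmc && idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */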
static void perf_event_init_userpage(struct perf_event *event) 5767 { 5768 struct perf_event_mmap_page *userpg; 5769 struct perf_buffer *rb; 5770 5771 rcu_read_lock(); 5772 rb = rcu_dereference(event->rb); 5773 if (!rb) 5774 goto unlock; 5775 5776 userpg = rb->user_page; 5777 5778 /* Allow new userspace to detect that bit 0 is deprecated */ 5779 userpg->cap_bit0_is_deprecated = 1; 5780 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 5781 userpg->data_offset = PAGE_SIZE; 5782 userpg->data_size = perf_data_size(rb); 5783 5784 unlock: 5785 rcu_read_unlock(); 5786 } 5787 5788 void __weak arch_perf_update_userpage( 5789 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) 5790 { 5791 } 5792 5793 /* 5794 * Callers need to ensure there can be no nesting of this function, otherwise 5795 * the seqlock logic goes bad. We cannot serialize this because the arch 5796 * code calls this from NMI context. 5797 */ 5798 void perf_event_update_userpage(struct perf_event *event) 5799 { 5800 struct perf_event_mmap_page *userpg; 5801 struct perf_buffer *rb; 5802 u64 enabled, running, now; 5803 5804 rcu_read_lock(); 5805 rb = rcu_dereference(event->rb); 5806 if (!rb) 5807 goto unlock; 5808 5809 /* 5810 * compute total_time_enabled, total_time_running 5811 * based on snapshot values taken when the event 5812 * was last scheduled in. 5813 * 5814 * we cannot simply call update_context_time() 5815 * because of a locking issue, as we can be called in 5816 * NMI context 5817 */ 5818 calc_timer_values(event, &now, &enabled, &running); 5819 5820 userpg = rb->user_page; 5821 /* 5822 * Disable preemption to guarantee consistent time stamps are stored to 5823 * the user page. 5824 */ 5825 preempt_disable(); 5826 ++userpg->lock; 5827 barrier(); 5828 userpg->index = perf_event_index(event); 5829 userpg->offset = perf_event_count(event); 5830 if (userpg->index) 5831 userpg->offset -= local64_read(&event->hw.prev_count); 5832 5833 userpg->time_enabled = enabled + 5834 atomic64_read(&event->child_total_time_enabled); 5835 5836 userpg->time_running = running + 5837 atomic64_read(&event->child_total_time_running); 5838 5839 arch_perf_update_userpage(event, userpg, now); 5840 5841 barrier(); 5842 ++userpg->lock; 5843 preempt_enable(); 5844 unlock: 5845 rcu_read_unlock(); 5846 } 5847 EXPORT_SYMBOL_GPL(perf_event_update_userpage); 5848 5849 static vm_fault_t perf_mmap_fault(struct vm_fault *vmf) 5850 { 5851 struct perf_event *event = vmf->vma->vm_file->private_data; 5852 struct perf_buffer *rb; 5853 vm_fault_t ret = VM_FAULT_SIGBUS; 5854 5855 if (vmf->flags & FAULT_FLAG_MKWRITE) { 5856 if (vmf->pgoff == 0) 5857 ret = 0; 5858 return ret; 5859 } 5860 5861 rcu_read_lock(); 5862 rb = rcu_dereference(event->rb); 5863 if (!rb) 5864 goto unlock; 5865 5866 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 5867 goto unlock; 5868 5869 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 5870 if (!vmf->page) 5871 goto unlock; 5872 5873 get_page(vmf->page); 5874 vmf->page->mapping = vmf->vma->vm_file->f_mapping; 5875 vmf->page->index = vmf->pgoff; 5876 5877 ret = 0; 5878 unlock: 5879 rcu_read_unlock(); 5880 5881 return ret; 5882 } 5883 5884 static void ring_buffer_attach(struct perf_event *event, 5885 struct perf_buffer *rb) 5886 { 5887 struct perf_buffer *old_rb = NULL; 5888 unsigned long flags; 5889 5890 if (event->rb) { 5891 /* 5892 * Should be impossible, we set this when removing 5893 * event->rb_entry and wait/clear when adding event->rb_entry.
5894 */ 5895 WARN_ON_ONCE(event->rcu_pending); 5896 5897 old_rb = event->rb; 5898 spin_lock_irqsave(&old_rb->event_lock, flags); 5899 list_del_rcu(&event->rb_entry); 5900 spin_unlock_irqrestore(&old_rb->event_lock, flags); 5901 5902 event->rcu_batches = get_state_synchronize_rcu(); 5903 event->rcu_pending = 1; 5904 } 5905 5906 if (rb) { 5907 if (event->rcu_pending) { 5908 cond_synchronize_rcu(event->rcu_batches); 5909 event->rcu_pending = 0; 5910 } 5911 5912 spin_lock_irqsave(&rb->event_lock, flags); 5913 list_add_rcu(&event->rb_entry, &rb->event_list); 5914 spin_unlock_irqrestore(&rb->event_lock, flags); 5915 } 5916 5917 /* 5918 * Avoid racing with perf_mmap_close(AUX): stop the event 5919 * before swizzling the event::rb pointer; if it's getting 5920 * unmapped, its aux_mmap_count will be 0 and it won't 5921 * restart. See the comment in __perf_pmu_output_stop(). 5922 * 5923 * Data will inevitably be lost when set_output is done in 5924 * mid-air, but then again, whoever does it like this is 5925 * not in for the data anyway. 5926 */ 5927 if (has_aux(event)) 5928 perf_event_stop(event, 0); 5929 5930 rcu_assign_pointer(event->rb, rb); 5931 5932 if (old_rb) { 5933 ring_buffer_put(old_rb); 5934 /* 5935 * Since we detached before setting the new rb, so that we 5936 * could attach the new rb, we could have missed a wakeup. 5937 * Provide it now. 5938 */ 5939 wake_up_all(&event->waitq); 5940 } 5941 } 5942 5943 static void ring_buffer_wakeup(struct perf_event *event) 5944 { 5945 struct perf_buffer *rb; 5946 5947 rcu_read_lock(); 5948 rb = rcu_dereference(event->rb); 5949 if (rb) { 5950 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 5951 wake_up_all(&event->waitq); 5952 } 5953 rcu_read_unlock(); 5954 } 5955 5956 struct perf_buffer *ring_buffer_get(struct perf_event *event) 5957 { 5958 struct perf_buffer *rb; 5959 5960 rcu_read_lock(); 5961 rb = rcu_dereference(event->rb); 5962 if (rb) { 5963 if (!refcount_inc_not_zero(&rb->refcount)) 5964 rb = NULL; 5965 } 5966 rcu_read_unlock(); 5967 5968 return rb; 5969 } 5970 5971 void ring_buffer_put(struct perf_buffer *rb) 5972 { 5973 if (!refcount_dec_and_test(&rb->refcount)) 5974 return; 5975 5976 WARN_ON_ONCE(!list_empty(&rb->event_list)); 5977 5978 call_rcu(&rb->rcu_head, rb_free_rcu); 5979 } 5980 5981 static void perf_mmap_open(struct vm_area_struct *vma) 5982 { 5983 struct perf_event *event = vma->vm_file->private_data; 5984 5985 atomic_inc(&event->mmap_count); 5986 atomic_inc(&event->rb->mmap_count); 5987 5988 if (vma->vm_pgoff) 5989 atomic_inc(&event->rb->aux_mmap_count); 5990 5991 if (event->pmu->event_mapped) 5992 event->pmu->event_mapped(event, vma->vm_mm); 5993 } 5994 5995 static void perf_pmu_output_stop(struct perf_event *event); 5996 5997 /* 5998 * A buffer can be mmap()ed multiple times; either directly through the same 5999 * event, or through other events by use of perf_event_set_output(). 6000 * 6001 * In order to undo the VM accounting done by perf_mmap() we need to destroy 6002 * the buffer here, where we still have a VM context. This means we need 6003 * to detach all events redirecting to us. 
6004 */ 6005 static void perf_mmap_close(struct vm_area_struct *vma) 6006 { 6007 struct perf_event *event = vma->vm_file->private_data; 6008 struct perf_buffer *rb = ring_buffer_get(event); 6009 struct user_struct *mmap_user = rb->mmap_user; 6010 int mmap_locked = rb->mmap_locked; 6011 unsigned long size = perf_data_size(rb); 6012 bool detach_rest = false; 6013 6014 if (event->pmu->event_unmapped) 6015 event->pmu->event_unmapped(event, vma->vm_mm); 6016 6017 /* 6018 * rb->aux_mmap_count will always drop before rb->mmap_count and 6019 * event->mmap_count, so it is ok to use event->mmap_mutex to 6020 * serialize with perf_mmap here. 6021 */ 6022 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 6023 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 6024 /* 6025 * Stop all AUX events that are writing to this buffer, 6026 * so that we can free its AUX pages and corresponding PMU 6027 * data. Note that after rb::aux_mmap_count dropped to zero, 6028 * they won't start any more (see perf_aux_output_begin()). 6029 */ 6030 perf_pmu_output_stop(event); 6031 6032 /* now it's safe to free the pages */ 6033 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm); 6034 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); 6035 6036 /* this has to be the last one */ 6037 rb_free_aux(rb); 6038 WARN_ON_ONCE(refcount_read(&rb->aux_refcount)); 6039 6040 mutex_unlock(&event->mmap_mutex); 6041 } 6042 6043 if (atomic_dec_and_test(&rb->mmap_count)) 6044 detach_rest = true; 6045 6046 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 6047 goto out_put; 6048 6049 ring_buffer_attach(event, NULL); 6050 mutex_unlock(&event->mmap_mutex); 6051 6052 /* If there's still other mmap()s of this buffer, we're done. */ 6053 if (!detach_rest) 6054 goto out_put; 6055 6056 /* 6057 * No other mmap()s, detach from all other events that might redirect 6058 * into the now unreachable buffer. Somewhat complicated by the 6059 * fact that rb::event_lock otherwise nests inside mmap_mutex. 6060 */ 6061 again: 6062 rcu_read_lock(); 6063 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 6064 if (!atomic_long_inc_not_zero(&event->refcount)) { 6065 /* 6066 * This event is en-route to free_event() which will 6067 * detach it and remove it from the list. 6068 */ 6069 continue; 6070 } 6071 rcu_read_unlock(); 6072 6073 mutex_lock(&event->mmap_mutex); 6074 /* 6075 * Check we didn't race with perf_event_set_output() which can 6076 * swizzle the rb from under us while we were waiting to 6077 * acquire mmap_mutex. 6078 * 6079 * If we find a different rb; ignore this event, a next 6080 * iteration will no longer find it on the list. We have to 6081 * still restart the iteration to make sure we're not now 6082 * iterating the wrong list. 6083 */ 6084 if (event->rb == rb) 6085 ring_buffer_attach(event, NULL); 6086 6087 mutex_unlock(&event->mmap_mutex); 6088 put_event(event); 6089 6090 /* 6091 * Restart the iteration; either we're on the wrong list or 6092 * destroyed its integrity by doing a deletion. 6093 */ 6094 goto again; 6095 } 6096 rcu_read_unlock(); 6097 6098 /* 6099 * It could be there's still a few 0-ref events on the list; they'll 6100 * get cleaned up by free_event() -- they'll also still have their 6101 * ref on the rb and will free it whenever they are done with it. 6102 * 6103 * Aside from that, this buffer is 'fully' detached and unmapped, 6104 * undo the VM accounting. 
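 *
 * perf_mmap() charged the data pages plus the control page, i.e.
 * (size >> PAGE_SHIFT) + 1, splitting the charge between user->locked_vm
 * and mm->pinned_vm (the pinned_vm share is remembered in rb->mmap_locked),
 * so the same split is undone below.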
6105 */ 6106 6107 atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked, 6108 &mmap_user->locked_vm); 6109 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); 6110 free_uid(mmap_user); 6111 6112 out_put: 6113 ring_buffer_put(rb); /* could be last */ 6114 } 6115 6116 static const struct vm_operations_struct perf_mmap_vmops = { 6117 .open = perf_mmap_open, 6118 .close = perf_mmap_close, /* non mergeable */ 6119 .fault = perf_mmap_fault, 6120 .page_mkwrite = perf_mmap_fault, 6121 }; 6122 6123 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 6124 { 6125 struct perf_event *event = file->private_data; 6126 unsigned long user_locked, user_lock_limit; 6127 struct user_struct *user = current_user(); 6128 struct perf_buffer *rb = NULL; 6129 unsigned long locked, lock_limit; 6130 unsigned long vma_size; 6131 unsigned long nr_pages; 6132 long user_extra = 0, extra = 0; 6133 int ret = 0, flags = 0; 6134 6135 /* 6136 * Don't allow mmap() of inherited per-task counters. This would 6137 * create a performance issue due to all children writing to the 6138 * same rb. 6139 */ 6140 if (event->cpu == -1 && event->attr.inherit) 6141 return -EINVAL; 6142 6143 if (!(vma->vm_flags & VM_SHARED)) 6144 return -EINVAL; 6145 6146 ret = security_perf_event_read(event); 6147 if (ret) 6148 return ret; 6149 6150 vma_size = vma->vm_end - vma->vm_start; 6151 6152 if (vma->vm_pgoff == 0) { 6153 nr_pages = (vma_size / PAGE_SIZE) - 1; 6154 } else { 6155 /* 6156 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 6157 * mapped, all subsequent mappings should have the same size 6158 * and offset. Must be above the normal perf buffer. 6159 */ 6160 u64 aux_offset, aux_size; 6161 6162 if (!event->rb) 6163 return -EINVAL; 6164 6165 nr_pages = vma_size / PAGE_SIZE; 6166 6167 mutex_lock(&event->mmap_mutex); 6168 ret = -EINVAL; 6169 6170 rb = event->rb; 6171 if (!rb) 6172 goto aux_unlock; 6173 6174 aux_offset = READ_ONCE(rb->user_page->aux_offset); 6175 aux_size = READ_ONCE(rb->user_page->aux_size); 6176 6177 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 6178 goto aux_unlock; 6179 6180 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 6181 goto aux_unlock; 6182 6183 /* already mapped with a different offset */ 6184 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 6185 goto aux_unlock; 6186 6187 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 6188 goto aux_unlock; 6189 6190 /* already mapped with a different size */ 6191 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 6192 goto aux_unlock; 6193 6194 if (!is_power_of_2(nr_pages)) 6195 goto aux_unlock; 6196 6197 if (!atomic_inc_not_zero(&rb->mmap_count)) 6198 goto aux_unlock; 6199 6200 if (rb_has_aux(rb)) { 6201 atomic_inc(&rb->aux_mmap_count); 6202 ret = 0; 6203 goto unlock; 6204 } 6205 6206 atomic_set(&rb->aux_mmap_count, 1); 6207 user_extra = nr_pages; 6208 6209 goto accounting; 6210 } 6211 6212 /* 6213 * If we have rb pages ensure they're a power-of-two number, so we 6214 * can do bitmasks instead of modulo. 6215 */ 6216 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 6217 return -EINVAL; 6218 6219 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 6220 return -EINVAL; 6221 6222 WARN_ON_ONCE(event->ctx->parent_ctx); 6223 again: 6224 mutex_lock(&event->mmap_mutex); 6225 if (event->rb) { 6226 if (event->rb->nr_pages != nr_pages) { 6227 ret = -EINVAL; 6228 goto unlock; 6229 } 6230 6231 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 6232 /* 6233 * Raced against perf_mmap_close() through 6234 * perf_event_set_output(). 
Try again, hope for better 6235 * luck. 6236 */ 6237 mutex_unlock(&event->mmap_mutex); 6238 goto again; 6239 } 6240 6241 goto unlock; 6242 } 6243 6244 user_extra = nr_pages + 1; 6245 6246 accounting: 6247 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 6248 6249 /* 6250 * Increase the limit linearly with more CPUs: 6251 */ 6252 user_lock_limit *= num_online_cpus(); 6253 6254 user_locked = atomic_long_read(&user->locked_vm); 6255 6256 /* 6257 * sysctl_perf_event_mlock may have changed, so that 6258 * user->locked_vm > user_lock_limit 6259 */ 6260 if (user_locked > user_lock_limit) 6261 user_locked = user_lock_limit; 6262 user_locked += user_extra; 6263 6264 if (user_locked > user_lock_limit) { 6265 /* 6266 * charge locked_vm until it hits user_lock_limit; 6267 * charge the rest from pinned_vm 6268 */ 6269 extra = user_locked - user_lock_limit; 6270 user_extra -= extra; 6271 } 6272 6273 lock_limit = rlimit(RLIMIT_MEMLOCK); 6274 lock_limit >>= PAGE_SHIFT; 6275 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra; 6276 6277 if ((locked > lock_limit) && perf_is_paranoid() && 6278 !capable(CAP_IPC_LOCK)) { 6279 ret = -EPERM; 6280 goto unlock; 6281 } 6282 6283 WARN_ON(!rb && event->rb); 6284 6285 if (vma->vm_flags & VM_WRITE) 6286 flags |= RING_BUFFER_WRITABLE; 6287 6288 if (!rb) { 6289 rb = rb_alloc(nr_pages, 6290 event->attr.watermark ? event->attr.wakeup_watermark : 0, 6291 event->cpu, flags); 6292 6293 if (!rb) { 6294 ret = -ENOMEM; 6295 goto unlock; 6296 } 6297 6298 atomic_set(&rb->mmap_count, 1); 6299 rb->mmap_user = get_current_user(); 6300 rb->mmap_locked = extra; 6301 6302 ring_buffer_attach(event, rb); 6303 6304 perf_event_init_userpage(event); 6305 perf_event_update_userpage(event); 6306 } else { 6307 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 6308 event->attr.aux_watermark, flags); 6309 if (!ret) 6310 rb->aux_mmap_locked = extra; 6311 } 6312 6313 unlock: 6314 if (!ret) { 6315 atomic_long_add(user_extra, &user->locked_vm); 6316 atomic64_add(extra, &vma->vm_mm->pinned_vm); 6317 6318 atomic_inc(&event->mmap_count); 6319 } else if (rb) { 6320 atomic_dec(&rb->mmap_count); 6321 } 6322 aux_unlock: 6323 mutex_unlock(&event->mmap_mutex); 6324 6325 /* 6326 * Since pinned accounting is per vm we cannot allow fork() to copy our 6327 * vma. 6328 */ 6329 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 6330 vma->vm_ops = &perf_mmap_vmops; 6331 6332 if (event->pmu->event_mapped) 6333 event->pmu->event_mapped(event, vma->vm_mm); 6334 6335 return ret; 6336 } 6337 6338 static int perf_fasync(int fd, struct file *filp, int on) 6339 { 6340 struct inode *inode = file_inode(filp); 6341 struct perf_event *event = filp->private_data; 6342 int retval; 6343 6344 inode_lock(inode); 6345 retval = fasync_helper(fd, filp, on, &event->fasync); 6346 inode_unlock(inode); 6347 6348 if (retval < 0) 6349 return retval; 6350 6351 return 0; 6352 } 6353 6354 static const struct file_operations perf_fops = { 6355 .llseek = no_llseek, 6356 .release = perf_release, 6357 .read = perf_read, 6358 .poll = perf_poll, 6359 .unlocked_ioctl = perf_ioctl, 6360 .compat_ioctl = perf_compat_ioctl, 6361 .mmap = perf_mmap, 6362 .fasync = perf_fasync, 6363 }; 6364 6365 /* 6366 * Perf event wakeup 6367 * 6368 * If there's data, ensure we set the poll() state and publish everything 6369 * to user-space before waking everybody up. 
6370 */ 6371 6372 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) 6373 { 6374 /* only the parent has fasync state */ 6375 if (event->parent) 6376 event = event->parent; 6377 return &event->fasync; 6378 } 6379 6380 void perf_event_wakeup(struct perf_event *event) 6381 { 6382 ring_buffer_wakeup(event); 6383 6384 if (event->pending_kill) { 6385 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); 6386 event->pending_kill = 0; 6387 } 6388 } 6389 6390 static void perf_sigtrap(struct perf_event *event) 6391 { 6392 struct kernel_siginfo info; 6393 6394 /* 6395 * We'd expect this to only occur if the irq_work is delayed and either 6396 * ctx->task or current has changed in the meantime. This can be the 6397 * case on architectures that do not implement arch_irq_work_raise(). 6398 */ 6399 if (WARN_ON_ONCE(event->ctx->task != current)) 6400 return; 6401 6402 /* 6403 * perf_pending_event() can race with the task exiting. 6404 */ 6405 if (current->flags & PF_EXITING) 6406 return; 6407 6408 clear_siginfo(&info); 6409 info.si_signo = SIGTRAP; 6410 info.si_code = TRAP_PERF; 6411 info.si_errno = event->attr.type; 6412 info.si_perf = event->attr.sig_data; 6413 info.si_addr = (void __user *)event->pending_addr; 6414 force_sig_info(&info); 6415 } 6416 6417 static void perf_pending_event_disable(struct perf_event *event) 6418 { 6419 int cpu = READ_ONCE(event->pending_disable); 6420 6421 if (cpu < 0) 6422 return; 6423 6424 if (cpu == smp_processor_id()) { 6425 WRITE_ONCE(event->pending_disable, -1); 6426 6427 if (event->attr.sigtrap) { 6428 perf_sigtrap(event); 6429 atomic_set_release(&event->event_limit, 1); /* rearm event */ 6430 return; 6431 } 6432 6433 perf_event_disable_local(event); 6434 return; 6435 } 6436 6437 /* 6438 * CPU-A CPU-B 6439 * 6440 * perf_event_disable_inatomic() 6441 * @pending_disable = CPU-A; 6442 * irq_work_queue(); 6443 * 6444 * sched-out 6445 * @pending_disable = -1; 6446 * 6447 * sched-in 6448 * perf_event_disable_inatomic() 6449 * @pending_disable = CPU-B; 6450 * irq_work_queue(); // FAILS 6451 * 6452 * irq_work_run() 6453 * perf_pending_event() 6454 * 6455 * But the event runs on CPU-B and wants disabling there. 6456 */ 6457 irq_work_queue_on(&event->pending, cpu); 6458 } 6459 6460 static void perf_pending_event(struct irq_work *entry) 6461 { 6462 struct perf_event *event = container_of(entry, struct perf_event, pending); 6463 int rctx; 6464 6465 rctx = perf_swevent_get_recursion_context(); 6466 /* 6467 * If we 'fail' here, that's OK, it means recursion is already disabled 6468 * and we won't recurse 'further'. 6469 */ 6470 6471 perf_pending_event_disable(event); 6472 6473 if (event->pending_wakeup) { 6474 event->pending_wakeup = 0; 6475 perf_event_wakeup(event); 6476 } 6477 6478 if (rctx >= 0) 6479 perf_swevent_put_recursion_context(rctx); 6480 } 6481 6482 /* 6483 * We assume there is only KVM supporting the callbacks. 6484 * Later on, we might change it to a list if there is 6485 * another virtualization implementation supporting the callbacks. 
6486 */ 6487 struct perf_guest_info_callbacks *perf_guest_cbs; 6488 6489 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 6490 { 6491 perf_guest_cbs = cbs; 6492 return 0; 6493 } 6494 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 6495 6496 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 6497 { 6498 perf_guest_cbs = NULL; 6499 return 0; 6500 } 6501 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 6502 6503 static void 6504 perf_output_sample_regs(struct perf_output_handle *handle, 6505 struct pt_regs *regs, u64 mask) 6506 { 6507 int bit; 6508 DECLARE_BITMAP(_mask, 64); 6509 6510 bitmap_from_u64(_mask, mask); 6511 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) { 6512 u64 val; 6513 6514 val = perf_reg_value(regs, bit); 6515 perf_output_put(handle, val); 6516 } 6517 } 6518 6519 static void perf_sample_regs_user(struct perf_regs *regs_user, 6520 struct pt_regs *regs) 6521 { 6522 if (user_mode(regs)) { 6523 regs_user->abi = perf_reg_abi(current); 6524 regs_user->regs = regs; 6525 } else if (!(current->flags & PF_KTHREAD)) { 6526 perf_get_regs_user(regs_user, regs); 6527 } else { 6528 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 6529 regs_user->regs = NULL; 6530 } 6531 } 6532 6533 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 6534 struct pt_regs *regs) 6535 { 6536 regs_intr->regs = regs; 6537 regs_intr->abi = perf_reg_abi(current); 6538 } 6539 6540 6541 /* 6542 * Get remaining task size from user stack pointer. 6543 * 6544 * It'd be better to take stack vma map and limit this more 6545 * precisely, but there's no way to get it safely under interrupt, 6546 * so using TASK_SIZE as limit. 6547 */ 6548 static u64 perf_ustack_task_size(struct pt_regs *regs) 6549 { 6550 unsigned long addr = perf_user_stack_pointer(regs); 6551 6552 if (!addr || addr >= TASK_SIZE) 6553 return 0; 6554 6555 return TASK_SIZE - addr; 6556 } 6557 6558 static u16 6559 perf_sample_ustack_size(u16 stack_size, u16 header_size, 6560 struct pt_regs *regs) 6561 { 6562 u64 task_size; 6563 6564 /* No regs, no stack pointer, no dump. */ 6565 if (!regs) 6566 return 0; 6567 6568 /* 6569 * Check if we fit in with the requested stack size into the: 6570 * - TASK_SIZE 6571 * If we don't, we limit the size to the TASK_SIZE. 6572 * 6573 * - remaining sample size 6574 * If we don't, we customize the stack size to 6575 * fit in to the remaining sample size. 6576 */ 6577 6578 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 6579 stack_size = min(stack_size, (u16) task_size); 6580 6581 /* Current header size plus static size and dynamic size. */ 6582 header_size += 2 * sizeof(u64); 6583 6584 /* Do we fit in with the current stack dump size? */ 6585 if ((u16) (header_size + stack_size) < header_size) { 6586 /* 6587 * If we overflow the maximum size for the sample, 6588 * we customize the stack dump size to fit in. 
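 *
 * Rough illustration with made-up numbers: a header_size of 0xff00
 * plus a requested stack_size of 0x0200 wraps the u16 sum around to a
 * value below header_size, so the dump is clamped to
 * USHRT_MAX - header_size - sizeof(u64) bytes instead.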
6589 */ 6590 stack_size = USHRT_MAX - header_size - sizeof(u64); 6591 stack_size = round_up(stack_size, sizeof(u64)); 6592 } 6593 6594 return stack_size; 6595 } 6596 6597 static void 6598 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 6599 struct pt_regs *regs) 6600 { 6601 /* Case of a kernel thread, nothing to dump */ 6602 if (!regs) { 6603 u64 size = 0; 6604 perf_output_put(handle, size); 6605 } else { 6606 unsigned long sp; 6607 unsigned int rem; 6608 u64 dyn_size; 6609 mm_segment_t fs; 6610 6611 /* 6612 * We dump: 6613 * static size 6614 * - the size requested by user or the best one we can fit 6615 * in to the sample max size 6616 * data 6617 * - user stack dump data 6618 * dynamic size 6619 * - the actual dumped size 6620 */ 6621 6622 /* Static size. */ 6623 perf_output_put(handle, dump_size); 6624 6625 /* Data. */ 6626 sp = perf_user_stack_pointer(regs); 6627 fs = force_uaccess_begin(); 6628 rem = __output_copy_user(handle, (void *) sp, dump_size); 6629 force_uaccess_end(fs); 6630 dyn_size = dump_size - rem; 6631 6632 perf_output_skip(handle, rem); 6633 6634 /* Dynamic size. */ 6635 perf_output_put(handle, dyn_size); 6636 } 6637 } 6638 6639 static unsigned long perf_prepare_sample_aux(struct perf_event *event, 6640 struct perf_sample_data *data, 6641 size_t size) 6642 { 6643 struct perf_event *sampler = event->aux_event; 6644 struct perf_buffer *rb; 6645 6646 data->aux_size = 0; 6647 6648 if (!sampler) 6649 goto out; 6650 6651 if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE)) 6652 goto out; 6653 6654 if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id())) 6655 goto out; 6656 6657 rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); 6658 if (!rb) 6659 goto out; 6660 6661 /* 6662 * If this is an NMI hit inside sampling code, don't take 6663 * the sample. See also perf_aux_sample_output(). 6664 */ 6665 if (READ_ONCE(rb->aux_in_sampling)) { 6666 data->aux_size = 0; 6667 } else { 6668 size = min_t(size_t, size, perf_aux_size(rb)); 6669 data->aux_size = ALIGN(size, sizeof(u64)); 6670 } 6671 ring_buffer_put(rb); 6672 6673 out: 6674 return data->aux_size; 6675 } 6676 6677 long perf_pmu_snapshot_aux(struct perf_buffer *rb, 6678 struct perf_event *event, 6679 struct perf_output_handle *handle, 6680 unsigned long size) 6681 { 6682 unsigned long flags; 6683 long ret; 6684 6685 /* 6686 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler 6687 * paths. If we start calling them in NMI context, they may race with 6688 * the IRQ ones, that is, for example, re-starting an event that's just 6689 * been stopped, which is why we're using a separate callback that 6690 * doesn't change the event state. 6691 * 6692 * IRQs need to be disabled to prevent IPIs from racing with us. 6693 */ 6694 local_irq_save(flags); 6695 /* 6696 * Guard against NMI hits inside the critical section; 6697 * see also perf_prepare_sample_aux(). 
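 *
 * Note that the flag only needs to be ordered against an NMI hitting
 * this same CPU (the reader uses READ_ONCE() in
 * perf_prepare_sample_aux()), which is why plain compiler barriers
 * around the ->snapshot_aux() call are sufficient here.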
6698 */ 6699 WRITE_ONCE(rb->aux_in_sampling, 1); 6700 barrier(); 6701 6702 ret = event->pmu->snapshot_aux(event, handle, size); 6703 6704 barrier(); 6705 WRITE_ONCE(rb->aux_in_sampling, 0); 6706 local_irq_restore(flags); 6707 6708 return ret; 6709 } 6710 6711 static void perf_aux_sample_output(struct perf_event *event, 6712 struct perf_output_handle *handle, 6713 struct perf_sample_data *data) 6714 { 6715 struct perf_event *sampler = event->aux_event; 6716 struct perf_buffer *rb; 6717 unsigned long pad; 6718 long size; 6719 6720 if (WARN_ON_ONCE(!sampler || !data->aux_size)) 6721 return; 6722 6723 rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); 6724 if (!rb) 6725 return; 6726 6727 size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size); 6728 6729 /* 6730 * An error here means that perf_output_copy() failed (returned a 6731 * non-zero surplus that it didn't copy), which in its current 6732 * enlightened implementation is not possible. If that changes, we'd 6733 * like to know. 6734 */ 6735 if (WARN_ON_ONCE(size < 0)) 6736 goto out_put; 6737 6738 /* 6739 * The pad comes from ALIGN()ing data->aux_size up to u64 in 6740 * perf_prepare_sample_aux(), so should not be more than that. 6741 */ 6742 pad = data->aux_size - size; 6743 if (WARN_ON_ONCE(pad >= sizeof(u64))) 6744 pad = 8; 6745 6746 if (pad) { 6747 u64 zero = 0; 6748 perf_output_copy(handle, &zero, pad); 6749 } 6750 6751 out_put: 6752 ring_buffer_put(rb); 6753 } 6754 6755 static void __perf_event_header__init_id(struct perf_event_header *header, 6756 struct perf_sample_data *data, 6757 struct perf_event *event) 6758 { 6759 u64 sample_type = event->attr.sample_type; 6760 6761 data->type = sample_type; 6762 header->size += event->id_header_size; 6763 6764 if (sample_type & PERF_SAMPLE_TID) { 6765 /* namespace issues */ 6766 data->tid_entry.pid = perf_event_pid(event, current); 6767 data->tid_entry.tid = perf_event_tid(event, current); 6768 } 6769 6770 if (sample_type & PERF_SAMPLE_TIME) 6771 data->time = perf_event_clock(event); 6772 6773 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 6774 data->id = primary_event_id(event); 6775 6776 if (sample_type & PERF_SAMPLE_STREAM_ID) 6777 data->stream_id = event->id; 6778 6779 if (sample_type & PERF_SAMPLE_CPU) { 6780 data->cpu_entry.cpu = raw_smp_processor_id(); 6781 data->cpu_entry.reserved = 0; 6782 } 6783 } 6784 6785 void perf_event_header__init_id(struct perf_event_header *header, 6786 struct perf_sample_data *data, 6787 struct perf_event *event) 6788 { 6789 if (event->attr.sample_id_all) 6790 __perf_event_header__init_id(header, data, event); 6791 } 6792 6793 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 6794 struct perf_sample_data *data) 6795 { 6796 u64 sample_type = data->type; 6797 6798 if (sample_type & PERF_SAMPLE_TID) 6799 perf_output_put(handle, data->tid_entry); 6800 6801 if (sample_type & PERF_SAMPLE_TIME) 6802 perf_output_put(handle, data->time); 6803 6804 if (sample_type & PERF_SAMPLE_ID) 6805 perf_output_put(handle, data->id); 6806 6807 if (sample_type & PERF_SAMPLE_STREAM_ID) 6808 perf_output_put(handle, data->stream_id); 6809 6810 if (sample_type & PERF_SAMPLE_CPU) 6811 perf_output_put(handle, data->cpu_entry); 6812 6813 if (sample_type & PERF_SAMPLE_IDENTIFIER) 6814 perf_output_put(handle, data->id); 6815 } 6816 6817 void perf_event__output_id_sample(struct perf_event *event, 6818 struct perf_output_handle *handle, 6819 struct perf_sample_data *sample) 6820 { 6821 if (event->attr.sample_id_all) 6822 
__perf_event__output_id_sample(handle, sample); 6823 } 6824 6825 static void perf_output_read_one(struct perf_output_handle *handle, 6826 struct perf_event *event, 6827 u64 enabled, u64 running) 6828 { 6829 u64 read_format = event->attr.read_format; 6830 u64 values[4]; 6831 int n = 0; 6832 6833 values[n++] = perf_event_count(event); 6834 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 6835 values[n++] = enabled + 6836 atomic64_read(&event->child_total_time_enabled); 6837 } 6838 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 6839 values[n++] = running + 6840 atomic64_read(&event->child_total_time_running); 6841 } 6842 if (read_format & PERF_FORMAT_ID) 6843 values[n++] = primary_event_id(event); 6844 6845 __output_copy(handle, values, n * sizeof(u64)); 6846 } 6847 6848 static void perf_output_read_group(struct perf_output_handle *handle, 6849 struct perf_event *event, 6850 u64 enabled, u64 running) 6851 { 6852 struct perf_event *leader = event->group_leader, *sub; 6853 u64 read_format = event->attr.read_format; 6854 u64 values[5]; 6855 int n = 0; 6856 6857 values[n++] = 1 + leader->nr_siblings; 6858 6859 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 6860 values[n++] = enabled; 6861 6862 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 6863 values[n++] = running; 6864 6865 if ((leader != event) && 6866 (leader->state == PERF_EVENT_STATE_ACTIVE)) 6867 leader->pmu->read(leader); 6868 6869 values[n++] = perf_event_count(leader); 6870 if (read_format & PERF_FORMAT_ID) 6871 values[n++] = primary_event_id(leader); 6872 6873 __output_copy(handle, values, n * sizeof(u64)); 6874 6875 for_each_sibling_event(sub, leader) { 6876 n = 0; 6877 6878 if ((sub != event) && 6879 (sub->state == PERF_EVENT_STATE_ACTIVE)) 6880 sub->pmu->read(sub); 6881 6882 values[n++] = perf_event_count(sub); 6883 if (read_format & PERF_FORMAT_ID) 6884 values[n++] = primary_event_id(sub); 6885 6886 __output_copy(handle, values, n * sizeof(u64)); 6887 } 6888 } 6889 6890 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 6891 PERF_FORMAT_TOTAL_TIME_RUNNING) 6892 6893 /* 6894 * XXX PERF_SAMPLE_READ vs inherited events seems difficult. 6895 * 6896 * The problem is that its both hard and excessively expensive to iterate the 6897 * child list, not to mention that its impossible to IPI the children running 6898 * on another CPU, from interrupt/NMI context. 6899 */ 6900 static void perf_output_read(struct perf_output_handle *handle, 6901 struct perf_event *event) 6902 { 6903 u64 enabled = 0, running = 0, now; 6904 u64 read_format = event->attr.read_format; 6905 6906 /* 6907 * compute total_time_enabled, total_time_running 6908 * based on snapshot values taken when the event 6909 * was last scheduled in. 
6910 * 6911 * we cannot simply call update_context_time() 6912 * because of locking issues, as we are called in 6913 * NMI context 6914 */ 6915 if (read_format & PERF_FORMAT_TOTAL_TIMES) 6916 calc_timer_values(event, &now, &enabled, &running); 6917 6918 if (event->attr.read_format & PERF_FORMAT_GROUP) 6919 perf_output_read_group(handle, event, enabled, running); 6920 else 6921 perf_output_read_one(handle, event, enabled, running); 6922 } 6923 6924 static inline bool perf_sample_save_hw_index(struct perf_event *event) 6925 { 6926 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; 6927 } 6928 6929 void perf_output_sample(struct perf_output_handle *handle, 6930 struct perf_event_header *header, 6931 struct perf_sample_data *data, 6932 struct perf_event *event) 6933 { 6934 u64 sample_type = data->type; 6935 6936 perf_output_put(handle, *header); 6937 6938 if (sample_type & PERF_SAMPLE_IDENTIFIER) 6939 perf_output_put(handle, data->id); 6940 6941 if (sample_type & PERF_SAMPLE_IP) 6942 perf_output_put(handle, data->ip); 6943 6944 if (sample_type & PERF_SAMPLE_TID) 6945 perf_output_put(handle, data->tid_entry); 6946 6947 if (sample_type & PERF_SAMPLE_TIME) 6948 perf_output_put(handle, data->time); 6949 6950 if (sample_type & PERF_SAMPLE_ADDR) 6951 perf_output_put(handle, data->addr); 6952 6953 if (sample_type & PERF_SAMPLE_ID) 6954 perf_output_put(handle, data->id); 6955 6956 if (sample_type & PERF_SAMPLE_STREAM_ID) 6957 perf_output_put(handle, data->stream_id); 6958 6959 if (sample_type & PERF_SAMPLE_CPU) 6960 perf_output_put(handle, data->cpu_entry); 6961 6962 if (sample_type & PERF_SAMPLE_PERIOD) 6963 perf_output_put(handle, data->period); 6964 6965 if (sample_type & PERF_SAMPLE_READ) 6966 perf_output_read(handle, event); 6967 6968 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 6969 int size = 1; 6970 6971 size += data->callchain->nr; 6972 size *= sizeof(u64); 6973 __output_copy(handle, data->callchain, size); 6974 } 6975 6976 if (sample_type & PERF_SAMPLE_RAW) { 6977 struct perf_raw_record *raw = data->raw; 6978 6979 if (raw) { 6980 struct perf_raw_frag *frag = &raw->frag; 6981 6982 perf_output_put(handle, raw->size); 6983 do { 6984 if (frag->copy) { 6985 __output_custom(handle, frag->copy, 6986 frag->data, frag->size); 6987 } else { 6988 __output_copy(handle, frag->data, 6989 frag->size); 6990 } 6991 if (perf_raw_frag_last(frag)) 6992 break; 6993 frag = frag->next; 6994 } while (1); 6995 if (frag->pad) 6996 __output_skip(handle, NULL, frag->pad); 6997 } else { 6998 struct { 6999 u32 size; 7000 u32 data; 7001 } raw = { 7002 .size = sizeof(u32), 7003 .data = 0, 7004 }; 7005 perf_output_put(handle, raw); 7006 } 7007 } 7008 7009 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 7010 if (data->br_stack) { 7011 size_t size; 7012 7013 size = data->br_stack->nr 7014 * sizeof(struct perf_branch_entry); 7015 7016 perf_output_put(handle, data->br_stack->nr); 7017 if (perf_sample_save_hw_index(event)) 7018 perf_output_put(handle, data->br_stack->hw_idx); 7019 perf_output_copy(handle, data->br_stack->entries, size); 7020 } else { 7021 /* 7022 * we always store at least the value of nr 7023 */ 7024 u64 nr = 0; 7025 perf_output_put(handle, nr); 7026 } 7027 } 7028 7029 if (sample_type & PERF_SAMPLE_REGS_USER) { 7030 u64 abi = data->regs_user.abi; 7031 7032 /* 7033 * If there are no regs to dump, notice it through 7034 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
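 *
 * Either way the abi word itself is always emitted; only when it is
 * non-zero is it followed by one u64 per bit set in
 * attr.sample_regs_user, in bit order.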
7035 */ 7036 perf_output_put(handle, abi); 7037 7038 if (abi) { 7039 u64 mask = event->attr.sample_regs_user; 7040 perf_output_sample_regs(handle, 7041 data->regs_user.regs, 7042 mask); 7043 } 7044 } 7045 7046 if (sample_type & PERF_SAMPLE_STACK_USER) { 7047 perf_output_sample_ustack(handle, 7048 data->stack_user_size, 7049 data->regs_user.regs); 7050 } 7051 7052 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) 7053 perf_output_put(handle, data->weight.full); 7054 7055 if (sample_type & PERF_SAMPLE_DATA_SRC) 7056 perf_output_put(handle, data->data_src.val); 7057 7058 if (sample_type & PERF_SAMPLE_TRANSACTION) 7059 perf_output_put(handle, data->txn); 7060 7061 if (sample_type & PERF_SAMPLE_REGS_INTR) { 7062 u64 abi = data->regs_intr.abi; 7063 /* 7064 * If there are no regs to dump, notice it through 7065 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 7066 */ 7067 perf_output_put(handle, abi); 7068 7069 if (abi) { 7070 u64 mask = event->attr.sample_regs_intr; 7071 7072 perf_output_sample_regs(handle, 7073 data->regs_intr.regs, 7074 mask); 7075 } 7076 } 7077 7078 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 7079 perf_output_put(handle, data->phys_addr); 7080 7081 if (sample_type & PERF_SAMPLE_CGROUP) 7082 perf_output_put(handle, data->cgroup); 7083 7084 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) 7085 perf_output_put(handle, data->data_page_size); 7086 7087 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) 7088 perf_output_put(handle, data->code_page_size); 7089 7090 if (sample_type & PERF_SAMPLE_AUX) { 7091 perf_output_put(handle, data->aux_size); 7092 7093 if (data->aux_size) 7094 perf_aux_sample_output(event, handle, data); 7095 } 7096 7097 if (!event->attr.watermark) { 7098 int wakeup_events = event->attr.wakeup_events; 7099 7100 if (wakeup_events) { 7101 struct perf_buffer *rb = handle->rb; 7102 int events = local_inc_return(&rb->events); 7103 7104 if (events >= wakeup_events) { 7105 local_sub(wakeup_events, &rb->events); 7106 local_inc(&rb->wakeup); 7107 } 7108 } 7109 } 7110 } 7111 7112 static u64 perf_virt_to_phys(u64 virt) 7113 { 7114 u64 phys_addr = 0; 7115 struct page *p = NULL; 7116 7117 if (!virt) 7118 return 0; 7119 7120 if (virt >= TASK_SIZE) { 7121 /* If it's vmalloc()d memory, leave phys_addr as 0 */ 7122 if (virt_addr_valid((void *)(uintptr_t)virt) && 7123 !(virt >= VMALLOC_START && virt < VMALLOC_END)) 7124 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt); 7125 } else { 7126 /* 7127 * Walking the pages tables for user address. 7128 * Interrupts are disabled, so it prevents any tear down 7129 * of the page tables. 7130 * Try IRQ-safe get_user_page_fast_only first. 7131 * If failed, leave phys_addr as 0. 7132 */ 7133 if (current->mm != NULL) { 7134 pagefault_disable(); 7135 if (get_user_page_fast_only(virt, 0, &p)) 7136 phys_addr = page_to_phys(p) + virt % PAGE_SIZE; 7137 pagefault_enable(); 7138 } 7139 7140 if (p) 7141 put_page(p); 7142 } 7143 7144 return phys_addr; 7145 } 7146 7147 /* 7148 * Return the pagetable size of a given virtual address. 
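 *
 * That is, the size covered by the leaf entry that maps the address
 * (for example 4KiB, 2MiB or 1GiB on x86_64), or 0 if no present
 * mapping is found.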
7149 */ 7150 static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) 7151 { 7152 u64 size = 0; 7153 7154 #ifdef CONFIG_HAVE_FAST_GUP 7155 pgd_t *pgdp, pgd; 7156 p4d_t *p4dp, p4d; 7157 pud_t *pudp, pud; 7158 pmd_t *pmdp, pmd; 7159 pte_t *ptep, pte; 7160 7161 pgdp = pgd_offset(mm, addr); 7162 pgd = READ_ONCE(*pgdp); 7163 if (pgd_none(pgd)) 7164 return 0; 7165 7166 if (pgd_leaf(pgd)) 7167 return pgd_leaf_size(pgd); 7168 7169 p4dp = p4d_offset_lockless(pgdp, pgd, addr); 7170 p4d = READ_ONCE(*p4dp); 7171 if (!p4d_present(p4d)) 7172 return 0; 7173 7174 if (p4d_leaf(p4d)) 7175 return p4d_leaf_size(p4d); 7176 7177 pudp = pud_offset_lockless(p4dp, p4d, addr); 7178 pud = READ_ONCE(*pudp); 7179 if (!pud_present(pud)) 7180 return 0; 7181 7182 if (pud_leaf(pud)) 7183 return pud_leaf_size(pud); 7184 7185 pmdp = pmd_offset_lockless(pudp, pud, addr); 7186 pmd = READ_ONCE(*pmdp); 7187 if (!pmd_present(pmd)) 7188 return 0; 7189 7190 if (pmd_leaf(pmd)) 7191 return pmd_leaf_size(pmd); 7192 7193 ptep = pte_offset_map(&pmd, addr); 7194 pte = ptep_get_lockless(ptep); 7195 if (pte_present(pte)) 7196 size = pte_leaf_size(pte); 7197 pte_unmap(ptep); 7198 #endif /* CONFIG_HAVE_FAST_GUP */ 7199 7200 return size; 7201 } 7202 7203 static u64 perf_get_page_size(unsigned long addr) 7204 { 7205 struct mm_struct *mm; 7206 unsigned long flags; 7207 u64 size; 7208 7209 if (!addr) 7210 return 0; 7211 7212 /* 7213 * Software page-table walkers must disable IRQs, 7214 * which prevents any tear down of the page tables. 7215 */ 7216 local_irq_save(flags); 7217 7218 mm = current->mm; 7219 if (!mm) { 7220 /* 7221 * For kernel threads and the like, use init_mm so that 7222 * we can find kernel memory. 7223 */ 7224 mm = &init_mm; 7225 } 7226 7227 size = perf_get_pgtable_size(mm, addr); 7228 7229 local_irq_restore(flags); 7230 7231 return size; 7232 } 7233 7234 static struct perf_callchain_entry __empty_callchain = { .nr = 0, }; 7235 7236 struct perf_callchain_entry * 7237 perf_callchain(struct perf_event *event, struct pt_regs *regs) 7238 { 7239 bool kernel = !event->attr.exclude_callchain_kernel; 7240 bool user = !event->attr.exclude_callchain_user; 7241 /* Disallow cross-task user callchains. 
*/ 7242 bool crosstask = event->ctx->task && event->ctx->task != current; 7243 const u32 max_stack = event->attr.sample_max_stack; 7244 struct perf_callchain_entry *callchain; 7245 7246 if (!kernel && !user) 7247 return &__empty_callchain; 7248 7249 callchain = get_perf_callchain(regs, 0, kernel, user, 7250 max_stack, crosstask, true); 7251 return callchain ?: &__empty_callchain; 7252 } 7253 7254 void perf_prepare_sample(struct perf_event_header *header, 7255 struct perf_sample_data *data, 7256 struct perf_event *event, 7257 struct pt_regs *regs) 7258 { 7259 u64 sample_type = event->attr.sample_type; 7260 7261 header->type = PERF_RECORD_SAMPLE; 7262 header->size = sizeof(*header) + event->header_size; 7263 7264 header->misc = 0; 7265 header->misc |= perf_misc_flags(regs); 7266 7267 __perf_event_header__init_id(header, data, event); 7268 7269 if (sample_type & (PERF_SAMPLE_IP | PERF_SAMPLE_CODE_PAGE_SIZE)) 7270 data->ip = perf_instruction_pointer(regs); 7271 7272 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 7273 int size = 1; 7274 7275 if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) 7276 data->callchain = perf_callchain(event, regs); 7277 7278 size += data->callchain->nr; 7279 7280 header->size += size * sizeof(u64); 7281 } 7282 7283 if (sample_type & PERF_SAMPLE_RAW) { 7284 struct perf_raw_record *raw = data->raw; 7285 int size; 7286 7287 if (raw) { 7288 struct perf_raw_frag *frag = &raw->frag; 7289 u32 sum = 0; 7290 7291 do { 7292 sum += frag->size; 7293 if (perf_raw_frag_last(frag)) 7294 break; 7295 frag = frag->next; 7296 } while (1); 7297 7298 size = round_up(sum + sizeof(u32), sizeof(u64)); 7299 raw->size = size - sizeof(u32); 7300 frag->pad = raw->size - sum; 7301 } else { 7302 size = sizeof(u64); 7303 } 7304 7305 header->size += size; 7306 } 7307 7308 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 7309 int size = sizeof(u64); /* nr */ 7310 if (data->br_stack) { 7311 if (perf_sample_save_hw_index(event)) 7312 size += sizeof(u64); 7313 7314 size += data->br_stack->nr 7315 * sizeof(struct perf_branch_entry); 7316 } 7317 header->size += size; 7318 } 7319 7320 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 7321 perf_sample_regs_user(&data->regs_user, regs); 7322 7323 if (sample_type & PERF_SAMPLE_REGS_USER) { 7324 /* regs dump ABI info */ 7325 int size = sizeof(u64); 7326 7327 if (data->regs_user.regs) { 7328 u64 mask = event->attr.sample_regs_user; 7329 size += hweight64(mask) * sizeof(u64); 7330 } 7331 7332 header->size += size; 7333 } 7334 7335 if (sample_type & PERF_SAMPLE_STACK_USER) { 7336 /* 7337 * Either we need PERF_SAMPLE_STACK_USER bit to be always 7338 * processed as the last one or have additional check added 7339 * in case new sample type is added, because we could eat 7340 * up the rest of the sample size. 7341 */ 7342 u16 stack_size = event->attr.sample_stack_user; 7343 u16 size = sizeof(u64); 7344 7345 stack_size = perf_sample_ustack_size(stack_size, header->size, 7346 data->regs_user.regs); 7347 7348 /* 7349 * If there is something to dump, add space for the dump 7350 * itself and for the field that tells the dynamic size, 7351 * which is how many have been actually dumped. 
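 *
 * This mirrors the layout written out by perf_output_sample_ustack():
 * a u64 holding the static dump size, the stack data itself, and a
 * trailing u64 with the dynamically used size.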
7352 */ 7353 if (stack_size) 7354 size += sizeof(u64) + stack_size; 7355 7356 data->stack_user_size = stack_size; 7357 header->size += size; 7358 } 7359 7360 if (sample_type & PERF_SAMPLE_REGS_INTR) { 7361 /* regs dump ABI info */ 7362 int size = sizeof(u64); 7363 7364 perf_sample_regs_intr(&data->regs_intr, regs); 7365 7366 if (data->regs_intr.regs) { 7367 u64 mask = event->attr.sample_regs_intr; 7368 7369 size += hweight64(mask) * sizeof(u64); 7370 } 7371 7372 header->size += size; 7373 } 7374 7375 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 7376 data->phys_addr = perf_virt_to_phys(data->addr); 7377 7378 #ifdef CONFIG_CGROUP_PERF 7379 if (sample_type & PERF_SAMPLE_CGROUP) { 7380 struct cgroup *cgrp; 7381 7382 /* protected by RCU */ 7383 cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup; 7384 data->cgroup = cgroup_id(cgrp); 7385 } 7386 #endif 7387 7388 /* 7389 * PERF_SAMPLE_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user 7390 * doesn't request PERF_SAMPLE_ADDR, the kernel implicitly retrieves 7391 * data->addr, but the value is not dumped to userspace. 7392 */ 7393 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) 7394 data->data_page_size = perf_get_page_size(data->addr); 7395 7396 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) 7397 data->code_page_size = perf_get_page_size(data->ip); 7398 7399 if (sample_type & PERF_SAMPLE_AUX) { 7400 u64 size; 7401 7402 header->size += sizeof(u64); /* size */ 7403 7404 /* 7405 * Given the 16bit nature of header::size, an AUX sample can 7406 * easily overflow it, what with all the preceding sample bits. 7407 * Make sure this doesn't happen by using up to U16_MAX bytes 7408 * per sample in total (rounded down to 8 byte boundary). 7409 */ 7410 size = min_t(size_t, U16_MAX - header->size, 7411 event->attr.aux_sample_size); 7412 size = rounddown(size, 8); 7413 size = perf_prepare_sample_aux(event, data, size); 7414 7415 WARN_ON_ONCE(size + header->size > U16_MAX); 7416 header->size += size; 7417 } 7418 /* 7419 * If you're adding more sample types here, you likely need to do 7420 * something about the overflowing header::size, like repurpose the 7421 * lowest 3 bits of size, which should always be zero at the moment. 7422 * This raises a more important question: do we really need 512k sized 7423 * samples and why? Good argumentation is in order for whatever you 7424 * do here next.
7425 */ 7426 WARN_ON_ONCE(header->size & 7); 7427 } 7428 7429 static __always_inline int 7430 __perf_event_output(struct perf_event *event, 7431 struct perf_sample_data *data, 7432 struct pt_regs *regs, 7433 int (*output_begin)(struct perf_output_handle *, 7434 struct perf_sample_data *, 7435 struct perf_event *, 7436 unsigned int)) 7437 { 7438 struct perf_output_handle handle; 7439 struct perf_event_header header; 7440 int err; 7441 7442 /* protect the callchain buffers */ 7443 rcu_read_lock(); 7444 7445 perf_prepare_sample(&header, data, event, regs); 7446 7447 err = output_begin(&handle, data, event, header.size); 7448 if (err) 7449 goto exit; 7450 7451 perf_output_sample(&handle, &header, data, event); 7452 7453 perf_output_end(&handle); 7454 7455 exit: 7456 rcu_read_unlock(); 7457 return err; 7458 } 7459 7460 void 7461 perf_event_output_forward(struct perf_event *event, 7462 struct perf_sample_data *data, 7463 struct pt_regs *regs) 7464 { 7465 __perf_event_output(event, data, regs, perf_output_begin_forward); 7466 } 7467 7468 void 7469 perf_event_output_backward(struct perf_event *event, 7470 struct perf_sample_data *data, 7471 struct pt_regs *regs) 7472 { 7473 __perf_event_output(event, data, regs, perf_output_begin_backward); 7474 } 7475 7476 int 7477 perf_event_output(struct perf_event *event, 7478 struct perf_sample_data *data, 7479 struct pt_regs *regs) 7480 { 7481 return __perf_event_output(event, data, regs, perf_output_begin); 7482 } 7483 7484 /* 7485 * read event_id 7486 */ 7487 7488 struct perf_read_event { 7489 struct perf_event_header header; 7490 7491 u32 pid; 7492 u32 tid; 7493 }; 7494 7495 static void 7496 perf_event_read_event(struct perf_event *event, 7497 struct task_struct *task) 7498 { 7499 struct perf_output_handle handle; 7500 struct perf_sample_data sample; 7501 struct perf_read_event read_event = { 7502 .header = { 7503 .type = PERF_RECORD_READ, 7504 .misc = 0, 7505 .size = sizeof(read_event) + event->read_size, 7506 }, 7507 .pid = perf_event_pid(event, task), 7508 .tid = perf_event_tid(event, task), 7509 }; 7510 int ret; 7511 7512 perf_event_header__init_id(&read_event.header, &sample, event); 7513 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); 7514 if (ret) 7515 return; 7516 7517 perf_output_put(&handle, read_event); 7518 perf_output_read(&handle, event); 7519 perf_event__output_id_sample(event, &handle, &sample); 7520 7521 perf_output_end(&handle); 7522 } 7523 7524 typedef void (perf_iterate_f)(struct perf_event *event, void *data); 7525 7526 static void 7527 perf_iterate_ctx(struct perf_event_context *ctx, 7528 perf_iterate_f output, 7529 void *data, bool all) 7530 { 7531 struct perf_event *event; 7532 7533 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 7534 if (!all) { 7535 if (event->state < PERF_EVENT_STATE_INACTIVE) 7536 continue; 7537 if (!event_filter_match(event)) 7538 continue; 7539 } 7540 7541 output(event, data); 7542 } 7543 } 7544 7545 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data) 7546 { 7547 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events); 7548 struct perf_event *event; 7549 7550 list_for_each_entry_rcu(event, &pel->list, sb_list) { 7551 /* 7552 * Skip events that are not fully formed yet; ensure that 7553 * if we observe event->ctx, both event and ctx will be 7554 * complete enough. See perf_install_in_context(). 
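 *
 * (The smp_load_acquire() below is presumably paired with the store
 * that publishes event->ctx in perf_install_in_context(); an event
 * without a published ctx is simply skipped.)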
7555 */ 7556 if (!smp_load_acquire(&event->ctx)) 7557 continue; 7558 7559 if (event->state < PERF_EVENT_STATE_INACTIVE) 7560 continue; 7561 if (!event_filter_match(event)) 7562 continue; 7563 output(event, data); 7564 } 7565 } 7566 7567 /* 7568 * Iterate all events that need to receive side-band events. 7569 * 7570 * For new callers; ensure that account_pmu_sb_event() includes 7571 * your event, otherwise it might not get delivered. 7572 */ 7573 static void 7574 perf_iterate_sb(perf_iterate_f output, void *data, 7575 struct perf_event_context *task_ctx) 7576 { 7577 struct perf_event_context *ctx; 7578 int ctxn; 7579 7580 rcu_read_lock(); 7581 preempt_disable(); 7582 7583 /* 7584 * If we have task_ctx != NULL we only notify the task context itself. 7585 * The task_ctx is set only for EXIT events before releasing task 7586 * context. 7587 */ 7588 if (task_ctx) { 7589 perf_iterate_ctx(task_ctx, output, data, false); 7590 goto done; 7591 } 7592 7593 perf_iterate_sb_cpu(output, data); 7594 7595 for_each_task_context_nr(ctxn) { 7596 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 7597 if (ctx) 7598 perf_iterate_ctx(ctx, output, data, false); 7599 } 7600 done: 7601 preempt_enable(); 7602 rcu_read_unlock(); 7603 } 7604 7605 /* 7606 * Clear all file-based filters at exec, they'll have to be 7607 * re-instated when/if these objects are mmapped again. 7608 */ 7609 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) 7610 { 7611 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); 7612 struct perf_addr_filter *filter; 7613 unsigned int restart = 0, count = 0; 7614 unsigned long flags; 7615 7616 if (!has_addr_filter(event)) 7617 return; 7618 7619 raw_spin_lock_irqsave(&ifh->lock, flags); 7620 list_for_each_entry(filter, &ifh->list, entry) { 7621 if (filter->path.dentry) { 7622 event->addr_filter_ranges[count].start = 0; 7623 event->addr_filter_ranges[count].size = 0; 7624 restart++; 7625 } 7626 7627 count++; 7628 } 7629 7630 if (restart) 7631 event->addr_filters_gen++; 7632 raw_spin_unlock_irqrestore(&ifh->lock, flags); 7633 7634 if (restart) 7635 perf_event_stop(event, 1); 7636 } 7637 7638 void perf_event_exec(void) 7639 { 7640 struct perf_event_context *ctx; 7641 int ctxn; 7642 7643 for_each_task_context_nr(ctxn) { 7644 perf_event_enable_on_exec(ctxn); 7645 perf_event_remove_on_exec(ctxn); 7646 7647 rcu_read_lock(); 7648 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 7649 if (ctx) { 7650 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, 7651 NULL, true); 7652 } 7653 rcu_read_unlock(); 7654 } 7655 } 7656 7657 struct remote_output { 7658 struct perf_buffer *rb; 7659 int err; 7660 }; 7661 7662 static void __perf_event_output_stop(struct perf_event *event, void *data) 7663 { 7664 struct perf_event *parent = event->parent; 7665 struct remote_output *ro = data; 7666 struct perf_buffer *rb = ro->rb; 7667 struct stop_event_data sd = { 7668 .event = event, 7669 }; 7670 7671 if (!has_aux(event)) 7672 return; 7673 7674 if (!parent) 7675 parent = event; 7676 7677 /* 7678 * In case of inheritance, it will be the parent that links to the 7679 * ring-buffer, but it will be the child that's actually using it. 7680 * 7681 * We are using event::rb to determine if the event should be stopped, 7682 * however this may race with ring_buffer_attach() (through set_output), 7683 * which will make us skip the event that actually needs to be stopped. 7684 * So ring_buffer_attach() has to stop an aux event before re-assigning 7685 * its rb pointer. 
7686 */ 7687 if (rcu_dereference(parent->rb) == rb) 7688 ro->err = __perf_event_stop(&sd); 7689 } 7690 7691 static int __perf_pmu_output_stop(void *info) 7692 { 7693 struct perf_event *event = info; 7694 struct pmu *pmu = event->ctx->pmu; 7695 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 7696 struct remote_output ro = { 7697 .rb = event->rb, 7698 }; 7699 7700 rcu_read_lock(); 7701 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); 7702 if (cpuctx->task_ctx) 7703 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, 7704 &ro, false); 7705 rcu_read_unlock(); 7706 7707 return ro.err; 7708 } 7709 7710 static void perf_pmu_output_stop(struct perf_event *event) 7711 { 7712 struct perf_event *iter; 7713 int err, cpu; 7714 7715 restart: 7716 rcu_read_lock(); 7717 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { 7718 /* 7719 * For per-CPU events, we need to make sure that neither they 7720 * nor their children are running; for cpu==-1 events it's 7721 * sufficient to stop the event itself if it's active, since 7722 * it can't have children. 7723 */ 7724 cpu = iter->cpu; 7725 if (cpu == -1) 7726 cpu = READ_ONCE(iter->oncpu); 7727 7728 if (cpu == -1) 7729 continue; 7730 7731 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); 7732 if (err == -EAGAIN) { 7733 rcu_read_unlock(); 7734 goto restart; 7735 } 7736 } 7737 rcu_read_unlock(); 7738 } 7739 7740 /* 7741 * task tracking -- fork/exit 7742 * 7743 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 7744 */ 7745 7746 struct perf_task_event { 7747 struct task_struct *task; 7748 struct perf_event_context *task_ctx; 7749 7750 struct { 7751 struct perf_event_header header; 7752 7753 u32 pid; 7754 u32 ppid; 7755 u32 tid; 7756 u32 ptid; 7757 u64 time; 7758 } event_id; 7759 }; 7760 7761 static int perf_event_task_match(struct perf_event *event) 7762 { 7763 return event->attr.comm || event->attr.mmap || 7764 event->attr.mmap2 || event->attr.mmap_data || 7765 event->attr.task; 7766 } 7767 7768 static void perf_event_task_output(struct perf_event *event, 7769 void *data) 7770 { 7771 struct perf_task_event *task_event = data; 7772 struct perf_output_handle handle; 7773 struct perf_sample_data sample; 7774 struct task_struct *task = task_event->task; 7775 int ret, size = task_event->event_id.header.size; 7776 7777 if (!perf_event_task_match(event)) 7778 return; 7779 7780 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 7781 7782 ret = perf_output_begin(&handle, &sample, event, 7783 task_event->event_id.header.size); 7784 if (ret) 7785 goto out; 7786 7787 task_event->event_id.pid = perf_event_pid(event, task); 7788 task_event->event_id.tid = perf_event_tid(event, task); 7789 7790 if (task_event->event_id.header.type == PERF_RECORD_EXIT) { 7791 task_event->event_id.ppid = perf_event_pid(event, 7792 task->real_parent); 7793 task_event->event_id.ptid = perf_event_pid(event, 7794 task->real_parent); 7795 } else { /* PERF_RECORD_FORK */ 7796 task_event->event_id.ppid = perf_event_pid(event, current); 7797 task_event->event_id.ptid = perf_event_tid(event, current); 7798 } 7799 7800 task_event->event_id.time = perf_event_clock(event); 7801 7802 perf_output_put(&handle, task_event->event_id); 7803 7804 perf_event__output_id_sample(event, &handle, &sample); 7805 7806 perf_output_end(&handle); 7807 out: 7808 task_event->event_id.header.size = size; 7809 } 7810 7811 static void perf_event_task(struct task_struct *task, 7812 struct perf_event_context 
*task_ctx, 7813 int new) 7814 { 7815 struct perf_task_event task_event; 7816 7817 if (!atomic_read(&nr_comm_events) && 7818 !atomic_read(&nr_mmap_events) && 7819 !atomic_read(&nr_task_events)) 7820 return; 7821 7822 task_event = (struct perf_task_event){ 7823 .task = task, 7824 .task_ctx = task_ctx, 7825 .event_id = { 7826 .header = { 7827 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, 7828 .misc = 0, 7829 .size = sizeof(task_event.event_id), 7830 }, 7831 /* .pid */ 7832 /* .ppid */ 7833 /* .tid */ 7834 /* .ptid */ 7835 /* .time */ 7836 }, 7837 }; 7838 7839 perf_iterate_sb(perf_event_task_output, 7840 &task_event, 7841 task_ctx); 7842 } 7843 7844 void perf_event_fork(struct task_struct *task) 7845 { 7846 perf_event_task(task, NULL, 1); 7847 perf_event_namespaces(task); 7848 } 7849 7850 /* 7851 * comm tracking 7852 */ 7853 7854 struct perf_comm_event { 7855 struct task_struct *task; 7856 char *comm; 7857 int comm_size; 7858 7859 struct { 7860 struct perf_event_header header; 7861 7862 u32 pid; 7863 u32 tid; 7864 } event_id; 7865 }; 7866 7867 static int perf_event_comm_match(struct perf_event *event) 7868 { 7869 return event->attr.comm; 7870 } 7871 7872 static void perf_event_comm_output(struct perf_event *event, 7873 void *data) 7874 { 7875 struct perf_comm_event *comm_event = data; 7876 struct perf_output_handle handle; 7877 struct perf_sample_data sample; 7878 int size = comm_event->event_id.header.size; 7879 int ret; 7880 7881 if (!perf_event_comm_match(event)) 7882 return; 7883 7884 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 7885 ret = perf_output_begin(&handle, &sample, event, 7886 comm_event->event_id.header.size); 7887 7888 if (ret) 7889 goto out; 7890 7891 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 7892 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 7893 7894 perf_output_put(&handle, comm_event->event_id); 7895 __output_copy(&handle, comm_event->comm, 7896 comm_event->comm_size); 7897 7898 perf_event__output_id_sample(event, &handle, &sample); 7899 7900 perf_output_end(&handle); 7901 out: 7902 comm_event->event_id.header.size = size; 7903 } 7904 7905 static void perf_event_comm_event(struct perf_comm_event *comm_event) 7906 { 7907 char comm[TASK_COMM_LEN]; 7908 unsigned int size; 7909 7910 memset(comm, 0, sizeof(comm)); 7911 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 7912 size = ALIGN(strlen(comm)+1, sizeof(u64)); 7913 7914 comm_event->comm = comm; 7915 comm_event->comm_size = size; 7916 7917 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 7918 7919 perf_iterate_sb(perf_event_comm_output, 7920 comm_event, 7921 NULL); 7922 } 7923 7924 void perf_event_comm(struct task_struct *task, bool exec) 7925 { 7926 struct perf_comm_event comm_event; 7927 7928 if (!atomic_read(&nr_comm_events)) 7929 return; 7930 7931 comm_event = (struct perf_comm_event){ 7932 .task = task, 7933 /* .comm */ 7934 /* .comm_size */ 7935 .event_id = { 7936 .header = { 7937 .type = PERF_RECORD_COMM, 7938 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 7939 /* .size */ 7940 }, 7941 /* .pid */ 7942 /* .tid */ 7943 }, 7944 }; 7945 7946 perf_event_comm_event(&comm_event); 7947 } 7948 7949 /* 7950 * namespaces tracking 7951 */ 7952 7953 struct perf_namespaces_event { 7954 struct task_struct *task; 7955 7956 struct { 7957 struct perf_event_header header; 7958 7959 u32 pid; 7960 u32 tid; 7961 u64 nr_namespaces; 7962 struct perf_ns_link_info link_info[NR_NAMESPACES]; 7963 } event_id; 7964 }; 7965 7966 static int perf_event_namespaces_match(struct perf_event *event) 7967 { 7968 return event->attr.namespaces; 7969 } 7970 7971 static void perf_event_namespaces_output(struct perf_event *event, 7972 void *data) 7973 { 7974 struct perf_namespaces_event *namespaces_event = data; 7975 struct perf_output_handle handle; 7976 struct perf_sample_data sample; 7977 u16 header_size = namespaces_event->event_id.header.size; 7978 int ret; 7979 7980 if (!perf_event_namespaces_match(event)) 7981 return; 7982 7983 perf_event_header__init_id(&namespaces_event->event_id.header, 7984 &sample, event); 7985 ret = perf_output_begin(&handle, &sample, event, 7986 namespaces_event->event_id.header.size); 7987 if (ret) 7988 goto out; 7989 7990 namespaces_event->event_id.pid = perf_event_pid(event, 7991 namespaces_event->task); 7992 namespaces_event->event_id.tid = perf_event_tid(event, 7993 namespaces_event->task); 7994 7995 perf_output_put(&handle, namespaces_event->event_id); 7996 7997 perf_event__output_id_sample(event, &handle, &sample); 7998 7999 perf_output_end(&handle); 8000 out: 8001 namespaces_event->event_id.header.size = header_size; 8002 } 8003 8004 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info, 8005 struct task_struct *task, 8006 const struct proc_ns_operations *ns_ops) 8007 { 8008 struct path ns_path; 8009 struct inode *ns_inode; 8010 int error; 8011 8012 error = ns_get_path(&ns_path, task, ns_ops); 8013 if (!error) { 8014 ns_inode = ns_path.dentry->d_inode; 8015 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev); 8016 ns_link_info->ino = ns_inode->i_ino; 8017 path_put(&ns_path); 8018 } 8019 } 8020 8021 void perf_event_namespaces(struct task_struct *task) 8022 { 8023 struct perf_namespaces_event namespaces_event; 8024 struct perf_ns_link_info *ns_link_info; 8025 8026 if (!atomic_read(&nr_namespaces_events)) 8027 return; 8028 8029 namespaces_event = (struct perf_namespaces_event){ 8030 .task = task, 8031 .event_id = { 8032 .header = { 8033 .type = PERF_RECORD_NAMESPACES, 8034 .misc = 0, 8035 .size = sizeof(namespaces_event.event_id), 8036 }, 8037 /* .pid */ 8038 /* .tid */ 8039 .nr_namespaces = NR_NAMESPACES, 8040 /* .link_info[NR_NAMESPACES] */ 8041 }, 8042 }; 8043 8044 ns_link_info = namespaces_event.event_id.link_info; 8045 8046 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX], 8047 task, &mntns_operations); 8048 8049 #ifdef CONFIG_USER_NS 8050 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX], 8051 task, &userns_operations); 8052 #endif 8053 #ifdef CONFIG_NET_NS 8054 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX], 8055 task, &netns_operations); 8056 #endif 8057 #ifdef CONFIG_UTS_NS 8058 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX], 8059 task, &utsns_operations); 8060 #endif 8061 #ifdef CONFIG_IPC_NS 8062 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX], 8063 task, &ipcns_operations); 8064 #endif 8065 #ifdef CONFIG_PID_NS 8066 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX], 8067 task, &pidns_operations); 8068 #endif 8069 #ifdef CONFIG_CGROUPS 8070 
perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX], 8071 task, &cgroupns_operations); 8072 #endif 8073 8074 perf_iterate_sb(perf_event_namespaces_output, 8075 &namespaces_event, 8076 NULL); 8077 } 8078 8079 /* 8080 * cgroup tracking 8081 */ 8082 #ifdef CONFIG_CGROUP_PERF 8083 8084 struct perf_cgroup_event { 8085 char *path; 8086 int path_size; 8087 struct { 8088 struct perf_event_header header; 8089 u64 id; 8090 char path[]; 8091 } event_id; 8092 }; 8093 8094 static int perf_event_cgroup_match(struct perf_event *event) 8095 { 8096 return event->attr.cgroup; 8097 } 8098 8099 static void perf_event_cgroup_output(struct perf_event *event, void *data) 8100 { 8101 struct perf_cgroup_event *cgroup_event = data; 8102 struct perf_output_handle handle; 8103 struct perf_sample_data sample; 8104 u16 header_size = cgroup_event->event_id.header.size; 8105 int ret; 8106 8107 if (!perf_event_cgroup_match(event)) 8108 return; 8109 8110 perf_event_header__init_id(&cgroup_event->event_id.header, 8111 &sample, event); 8112 ret = perf_output_begin(&handle, &sample, event, 8113 cgroup_event->event_id.header.size); 8114 if (ret) 8115 goto out; 8116 8117 perf_output_put(&handle, cgroup_event->event_id); 8118 __output_copy(&handle, cgroup_event->path, cgroup_event->path_size); 8119 8120 perf_event__output_id_sample(event, &handle, &sample); 8121 8122 perf_output_end(&handle); 8123 out: 8124 cgroup_event->event_id.header.size = header_size; 8125 } 8126 8127 static void perf_event_cgroup(struct cgroup *cgrp) 8128 { 8129 struct perf_cgroup_event cgroup_event; 8130 char path_enomem[16] = "//enomem"; 8131 char *pathname; 8132 size_t size; 8133 8134 if (!atomic_read(&nr_cgroup_events)) 8135 return; 8136 8137 cgroup_event = (struct perf_cgroup_event){ 8138 .event_id = { 8139 .header = { 8140 .type = PERF_RECORD_CGROUP, 8141 .misc = 0, 8142 .size = sizeof(cgroup_event.event_id), 8143 }, 8144 .id = cgroup_id(cgrp), 8145 }, 8146 }; 8147 8148 pathname = kmalloc(PATH_MAX, GFP_KERNEL); 8149 if (pathname == NULL) { 8150 cgroup_event.path = path_enomem; 8151 } else { 8152 /* just to be sure to have enough space for alignment */ 8153 cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64)); 8154 cgroup_event.path = pathname; 8155 } 8156 8157 /* 8158 * Since our buffer works in 8 byte units we need to align our string 8159 * size to a multiple of 8. However, we must guarantee the tail end is 8160 * zero'd out to avoid leaking random bits to userspace. 
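 *
 * For a hypothetical path such as "/user.slice", that is 12 bytes
 * including the NUL terminator, padded with four more zero bytes so
 * that 16 bytes (two u64s) go out to the buffer.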
8161 */ 8162 size = strlen(cgroup_event.path) + 1; 8163 while (!IS_ALIGNED(size, sizeof(u64))) 8164 cgroup_event.path[size++] = '\0'; 8165 8166 cgroup_event.event_id.header.size += size; 8167 cgroup_event.path_size = size; 8168 8169 perf_iterate_sb(perf_event_cgroup_output, 8170 &cgroup_event, 8171 NULL); 8172 8173 kfree(pathname); 8174 } 8175 8176 #endif 8177 8178 /* 8179 * mmap tracking 8180 */ 8181 8182 struct perf_mmap_event { 8183 struct vm_area_struct *vma; 8184 8185 const char *file_name; 8186 int file_size; 8187 int maj, min; 8188 u64 ino; 8189 u64 ino_generation; 8190 u32 prot, flags; 8191 u8 build_id[BUILD_ID_SIZE_MAX]; 8192 u32 build_id_size; 8193 8194 struct { 8195 struct perf_event_header header; 8196 8197 u32 pid; 8198 u32 tid; 8199 u64 start; 8200 u64 len; 8201 u64 pgoff; 8202 } event_id; 8203 }; 8204 8205 static int perf_event_mmap_match(struct perf_event *event, 8206 void *data) 8207 { 8208 struct perf_mmap_event *mmap_event = data; 8209 struct vm_area_struct *vma = mmap_event->vma; 8210 int executable = vma->vm_flags & VM_EXEC; 8211 8212 return (!executable && event->attr.mmap_data) || 8213 (executable && (event->attr.mmap || event->attr.mmap2)); 8214 } 8215 8216 static void perf_event_mmap_output(struct perf_event *event, 8217 void *data) 8218 { 8219 struct perf_mmap_event *mmap_event = data; 8220 struct perf_output_handle handle; 8221 struct perf_sample_data sample; 8222 int size = mmap_event->event_id.header.size; 8223 u32 type = mmap_event->event_id.header.type; 8224 bool use_build_id; 8225 int ret; 8226 8227 if (!perf_event_mmap_match(event, data)) 8228 return; 8229 8230 if (event->attr.mmap2) { 8231 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 8232 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 8233 mmap_event->event_id.header.size += sizeof(mmap_event->min); 8234 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 8235 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 8236 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 8237 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 8238 } 8239 8240 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 8241 ret = perf_output_begin(&handle, &sample, event, 8242 mmap_event->event_id.header.size); 8243 if (ret) 8244 goto out; 8245 8246 mmap_event->event_id.pid = perf_event_pid(event, current); 8247 mmap_event->event_id.tid = perf_event_tid(event, current); 8248 8249 use_build_id = event->attr.build_id && mmap_event->build_id_size; 8250 8251 if (event->attr.mmap2 && use_build_id) 8252 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; 8253 8254 perf_output_put(&handle, mmap_event->event_id); 8255 8256 if (event->attr.mmap2) { 8257 if (use_build_id) { 8258 u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 }; 8259 8260 __output_copy(&handle, size, 4); 8261 __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX); 8262 } else { 8263 perf_output_put(&handle, mmap_event->maj); 8264 perf_output_put(&handle, mmap_event->min); 8265 perf_output_put(&handle, mmap_event->ino); 8266 perf_output_put(&handle, mmap_event->ino_generation); 8267 } 8268 perf_output_put(&handle, mmap_event->prot); 8269 perf_output_put(&handle, mmap_event->flags); 8270 } 8271 8272 __output_copy(&handle, mmap_event->file_name, 8273 mmap_event->file_size); 8274 8275 perf_event__output_id_sample(event, &handle, &sample); 8276 8277 perf_output_end(&handle); 8278 out: 8279 mmap_event->event_id.header.size = size; 8280 
mmap_event->event_id.header.type = type; 8281 } 8282 8283 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 8284 { 8285 struct vm_area_struct *vma = mmap_event->vma; 8286 struct file *file = vma->vm_file; 8287 int maj = 0, min = 0; 8288 u64 ino = 0, gen = 0; 8289 u32 prot = 0, flags = 0; 8290 unsigned int size; 8291 char tmp[16]; 8292 char *buf = NULL; 8293 char *name; 8294 8295 if (vma->vm_flags & VM_READ) 8296 prot |= PROT_READ; 8297 if (vma->vm_flags & VM_WRITE) 8298 prot |= PROT_WRITE; 8299 if (vma->vm_flags & VM_EXEC) 8300 prot |= PROT_EXEC; 8301 8302 if (vma->vm_flags & VM_MAYSHARE) 8303 flags = MAP_SHARED; 8304 else 8305 flags = MAP_PRIVATE; 8306 8307 if (vma->vm_flags & VM_DENYWRITE) 8308 flags |= MAP_DENYWRITE; 8309 if (vma->vm_flags & VM_MAYEXEC) 8310 flags |= MAP_EXECUTABLE; 8311 if (vma->vm_flags & VM_LOCKED) 8312 flags |= MAP_LOCKED; 8313 if (is_vm_hugetlb_page(vma)) 8314 flags |= MAP_HUGETLB; 8315 8316 if (file) { 8317 struct inode *inode; 8318 dev_t dev; 8319 8320 buf = kmalloc(PATH_MAX, GFP_KERNEL); 8321 if (!buf) { 8322 name = "//enomem"; 8323 goto cpy_name; 8324 } 8325 /* 8326 * d_path() works from the end of the rb backwards, so we 8327 * need to add enough zero bytes after the string to handle 8328 * the 64bit alignment we do later. 8329 */ 8330 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 8331 if (IS_ERR(name)) { 8332 name = "//toolong"; 8333 goto cpy_name; 8334 } 8335 inode = file_inode(vma->vm_file); 8336 dev = inode->i_sb->s_dev; 8337 ino = inode->i_ino; 8338 gen = inode->i_generation; 8339 maj = MAJOR(dev); 8340 min = MINOR(dev); 8341 8342 goto got_name; 8343 } else { 8344 if (vma->vm_ops && vma->vm_ops->name) { 8345 name = (char *) vma->vm_ops->name(vma); 8346 if (name) 8347 goto cpy_name; 8348 } 8349 8350 name = (char *)arch_vma_name(vma); 8351 if (name) 8352 goto cpy_name; 8353 8354 if (vma->vm_start <= vma->vm_mm->start_brk && 8355 vma->vm_end >= vma->vm_mm->brk) { 8356 name = "[heap]"; 8357 goto cpy_name; 8358 } 8359 if (vma->vm_start <= vma->vm_mm->start_stack && 8360 vma->vm_end >= vma->vm_mm->start_stack) { 8361 name = "[stack]"; 8362 goto cpy_name; 8363 } 8364 8365 name = "//anon"; 8366 goto cpy_name; 8367 } 8368 8369 cpy_name: 8370 strlcpy(tmp, name, sizeof(tmp)); 8371 name = tmp; 8372 got_name: 8373 /* 8374 * Since our buffer works in 8 byte units we need to align our string 8375 * size to a multiple of 8. However, we must guarantee the tail end is 8376 * zero'd out to avoid leaking random bits to userspace. 8377 */ 8378 size = strlen(name)+1; 8379 while (!IS_ALIGNED(size, sizeof(u64))) 8380 name[size++] = '\0'; 8381 8382 mmap_event->file_name = name; 8383 mmap_event->file_size = size; 8384 mmap_event->maj = maj; 8385 mmap_event->min = min; 8386 mmap_event->ino = ino; 8387 mmap_event->ino_generation = gen; 8388 mmap_event->prot = prot; 8389 mmap_event->flags = flags; 8390 8391 if (!(vma->vm_flags & VM_EXEC)) 8392 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 8393 8394 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 8395 8396 if (atomic_read(&nr_build_id_events)) 8397 build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); 8398 8399 perf_iterate_sb(perf_event_mmap_output, 8400 mmap_event, 8401 NULL); 8402 8403 kfree(buf); 8404 } 8405 8406 /* 8407 * Check whether inode and address range match filter criteria. 
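 * A filter matches when its path refers to the same inode as @file and its
 * [offset, offset + size) byte range overlaps the mapped range being checked.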
8408 */ 8409 static bool perf_addr_filter_match(struct perf_addr_filter *filter, 8410 struct file *file, unsigned long offset, 8411 unsigned long size) 8412 { 8413 /* d_inode(NULL) won't be equal to any mapped user-space file */ 8414 if (!filter->path.dentry) 8415 return false; 8416 8417 if (d_inode(filter->path.dentry) != file_inode(file)) 8418 return false; 8419 8420 if (filter->offset > offset + size) 8421 return false; 8422 8423 if (filter->offset + filter->size < offset) 8424 return false; 8425 8426 return true; 8427 } 8428 8429 static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter, 8430 struct vm_area_struct *vma, 8431 struct perf_addr_filter_range *fr) 8432 { 8433 unsigned long vma_size = vma->vm_end - vma->vm_start; 8434 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; 8435 struct file *file = vma->vm_file; 8436 8437 if (!perf_addr_filter_match(filter, file, off, vma_size)) 8438 return false; 8439 8440 if (filter->offset < off) { 8441 fr->start = vma->vm_start; 8442 fr->size = min(vma_size, filter->size - (off - filter->offset)); 8443 } else { 8444 fr->start = vma->vm_start + filter->offset - off; 8445 fr->size = min(vma->vm_end - fr->start, filter->size); 8446 } 8447 8448 return true; 8449 } 8450 8451 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) 8452 { 8453 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); 8454 struct vm_area_struct *vma = data; 8455 struct perf_addr_filter *filter; 8456 unsigned int restart = 0, count = 0; 8457 unsigned long flags; 8458 8459 if (!has_addr_filter(event)) 8460 return; 8461 8462 if (!vma->vm_file) 8463 return; 8464 8465 raw_spin_lock_irqsave(&ifh->lock, flags); 8466 list_for_each_entry(filter, &ifh->list, entry) { 8467 if (perf_addr_filter_vma_adjust(filter, vma, 8468 &event->addr_filter_ranges[count])) 8469 restart++; 8470 8471 count++; 8472 } 8473 8474 if (restart) 8475 event->addr_filters_gen++; 8476 raw_spin_unlock_irqrestore(&ifh->lock, flags); 8477 8478 if (restart) 8479 perf_event_stop(event, 1); 8480 } 8481 8482 /* 8483 * Adjust all task's events' filters to the new vma 8484 */ 8485 static void perf_addr_filters_adjust(struct vm_area_struct *vma) 8486 { 8487 struct perf_event_context *ctx; 8488 int ctxn; 8489 8490 /* 8491 * Data tracing isn't supported yet and as such there is no need 8492 * to keep track of anything that isn't related to executable code: 8493 */ 8494 if (!(vma->vm_flags & VM_EXEC)) 8495 return; 8496 8497 rcu_read_lock(); 8498 for_each_task_context_nr(ctxn) { 8499 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 8500 if (!ctx) 8501 continue; 8502 8503 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); 8504 } 8505 rcu_read_unlock(); 8506 } 8507 8508 void perf_event_mmap(struct vm_area_struct *vma) 8509 { 8510 struct perf_mmap_event mmap_event; 8511 8512 if (!atomic_read(&nr_mmap_events)) 8513 return; 8514 8515 mmap_event = (struct perf_mmap_event){ 8516 .vma = vma, 8517 /* .file_name */ 8518 /* .file_size */ 8519 .event_id = { 8520 .header = { 8521 .type = PERF_RECORD_MMAP, 8522 .misc = PERF_RECORD_MISC_USER, 8523 /* .size */ 8524 }, 8525 /* .pid */ 8526 /* .tid */ 8527 .start = vma->vm_start, 8528 .len = vma->vm_end - vma->vm_start, 8529 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 8530 }, 8531 /* .maj (attr_mmap2 only) */ 8532 /* .min (attr_mmap2 only) */ 8533 /* .ino (attr_mmap2 only) */ 8534 /* .ino_generation (attr_mmap2 only) */ 8535 /* .prot (attr_mmap2 only) */ 8536 /* .flags (attr_mmap2 only) */ 8537 }; 8538 8539 
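	/*
	 * A new executable mapping can change what existing address filters
	 * resolve to, so re-evaluate them before emitting the record below.
	 */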
perf_addr_filters_adjust(vma); 8540 perf_event_mmap_event(&mmap_event); 8541 } 8542 8543 void perf_event_aux_event(struct perf_event *event, unsigned long head, 8544 unsigned long size, u64 flags) 8545 { 8546 struct perf_output_handle handle; 8547 struct perf_sample_data sample; 8548 struct perf_aux_event { 8549 struct perf_event_header header; 8550 u64 offset; 8551 u64 size; 8552 u64 flags; 8553 } rec = { 8554 .header = { 8555 .type = PERF_RECORD_AUX, 8556 .misc = 0, 8557 .size = sizeof(rec), 8558 }, 8559 .offset = head, 8560 .size = size, 8561 .flags = flags, 8562 }; 8563 int ret; 8564 8565 perf_event_header__init_id(&rec.header, &sample, event); 8566 ret = perf_output_begin(&handle, &sample, event, rec.header.size); 8567 8568 if (ret) 8569 return; 8570 8571 perf_output_put(&handle, rec); 8572 perf_event__output_id_sample(event, &handle, &sample); 8573 8574 perf_output_end(&handle); 8575 } 8576 8577 /* 8578 * Lost/dropped samples logging 8579 */ 8580 void perf_log_lost_samples(struct perf_event *event, u64 lost) 8581 { 8582 struct perf_output_handle handle; 8583 struct perf_sample_data sample; 8584 int ret; 8585 8586 struct { 8587 struct perf_event_header header; 8588 u64 lost; 8589 } lost_samples_event = { 8590 .header = { 8591 .type = PERF_RECORD_LOST_SAMPLES, 8592 .misc = 0, 8593 .size = sizeof(lost_samples_event), 8594 }, 8595 .lost = lost, 8596 }; 8597 8598 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 8599 8600 ret = perf_output_begin(&handle, &sample, event, 8601 lost_samples_event.header.size); 8602 if (ret) 8603 return; 8604 8605 perf_output_put(&handle, lost_samples_event); 8606 perf_event__output_id_sample(event, &handle, &sample); 8607 perf_output_end(&handle); 8608 } 8609 8610 /* 8611 * context_switch tracking 8612 */ 8613 8614 struct perf_switch_event { 8615 struct task_struct *task; 8616 struct task_struct *next_prev; 8617 8618 struct { 8619 struct perf_event_header header; 8620 u32 next_prev_pid; 8621 u32 next_prev_tid; 8622 } event_id; 8623 }; 8624 8625 static int perf_event_switch_match(struct perf_event *event) 8626 { 8627 return event->attr.context_switch; 8628 } 8629 8630 static void perf_event_switch_output(struct perf_event *event, void *data) 8631 { 8632 struct perf_switch_event *se = data; 8633 struct perf_output_handle handle; 8634 struct perf_sample_data sample; 8635 int ret; 8636 8637 if (!perf_event_switch_match(event)) 8638 return; 8639 8640 /* Only CPU-wide events are allowed to see next/prev pid/tid */ 8641 if (event->ctx->task) { 8642 se->event_id.header.type = PERF_RECORD_SWITCH; 8643 se->event_id.header.size = sizeof(se->event_id.header); 8644 } else { 8645 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; 8646 se->event_id.header.size = sizeof(se->event_id); 8647 se->event_id.next_prev_pid = 8648 perf_event_pid(event, se->next_prev); 8649 se->event_id.next_prev_tid = 8650 perf_event_tid(event, se->next_prev); 8651 } 8652 8653 perf_event_header__init_id(&se->event_id.header, &sample, event); 8654 8655 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); 8656 if (ret) 8657 return; 8658 8659 if (event->ctx->task) 8660 perf_output_put(&handle, se->event_id.header); 8661 else 8662 perf_output_put(&handle, se->event_id); 8663 8664 perf_event__output_id_sample(event, &handle, &sample); 8665 8666 perf_output_end(&handle); 8667 } 8668 8669 static void perf_event_switch(struct task_struct *task, 8670 struct task_struct *next_prev, bool sched_in) 8671 { 8672 struct perf_switch_event switch_event; 8673 8674 /* 
N.B. caller checks nr_switch_events != 0 */ 8675 8676 switch_event = (struct perf_switch_event){ 8677 .task = task, 8678 .next_prev = next_prev, 8679 .event_id = { 8680 .header = { 8681 /* .type */ 8682 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT, 8683 /* .size */ 8684 }, 8685 /* .next_prev_pid */ 8686 /* .next_prev_tid */ 8687 }, 8688 }; 8689 8690 if (!sched_in && task->state == TASK_RUNNING) 8691 switch_event.event_id.header.misc |= 8692 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; 8693 8694 perf_iterate_sb(perf_event_switch_output, 8695 &switch_event, 8696 NULL); 8697 } 8698 8699 /* 8700 * IRQ throttle logging 8701 */ 8702 8703 static void perf_log_throttle(struct perf_event *event, int enable) 8704 { 8705 struct perf_output_handle handle; 8706 struct perf_sample_data sample; 8707 int ret; 8708 8709 struct { 8710 struct perf_event_header header; 8711 u64 time; 8712 u64 id; 8713 u64 stream_id; 8714 } throttle_event = { 8715 .header = { 8716 .type = PERF_RECORD_THROTTLE, 8717 .misc = 0, 8718 .size = sizeof(throttle_event), 8719 }, 8720 .time = perf_event_clock(event), 8721 .id = primary_event_id(event), 8722 .stream_id = event->id, 8723 }; 8724 8725 if (enable) 8726 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 8727 8728 perf_event_header__init_id(&throttle_event.header, &sample, event); 8729 8730 ret = perf_output_begin(&handle, &sample, event, 8731 throttle_event.header.size); 8732 if (ret) 8733 return; 8734 8735 perf_output_put(&handle, throttle_event); 8736 perf_event__output_id_sample(event, &handle, &sample); 8737 perf_output_end(&handle); 8738 } 8739 8740 /* 8741 * ksymbol register/unregister tracking 8742 */ 8743 8744 struct perf_ksymbol_event { 8745 const char *name; 8746 int name_len; 8747 struct { 8748 struct perf_event_header header; 8749 u64 addr; 8750 u32 len; 8751 u16 ksym_type; 8752 u16 flags; 8753 } event_id; 8754 }; 8755 8756 static int perf_event_ksymbol_match(struct perf_event *event) 8757 { 8758 return event->attr.ksymbol; 8759 } 8760 8761 static void perf_event_ksymbol_output(struct perf_event *event, void *data) 8762 { 8763 struct perf_ksymbol_event *ksymbol_event = data; 8764 struct perf_output_handle handle; 8765 struct perf_sample_data sample; 8766 int ret; 8767 8768 if (!perf_event_ksymbol_match(event)) 8769 return; 8770 8771 perf_event_header__init_id(&ksymbol_event->event_id.header, 8772 &sample, event); 8773 ret = perf_output_begin(&handle, &sample, event, 8774 ksymbol_event->event_id.header.size); 8775 if (ret) 8776 return; 8777 8778 perf_output_put(&handle, ksymbol_event->event_id); 8779 __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len); 8780 perf_event__output_id_sample(event, &handle, &sample); 8781 8782 perf_output_end(&handle); 8783 } 8784 8785 void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, 8786 const char *sym) 8787 { 8788 struct perf_ksymbol_event ksymbol_event; 8789 char name[KSYM_NAME_LEN]; 8790 u16 flags = 0; 8791 int name_len; 8792 8793 if (!atomic_read(&nr_ksymbol_events)) 8794 return; 8795 8796 if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX || 8797 ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN) 8798 goto err; 8799 8800 strlcpy(name, sym, KSYM_NAME_LEN); 8801 name_len = strlen(name) + 1; 8802 while (!IS_ALIGNED(name_len, sizeof(u64))) 8803 name[name_len++] = '\0'; 8804 BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64)); 8805 8806 if (unregister) 8807 flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER; 8808 8809 ksymbol_event = (struct perf_ksymbol_event){ 8810 .name = name, 8811 .name_len = name_len, 8812 
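		/* name_len already counts the NUL and the u64 padding added above. */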
.event_id = { 8813 .header = { 8814 .type = PERF_RECORD_KSYMBOL, 8815 .size = sizeof(ksymbol_event.event_id) + 8816 name_len, 8817 }, 8818 .addr = addr, 8819 .len = len, 8820 .ksym_type = ksym_type, 8821 .flags = flags, 8822 }, 8823 }; 8824 8825 perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL); 8826 return; 8827 err: 8828 WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type); 8829 } 8830 8831 /* 8832 * bpf program load/unload tracking 8833 */ 8834 8835 struct perf_bpf_event { 8836 struct bpf_prog *prog; 8837 struct { 8838 struct perf_event_header header; 8839 u16 type; 8840 u16 flags; 8841 u32 id; 8842 u8 tag[BPF_TAG_SIZE]; 8843 } event_id; 8844 }; 8845 8846 static int perf_event_bpf_match(struct perf_event *event) 8847 { 8848 return event->attr.bpf_event; 8849 } 8850 8851 static void perf_event_bpf_output(struct perf_event *event, void *data) 8852 { 8853 struct perf_bpf_event *bpf_event = data; 8854 struct perf_output_handle handle; 8855 struct perf_sample_data sample; 8856 int ret; 8857 8858 if (!perf_event_bpf_match(event)) 8859 return; 8860 8861 perf_event_header__init_id(&bpf_event->event_id.header, 8862 &sample, event); 8863 ret = perf_output_begin(&handle, data, event, 8864 bpf_event->event_id.header.size); 8865 if (ret) 8866 return; 8867 8868 perf_output_put(&handle, bpf_event->event_id); 8869 perf_event__output_id_sample(event, &handle, &sample); 8870 8871 perf_output_end(&handle); 8872 } 8873 8874 static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog, 8875 enum perf_bpf_event_type type) 8876 { 8877 bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD; 8878 int i; 8879 8880 if (prog->aux->func_cnt == 0) { 8881 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, 8882 (u64)(unsigned long)prog->bpf_func, 8883 prog->jited_len, unregister, 8884 prog->aux->ksym.name); 8885 } else { 8886 for (i = 0; i < prog->aux->func_cnt; i++) { 8887 struct bpf_prog *subprog = prog->aux->func[i]; 8888 8889 perf_event_ksymbol( 8890 PERF_RECORD_KSYMBOL_TYPE_BPF, 8891 (u64)(unsigned long)subprog->bpf_func, 8892 subprog->jited_len, unregister, 8893 prog->aux->ksym.name); 8894 } 8895 } 8896 } 8897 8898 void perf_event_bpf_event(struct bpf_prog *prog, 8899 enum perf_bpf_event_type type, 8900 u16 flags) 8901 { 8902 struct perf_bpf_event bpf_event; 8903 8904 if (type <= PERF_BPF_EVENT_UNKNOWN || 8905 type >= PERF_BPF_EVENT_MAX) 8906 return; 8907 8908 switch (type) { 8909 case PERF_BPF_EVENT_PROG_LOAD: 8910 case PERF_BPF_EVENT_PROG_UNLOAD: 8911 if (atomic_read(&nr_ksymbol_events)) 8912 perf_event_bpf_emit_ksymbols(prog, type); 8913 break; 8914 default: 8915 break; 8916 } 8917 8918 if (!atomic_read(&nr_bpf_events)) 8919 return; 8920 8921 bpf_event = (struct perf_bpf_event){ 8922 .prog = prog, 8923 .event_id = { 8924 .header = { 8925 .type = PERF_RECORD_BPF_EVENT, 8926 .size = sizeof(bpf_event.event_id), 8927 }, 8928 .type = type, 8929 .flags = flags, 8930 .id = prog->aux->id, 8931 }, 8932 }; 8933 8934 BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64)); 8935 8936 memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE); 8937 perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL); 8938 } 8939 8940 struct perf_text_poke_event { 8941 const void *old_bytes; 8942 const void *new_bytes; 8943 size_t pad; 8944 u16 old_len; 8945 u16 new_len; 8946 8947 struct { 8948 struct perf_event_header header; 8949 8950 u64 addr; 8951 } event_id; 8952 }; 8953 8954 static int perf_event_text_poke_match(struct perf_event *event) 8955 { 8956 return event->attr.text_poke; 8957 } 8958 8959 static void 
perf_event_text_poke_output(struct perf_event *event, void *data) 8960 { 8961 struct perf_text_poke_event *text_poke_event = data; 8962 struct perf_output_handle handle; 8963 struct perf_sample_data sample; 8964 u64 padding = 0; 8965 int ret; 8966 8967 if (!perf_event_text_poke_match(event)) 8968 return; 8969 8970 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); 8971 8972 ret = perf_output_begin(&handle, &sample, event, 8973 text_poke_event->event_id.header.size); 8974 if (ret) 8975 return; 8976 8977 perf_output_put(&handle, text_poke_event->event_id); 8978 perf_output_put(&handle, text_poke_event->old_len); 8979 perf_output_put(&handle, text_poke_event->new_len); 8980 8981 __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len); 8982 __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len); 8983 8984 if (text_poke_event->pad) 8985 __output_copy(&handle, &padding, text_poke_event->pad); 8986 8987 perf_event__output_id_sample(event, &handle, &sample); 8988 8989 perf_output_end(&handle); 8990 } 8991 8992 void perf_event_text_poke(const void *addr, const void *old_bytes, 8993 size_t old_len, const void *new_bytes, size_t new_len) 8994 { 8995 struct perf_text_poke_event text_poke_event; 8996 size_t tot, pad; 8997 8998 if (!atomic_read(&nr_text_poke_events)) 8999 return; 9000 9001 tot = sizeof(text_poke_event.old_len) + old_len; 9002 tot += sizeof(text_poke_event.new_len) + new_len; 9003 pad = ALIGN(tot, sizeof(u64)) - tot; 9004 9005 text_poke_event = (struct perf_text_poke_event){ 9006 .old_bytes = old_bytes, 9007 .new_bytes = new_bytes, 9008 .pad = pad, 9009 .old_len = old_len, 9010 .new_len = new_len, 9011 .event_id = { 9012 .header = { 9013 .type = PERF_RECORD_TEXT_POKE, 9014 .misc = PERF_RECORD_MISC_KERNEL, 9015 .size = sizeof(text_poke_event.event_id) + tot + pad, 9016 }, 9017 .addr = (unsigned long)addr, 9018 }, 9019 }; 9020 9021 perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL); 9022 } 9023 9024 void perf_event_itrace_started(struct perf_event *event) 9025 { 9026 event->attach_state |= PERF_ATTACH_ITRACE; 9027 } 9028 9029 static void perf_log_itrace_start(struct perf_event *event) 9030 { 9031 struct perf_output_handle handle; 9032 struct perf_sample_data sample; 9033 struct perf_aux_event { 9034 struct perf_event_header header; 9035 u32 pid; 9036 u32 tid; 9037 } rec; 9038 int ret; 9039 9040 if (event->parent) 9041 event = event->parent; 9042 9043 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 9044 event->attach_state & PERF_ATTACH_ITRACE) 9045 return; 9046 9047 rec.header.type = PERF_RECORD_ITRACE_START; 9048 rec.header.misc = 0; 9049 rec.header.size = sizeof(rec); 9050 rec.pid = perf_event_pid(event, current); 9051 rec.tid = perf_event_tid(event, current); 9052 9053 perf_event_header__init_id(&rec.header, &sample, event); 9054 ret = perf_output_begin(&handle, &sample, event, rec.header.size); 9055 9056 if (ret) 9057 return; 9058 9059 perf_output_put(&handle, rec); 9060 perf_event__output_id_sample(event, &handle, &sample); 9061 9062 perf_output_end(&handle); 9063 } 9064 9065 static int 9066 __perf_event_account_interrupt(struct perf_event *event, int throttle) 9067 { 9068 struct hw_perf_event *hwc = &event->hw; 9069 int ret = 0; 9070 u64 seq; 9071 9072 seq = __this_cpu_read(perf_throttled_seq); 9073 if (seq != hwc->interrupts_seq) { 9074 hwc->interrupts_seq = seq; 9075 hwc->interrupts = 1; 9076 } else { 9077 hwc->interrupts++; 9078 if (unlikely(throttle 9079 && hwc->interrupts >= 
max_samples_per_tick)) { 9080 __this_cpu_inc(perf_throttled_count); 9081 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); 9082 hwc->interrupts = MAX_INTERRUPTS; 9083 perf_log_throttle(event, 0); 9084 ret = 1; 9085 } 9086 } 9087 9088 if (event->attr.freq) { 9089 u64 now = perf_clock(); 9090 s64 delta = now - hwc->freq_time_stamp; 9091 9092 hwc->freq_time_stamp = now; 9093 9094 if (delta > 0 && delta < 2*TICK_NSEC) 9095 perf_adjust_period(event, delta, hwc->last_period, true); 9096 } 9097 9098 return ret; 9099 } 9100 9101 int perf_event_account_interrupt(struct perf_event *event) 9102 { 9103 return __perf_event_account_interrupt(event, 1); 9104 } 9105 9106 /* 9107 * Generic event overflow handling, sampling. 9108 */ 9109 9110 static int __perf_event_overflow(struct perf_event *event, 9111 int throttle, struct perf_sample_data *data, 9112 struct pt_regs *regs) 9113 { 9114 int events = atomic_read(&event->event_limit); 9115 int ret = 0; 9116 9117 /* 9118 * Non-sampling counters might still use the PMI to fold short 9119 * hardware counters, ignore those. 9120 */ 9121 if (unlikely(!is_sampling_event(event))) 9122 return 0; 9123 9124 ret = __perf_event_account_interrupt(event, throttle); 9125 9126 /* 9127 * XXX event_limit might not quite work as expected on inherited 9128 * events 9129 */ 9130 9131 event->pending_kill = POLL_IN; 9132 if (events && atomic_dec_and_test(&event->event_limit)) { 9133 ret = 1; 9134 event->pending_kill = POLL_HUP; 9135 event->pending_addr = data->addr; 9136 9137 perf_event_disable_inatomic(event); 9138 } 9139 9140 READ_ONCE(event->overflow_handler)(event, data, regs); 9141 9142 if (*perf_event_fasync(event) && event->pending_kill) { 9143 event->pending_wakeup = 1; 9144 irq_work_queue(&event->pending); 9145 } 9146 9147 return ret; 9148 } 9149 9150 int perf_event_overflow(struct perf_event *event, 9151 struct perf_sample_data *data, 9152 struct pt_regs *regs) 9153 { 9154 return __perf_event_overflow(event, 1, data, regs); 9155 } 9156 9157 /* 9158 * Generic software event infrastructure 9159 */ 9160 9161 struct swevent_htable { 9162 struct swevent_hlist *swevent_hlist; 9163 struct mutex hlist_mutex; 9164 int hlist_refcount; 9165 9166 /* Recursion avoidance in each contexts */ 9167 int recursion[PERF_NR_CONTEXTS]; 9168 }; 9169 9170 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 9171 9172 /* 9173 * We directly increment event->count and keep a second value in 9174 * event->hw.period_left to count intervals. This period event 9175 * is kept in the range [-sample_period, 0] so that we can use the 9176 * sign as trigger. 
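 *
 * For example (illustrative): with a sample_period of 100, period_left is
 * re-armed to -100; each software event adds its count, and once the value
 * becomes non-negative an overflow is raised and the period is re-armed.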
9177 */ 9178 9179 u64 perf_swevent_set_period(struct perf_event *event) 9180 { 9181 struct hw_perf_event *hwc = &event->hw; 9182 u64 period = hwc->last_period; 9183 u64 nr, offset; 9184 s64 old, val; 9185 9186 hwc->last_period = hwc->sample_period; 9187 9188 again: 9189 old = val = local64_read(&hwc->period_left); 9190 if (val < 0) 9191 return 0; 9192 9193 nr = div64_u64(period + val, period); 9194 offset = nr * period; 9195 val -= offset; 9196 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 9197 goto again; 9198 9199 return nr; 9200 } 9201 9202 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 9203 struct perf_sample_data *data, 9204 struct pt_regs *regs) 9205 { 9206 struct hw_perf_event *hwc = &event->hw; 9207 int throttle = 0; 9208 9209 if (!overflow) 9210 overflow = perf_swevent_set_period(event); 9211 9212 if (hwc->interrupts == MAX_INTERRUPTS) 9213 return; 9214 9215 for (; overflow; overflow--) { 9216 if (__perf_event_overflow(event, throttle, 9217 data, regs)) { 9218 /* 9219 * We inhibit the overflow from happening when 9220 * hwc->interrupts == MAX_INTERRUPTS. 9221 */ 9222 break; 9223 } 9224 throttle = 1; 9225 } 9226 } 9227 9228 static void perf_swevent_event(struct perf_event *event, u64 nr, 9229 struct perf_sample_data *data, 9230 struct pt_regs *regs) 9231 { 9232 struct hw_perf_event *hwc = &event->hw; 9233 9234 local64_add(nr, &event->count); 9235 9236 if (!regs) 9237 return; 9238 9239 if (!is_sampling_event(event)) 9240 return; 9241 9242 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 9243 data->period = nr; 9244 return perf_swevent_overflow(event, 1, data, regs); 9245 } else 9246 data->period = event->hw.last_period; 9247 9248 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 9249 return perf_swevent_overflow(event, 1, data, regs); 9250 9251 if (local64_add_negative(nr, &hwc->period_left)) 9252 return; 9253 9254 perf_swevent_overflow(event, 0, data, regs); 9255 } 9256 9257 static int perf_exclude_event(struct perf_event *event, 9258 struct pt_regs *regs) 9259 { 9260 if (event->hw.state & PERF_HES_STOPPED) 9261 return 1; 9262 9263 if (regs) { 9264 if (event->attr.exclude_user && user_mode(regs)) 9265 return 1; 9266 9267 if (event->attr.exclude_kernel && !user_mode(regs)) 9268 return 1; 9269 } 9270 9271 return 0; 9272 } 9273 9274 static int perf_swevent_match(struct perf_event *event, 9275 enum perf_type_id type, 9276 u32 event_id, 9277 struct perf_sample_data *data, 9278 struct pt_regs *regs) 9279 { 9280 if (event->attr.type != type) 9281 return 0; 9282 9283 if (event->attr.config != event_id) 9284 return 0; 9285 9286 if (perf_exclude_event(event, regs)) 9287 return 0; 9288 9289 return 1; 9290 } 9291 9292 static inline u64 swevent_hash(u64 type, u32 event_id) 9293 { 9294 u64 val = event_id | (type << 32); 9295 9296 return hash_64(val, SWEVENT_HLIST_BITS); 9297 } 9298 9299 static inline struct hlist_head * 9300 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 9301 { 9302 u64 hash = swevent_hash(type, event_id); 9303 9304 return &hlist->heads[hash]; 9305 } 9306 9307 /* For the read side: events when they trigger */ 9308 static inline struct hlist_head * 9309 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 9310 { 9311 struct swevent_hlist *hlist; 9312 9313 hlist = rcu_dereference(swhash->swevent_hlist); 9314 if (!hlist) 9315 return NULL; 9316 9317 return __find_swevent_head(hlist, type, event_id); 9318 } 9319 9320 /* For the event head insertion and removal 
in the hlist */ 9321 static inline struct hlist_head * 9322 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 9323 { 9324 struct swevent_hlist *hlist; 9325 u32 event_id = event->attr.config; 9326 u64 type = event->attr.type; 9327 9328 /* 9329 * Event scheduling is always serialized against hlist allocation 9330 * and release. Which makes the protected version suitable here. 9331 * The context lock guarantees that. 9332 */ 9333 hlist = rcu_dereference_protected(swhash->swevent_hlist, 9334 lockdep_is_held(&event->ctx->lock)); 9335 if (!hlist) 9336 return NULL; 9337 9338 return __find_swevent_head(hlist, type, event_id); 9339 } 9340 9341 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 9342 u64 nr, 9343 struct perf_sample_data *data, 9344 struct pt_regs *regs) 9345 { 9346 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 9347 struct perf_event *event; 9348 struct hlist_head *head; 9349 9350 rcu_read_lock(); 9351 head = find_swevent_head_rcu(swhash, type, event_id); 9352 if (!head) 9353 goto end; 9354 9355 hlist_for_each_entry_rcu(event, head, hlist_entry) { 9356 if (perf_swevent_match(event, type, event_id, data, regs)) 9357 perf_swevent_event(event, nr, data, regs); 9358 } 9359 end: 9360 rcu_read_unlock(); 9361 } 9362 9363 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 9364 9365 int perf_swevent_get_recursion_context(void) 9366 { 9367 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 9368 9369 return get_recursion_context(swhash->recursion); 9370 } 9371 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 9372 9373 void perf_swevent_put_recursion_context(int rctx) 9374 { 9375 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 9376 9377 put_recursion_context(swhash->recursion, rctx); 9378 } 9379 9380 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 9381 { 9382 struct perf_sample_data data; 9383 9384 if (WARN_ON_ONCE(!regs)) 9385 return; 9386 9387 perf_sample_data_init(&data, addr, 0); 9388 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 9389 } 9390 9391 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 9392 { 9393 int rctx; 9394 9395 preempt_disable_notrace(); 9396 rctx = perf_swevent_get_recursion_context(); 9397 if (unlikely(rctx < 0)) 9398 goto fail; 9399 9400 ___perf_sw_event(event_id, nr, regs, addr); 9401 9402 perf_swevent_put_recursion_context(rctx); 9403 fail: 9404 preempt_enable_notrace(); 9405 } 9406 9407 static void perf_swevent_read(struct perf_event *event) 9408 { 9409 } 9410 9411 static int perf_swevent_add(struct perf_event *event, int flags) 9412 { 9413 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 9414 struct hw_perf_event *hwc = &event->hw; 9415 struct hlist_head *head; 9416 9417 if (is_sampling_event(event)) { 9418 hwc->last_period = hwc->sample_period; 9419 perf_swevent_set_period(event); 9420 } 9421 9422 hwc->state = !(flags & PERF_EF_START); 9423 9424 head = find_swevent_head(swhash, event); 9425 if (WARN_ON_ONCE(!head)) 9426 return -EINVAL; 9427 9428 hlist_add_head_rcu(&event->hlist_entry, head); 9429 perf_event_update_userpage(event); 9430 9431 return 0; 9432 } 9433 9434 static void perf_swevent_del(struct perf_event *event, int flags) 9435 { 9436 hlist_del_rcu(&event->hlist_entry); 9437 } 9438 9439 static void perf_swevent_start(struct perf_event *event, int flags) 9440 { 9441 event->hw.state = 0; 9442 } 9443 9444 static void perf_swevent_stop(struct perf_event *event, int flags) 9445 { 9446 
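	/* Stopped software events are filtered out by perf_exclude_event(). */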
event->hw.state = PERF_HES_STOPPED; 9447 } 9448 9449 /* Deref the hlist from the update side */ 9450 static inline struct swevent_hlist * 9451 swevent_hlist_deref(struct swevent_htable *swhash) 9452 { 9453 return rcu_dereference_protected(swhash->swevent_hlist, 9454 lockdep_is_held(&swhash->hlist_mutex)); 9455 } 9456 9457 static void swevent_hlist_release(struct swevent_htable *swhash) 9458 { 9459 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 9460 9461 if (!hlist) 9462 return; 9463 9464 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 9465 kfree_rcu(hlist, rcu_head); 9466 } 9467 9468 static void swevent_hlist_put_cpu(int cpu) 9469 { 9470 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9471 9472 mutex_lock(&swhash->hlist_mutex); 9473 9474 if (!--swhash->hlist_refcount) 9475 swevent_hlist_release(swhash); 9476 9477 mutex_unlock(&swhash->hlist_mutex); 9478 } 9479 9480 static void swevent_hlist_put(void) 9481 { 9482 int cpu; 9483 9484 for_each_possible_cpu(cpu) 9485 swevent_hlist_put_cpu(cpu); 9486 } 9487 9488 static int swevent_hlist_get_cpu(int cpu) 9489 { 9490 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9491 int err = 0; 9492 9493 mutex_lock(&swhash->hlist_mutex); 9494 if (!swevent_hlist_deref(swhash) && 9495 cpumask_test_cpu(cpu, perf_online_mask)) { 9496 struct swevent_hlist *hlist; 9497 9498 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 9499 if (!hlist) { 9500 err = -ENOMEM; 9501 goto exit; 9502 } 9503 rcu_assign_pointer(swhash->swevent_hlist, hlist); 9504 } 9505 swhash->hlist_refcount++; 9506 exit: 9507 mutex_unlock(&swhash->hlist_mutex); 9508 9509 return err; 9510 } 9511 9512 static int swevent_hlist_get(void) 9513 { 9514 int err, cpu, failed_cpu; 9515 9516 mutex_lock(&pmus_lock); 9517 for_each_possible_cpu(cpu) { 9518 err = swevent_hlist_get_cpu(cpu); 9519 if (err) { 9520 failed_cpu = cpu; 9521 goto fail; 9522 } 9523 } 9524 mutex_unlock(&pmus_lock); 9525 return 0; 9526 fail: 9527 for_each_possible_cpu(cpu) { 9528 if (cpu == failed_cpu) 9529 break; 9530 swevent_hlist_put_cpu(cpu); 9531 } 9532 mutex_unlock(&pmus_lock); 9533 return err; 9534 } 9535 9536 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 9537 9538 static void sw_perf_event_destroy(struct perf_event *event) 9539 { 9540 u64 event_id = event->attr.config; 9541 9542 WARN_ON(event->parent); 9543 9544 static_key_slow_dec(&perf_swevent_enabled[event_id]); 9545 swevent_hlist_put(); 9546 } 9547 9548 static int perf_swevent_init(struct perf_event *event) 9549 { 9550 u64 event_id = event->attr.config; 9551 9552 if (event->attr.type != PERF_TYPE_SOFTWARE) 9553 return -ENOENT; 9554 9555 /* 9556 * no branch sampling for software events 9557 */ 9558 if (has_branch_stack(event)) 9559 return -EOPNOTSUPP; 9560 9561 switch (event_id) { 9562 case PERF_COUNT_SW_CPU_CLOCK: 9563 case PERF_COUNT_SW_TASK_CLOCK: 9564 return -ENOENT; 9565 9566 default: 9567 break; 9568 } 9569 9570 if (event_id >= PERF_COUNT_SW_MAX) 9571 return -ENOENT; 9572 9573 if (!event->parent) { 9574 int err; 9575 9576 err = swevent_hlist_get(); 9577 if (err) 9578 return err; 9579 9580 static_key_slow_inc(&perf_swevent_enabled[event_id]); 9581 event->destroy = sw_perf_event_destroy; 9582 } 9583 9584 return 0; 9585 } 9586 9587 static struct pmu perf_swevent = { 9588 .task_ctx_nr = perf_sw_context, 9589 9590 .capabilities = PERF_PMU_CAP_NO_NMI, 9591 9592 .event_init = perf_swevent_init, 9593 .add = perf_swevent_add, 9594 .del = perf_swevent_del, 9595 .start = perf_swevent_start, 9596 .stop = perf_swevent_stop, 9597 .read = 
perf_swevent_read, 9598 }; 9599 9600 #ifdef CONFIG_EVENT_TRACING 9601 9602 static int perf_tp_filter_match(struct perf_event *event, 9603 struct perf_sample_data *data) 9604 { 9605 void *record = data->raw->frag.data; 9606 9607 /* only top level events have filters set */ 9608 if (event->parent) 9609 event = event->parent; 9610 9611 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 9612 return 1; 9613 return 0; 9614 } 9615 9616 static int perf_tp_event_match(struct perf_event *event, 9617 struct perf_sample_data *data, 9618 struct pt_regs *regs) 9619 { 9620 if (event->hw.state & PERF_HES_STOPPED) 9621 return 0; 9622 /* 9623 * If exclude_kernel, only trace user-space tracepoints (uprobes) 9624 */ 9625 if (event->attr.exclude_kernel && !user_mode(regs)) 9626 return 0; 9627 9628 if (!perf_tp_filter_match(event, data)) 9629 return 0; 9630 9631 return 1; 9632 } 9633 9634 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, 9635 struct trace_event_call *call, u64 count, 9636 struct pt_regs *regs, struct hlist_head *head, 9637 struct task_struct *task) 9638 { 9639 if (bpf_prog_array_valid(call)) { 9640 *(struct pt_regs **)raw_data = regs; 9641 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) { 9642 perf_swevent_put_recursion_context(rctx); 9643 return; 9644 } 9645 } 9646 perf_tp_event(call->event.type, count, raw_data, size, regs, head, 9647 rctx, task); 9648 } 9649 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); 9650 9651 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, 9652 struct pt_regs *regs, struct hlist_head *head, int rctx, 9653 struct task_struct *task) 9654 { 9655 struct perf_sample_data data; 9656 struct perf_event *event; 9657 9658 struct perf_raw_record raw = { 9659 .frag = { 9660 .size = entry_size, 9661 .data = record, 9662 }, 9663 }; 9664 9665 perf_sample_data_init(&data, 0, 0); 9666 data.raw = &raw; 9667 9668 perf_trace_buf_update(record, event_type); 9669 9670 hlist_for_each_entry_rcu(event, head, hlist_entry) { 9671 if (perf_tp_event_match(event, &data, regs)) 9672 perf_swevent_event(event, count, &data, regs); 9673 } 9674 9675 /* 9676 * If we got specified a target task, also iterate its context and 9677 * deliver this event there too. 
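	 * Only events bound to the current CPU whose tracepoint type and
	 * config match are delivered there; see the checks in the loop below.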
9678 */ 9679 if (task && task != current) { 9680 struct perf_event_context *ctx; 9681 struct trace_entry *entry = record; 9682 9683 rcu_read_lock(); 9684 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 9685 if (!ctx) 9686 goto unlock; 9687 9688 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 9689 if (event->cpu != smp_processor_id()) 9690 continue; 9691 if (event->attr.type != PERF_TYPE_TRACEPOINT) 9692 continue; 9693 if (event->attr.config != entry->type) 9694 continue; 9695 if (perf_tp_event_match(event, &data, regs)) 9696 perf_swevent_event(event, count, &data, regs); 9697 } 9698 unlock: 9699 rcu_read_unlock(); 9700 } 9701 9702 perf_swevent_put_recursion_context(rctx); 9703 } 9704 EXPORT_SYMBOL_GPL(perf_tp_event); 9705 9706 static void tp_perf_event_destroy(struct perf_event *event) 9707 { 9708 perf_trace_destroy(event); 9709 } 9710 9711 static int perf_tp_event_init(struct perf_event *event) 9712 { 9713 int err; 9714 9715 if (event->attr.type != PERF_TYPE_TRACEPOINT) 9716 return -ENOENT; 9717 9718 /* 9719 * no branch sampling for tracepoint events 9720 */ 9721 if (has_branch_stack(event)) 9722 return -EOPNOTSUPP; 9723 9724 err = perf_trace_init(event); 9725 if (err) 9726 return err; 9727 9728 event->destroy = tp_perf_event_destroy; 9729 9730 return 0; 9731 } 9732 9733 static struct pmu perf_tracepoint = { 9734 .task_ctx_nr = perf_sw_context, 9735 9736 .event_init = perf_tp_event_init, 9737 .add = perf_trace_add, 9738 .del = perf_trace_del, 9739 .start = perf_swevent_start, 9740 .stop = perf_swevent_stop, 9741 .read = perf_swevent_read, 9742 }; 9743 9744 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 9745 /* 9746 * Flags in config, used by dynamic PMU kprobe and uprobe 9747 * The flags should match following PMU_FORMAT_ATTR(). 9748 * 9749 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe 9750 * if not set, create kprobe/uprobe 9751 * 9752 * The following values specify a reference counter (or semaphore in the 9753 * terminology of tools like dtrace, systemtap, etc.) Userspace Statically 9754 * Defined Tracepoints (USDT). Currently, we use 40 bit for the offset. 
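 *
 * An illustrative encoding, per the enum and PMU_FORMAT_ATTR()s below: a
 * uretprobe with a reference counter at file offset 0x1c would use
 *
 *	attr.config = ((u64)0x1c << PERF_UPROBE_REF_CTR_OFFSET_SHIFT) |
 *		      PERF_PROBE_CONFIG_IS_RETPROBE;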
9755 * 9756 * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config as th offset 9757 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left 9758 */ 9759 enum perf_probe_config { 9760 PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */ 9761 PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, 9762 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS, 9763 }; 9764 9765 PMU_FORMAT_ATTR(retprobe, "config:0"); 9766 #endif 9767 9768 #ifdef CONFIG_KPROBE_EVENTS 9769 static struct attribute *kprobe_attrs[] = { 9770 &format_attr_retprobe.attr, 9771 NULL, 9772 }; 9773 9774 static struct attribute_group kprobe_format_group = { 9775 .name = "format", 9776 .attrs = kprobe_attrs, 9777 }; 9778 9779 static const struct attribute_group *kprobe_attr_groups[] = { 9780 &kprobe_format_group, 9781 NULL, 9782 }; 9783 9784 static int perf_kprobe_event_init(struct perf_event *event); 9785 static struct pmu perf_kprobe = { 9786 .task_ctx_nr = perf_sw_context, 9787 .event_init = perf_kprobe_event_init, 9788 .add = perf_trace_add, 9789 .del = perf_trace_del, 9790 .start = perf_swevent_start, 9791 .stop = perf_swevent_stop, 9792 .read = perf_swevent_read, 9793 .attr_groups = kprobe_attr_groups, 9794 }; 9795 9796 static int perf_kprobe_event_init(struct perf_event *event) 9797 { 9798 int err; 9799 bool is_retprobe; 9800 9801 if (event->attr.type != perf_kprobe.type) 9802 return -ENOENT; 9803 9804 if (!perfmon_capable()) 9805 return -EACCES; 9806 9807 /* 9808 * no branch sampling for probe events 9809 */ 9810 if (has_branch_stack(event)) 9811 return -EOPNOTSUPP; 9812 9813 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; 9814 err = perf_kprobe_init(event, is_retprobe); 9815 if (err) 9816 return err; 9817 9818 event->destroy = perf_kprobe_destroy; 9819 9820 return 0; 9821 } 9822 #endif /* CONFIG_KPROBE_EVENTS */ 9823 9824 #ifdef CONFIG_UPROBE_EVENTS 9825 PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63"); 9826 9827 static struct attribute *uprobe_attrs[] = { 9828 &format_attr_retprobe.attr, 9829 &format_attr_ref_ctr_offset.attr, 9830 NULL, 9831 }; 9832 9833 static struct attribute_group uprobe_format_group = { 9834 .name = "format", 9835 .attrs = uprobe_attrs, 9836 }; 9837 9838 static const struct attribute_group *uprobe_attr_groups[] = { 9839 &uprobe_format_group, 9840 NULL, 9841 }; 9842 9843 static int perf_uprobe_event_init(struct perf_event *event); 9844 static struct pmu perf_uprobe = { 9845 .task_ctx_nr = perf_sw_context, 9846 .event_init = perf_uprobe_event_init, 9847 .add = perf_trace_add, 9848 .del = perf_trace_del, 9849 .start = perf_swevent_start, 9850 .stop = perf_swevent_stop, 9851 .read = perf_swevent_read, 9852 .attr_groups = uprobe_attr_groups, 9853 }; 9854 9855 static int perf_uprobe_event_init(struct perf_event *event) 9856 { 9857 int err; 9858 unsigned long ref_ctr_offset; 9859 bool is_retprobe; 9860 9861 if (event->attr.type != perf_uprobe.type) 9862 return -ENOENT; 9863 9864 if (!perfmon_capable()) 9865 return -EACCES; 9866 9867 /* 9868 * no branch sampling for probe events 9869 */ 9870 if (has_branch_stack(event)) 9871 return -EOPNOTSUPP; 9872 9873 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; 9874 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; 9875 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); 9876 if (err) 9877 return err; 9878 9879 event->destroy = perf_uprobe_destroy; 9880 9881 return 0; 9882 } 9883 #endif /* CONFIG_UPROBE_EVENTS */ 9884 9885 static inline void perf_tp_register(void) 9886 { 9887 
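	/*
	 * The tracepoint PMU claims the fixed PERF_TYPE_TRACEPOINT type; the
	 * kprobe/uprobe PMUs pass -1 and are assigned dynamic types, which
	 * userspace can read from their sysfs "type" attribute.
	 */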
perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 9888 #ifdef CONFIG_KPROBE_EVENTS 9889 perf_pmu_register(&perf_kprobe, "kprobe", -1); 9890 #endif 9891 #ifdef CONFIG_UPROBE_EVENTS 9892 perf_pmu_register(&perf_uprobe, "uprobe", -1); 9893 #endif 9894 } 9895 9896 static void perf_event_free_filter(struct perf_event *event) 9897 { 9898 ftrace_profile_free_filter(event); 9899 } 9900 9901 #ifdef CONFIG_BPF_SYSCALL 9902 static void bpf_overflow_handler(struct perf_event *event, 9903 struct perf_sample_data *data, 9904 struct pt_regs *regs) 9905 { 9906 struct bpf_perf_event_data_kern ctx = { 9907 .data = data, 9908 .event = event, 9909 }; 9910 int ret = 0; 9911 9912 ctx.regs = perf_arch_bpf_user_pt_regs(regs); 9913 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) 9914 goto out; 9915 rcu_read_lock(); 9916 ret = BPF_PROG_RUN(event->prog, &ctx); 9917 rcu_read_unlock(); 9918 out: 9919 __this_cpu_dec(bpf_prog_active); 9920 if (!ret) 9921 return; 9922 9923 event->orig_overflow_handler(event, data, regs); 9924 } 9925 9926 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) 9927 { 9928 struct bpf_prog *prog; 9929 9930 if (event->overflow_handler_context) 9931 /* hw breakpoint or kernel counter */ 9932 return -EINVAL; 9933 9934 if (event->prog) 9935 return -EEXIST; 9936 9937 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT); 9938 if (IS_ERR(prog)) 9939 return PTR_ERR(prog); 9940 9941 if (event->attr.precise_ip && 9942 prog->call_get_stack && 9943 (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || 9944 event->attr.exclude_callchain_kernel || 9945 event->attr.exclude_callchain_user)) { 9946 /* 9947 * On perf_event with precise_ip, calling bpf_get_stack() 9948 * may trigger unwinder warnings and occasional crashes. 9949 * bpf_get_[stack|stackid] works around this issue by using 9950 * callchain attached to perf_sample_data. If the 9951 * perf_event does not have full (kernel and user) callchain 9952 * attached to perf_sample_data, do not allow attaching BPF 9953 * program that calls bpf_get_[stack|stackid].
9954 */ 9955 bpf_prog_put(prog); 9956 return -EPROTO; 9957 } 9958 9959 event->prog = prog; 9960 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); 9961 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); 9962 return 0; 9963 } 9964 9965 static void perf_event_free_bpf_handler(struct perf_event *event) 9966 { 9967 struct bpf_prog *prog = event->prog; 9968 9969 if (!prog) 9970 return; 9971 9972 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); 9973 event->prog = NULL; 9974 bpf_prog_put(prog); 9975 } 9976 #else 9977 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) 9978 { 9979 return -EOPNOTSUPP; 9980 } 9981 static void perf_event_free_bpf_handler(struct perf_event *event) 9982 { 9983 } 9984 #endif 9985 9986 /* 9987 * returns true if the event is a tracepoint, or a kprobe/upprobe created 9988 * with perf_event_open() 9989 */ 9990 static inline bool perf_event_is_tracing(struct perf_event *event) 9991 { 9992 if (event->pmu == &perf_tracepoint) 9993 return true; 9994 #ifdef CONFIG_KPROBE_EVENTS 9995 if (event->pmu == &perf_kprobe) 9996 return true; 9997 #endif 9998 #ifdef CONFIG_UPROBE_EVENTS 9999 if (event->pmu == &perf_uprobe) 10000 return true; 10001 #endif 10002 return false; 10003 } 10004 10005 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 10006 { 10007 bool is_kprobe, is_tracepoint, is_syscall_tp; 10008 struct bpf_prog *prog; 10009 int ret; 10010 10011 if (!perf_event_is_tracing(event)) 10012 return perf_event_set_bpf_handler(event, prog_fd); 10013 10014 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; 10015 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; 10016 is_syscall_tp = is_syscall_trace_event(event->tp_event); 10017 if (!is_kprobe && !is_tracepoint && !is_syscall_tp) 10018 /* bpf programs can only be attached to u/kprobe or tracepoint */ 10019 return -EINVAL; 10020 10021 prog = bpf_prog_get(prog_fd); 10022 if (IS_ERR(prog)) 10023 return PTR_ERR(prog); 10024 10025 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) || 10026 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) || 10027 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) { 10028 /* valid fd, but invalid bpf program type */ 10029 bpf_prog_put(prog); 10030 return -EINVAL; 10031 } 10032 10033 /* Kprobe override only works for kprobes, not uprobes. 
*/ 10034 if (prog->kprobe_override && 10035 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { 10036 bpf_prog_put(prog); 10037 return -EINVAL; 10038 } 10039 10040 if (is_tracepoint || is_syscall_tp) { 10041 int off = trace_event_get_offsets(event->tp_event); 10042 10043 if (prog->aux->max_ctx_offset > off) { 10044 bpf_prog_put(prog); 10045 return -EACCES; 10046 } 10047 } 10048 10049 ret = perf_event_attach_bpf_prog(event, prog); 10050 if (ret) 10051 bpf_prog_put(prog); 10052 return ret; 10053 } 10054 10055 static void perf_event_free_bpf_prog(struct perf_event *event) 10056 { 10057 if (!perf_event_is_tracing(event)) { 10058 perf_event_free_bpf_handler(event); 10059 return; 10060 } 10061 perf_event_detach_bpf_prog(event); 10062 } 10063 10064 #else 10065 10066 static inline void perf_tp_register(void) 10067 { 10068 } 10069 10070 static void perf_event_free_filter(struct perf_event *event) 10071 { 10072 } 10073 10074 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 10075 { 10076 return -ENOENT; 10077 } 10078 10079 static void perf_event_free_bpf_prog(struct perf_event *event) 10080 { 10081 } 10082 #endif /* CONFIG_EVENT_TRACING */ 10083 10084 #ifdef CONFIG_HAVE_HW_BREAKPOINT 10085 void perf_bp_event(struct perf_event *bp, void *data) 10086 { 10087 struct perf_sample_data sample; 10088 struct pt_regs *regs = data; 10089 10090 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 10091 10092 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 10093 perf_swevent_event(bp, 1, &sample, regs); 10094 } 10095 #endif 10096 10097 /* 10098 * Allocate a new address filter 10099 */ 10100 static struct perf_addr_filter * 10101 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) 10102 { 10103 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); 10104 struct perf_addr_filter *filter; 10105 10106 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node); 10107 if (!filter) 10108 return NULL; 10109 10110 INIT_LIST_HEAD(&filter->entry); 10111 list_add_tail(&filter->entry, filters); 10112 10113 return filter; 10114 } 10115 10116 static void free_filters_list(struct list_head *filters) 10117 { 10118 struct perf_addr_filter *filter, *iter; 10119 10120 list_for_each_entry_safe(filter, iter, filters, entry) { 10121 path_put(&filter->path); 10122 list_del(&filter->entry); 10123 kfree(filter); 10124 } 10125 } 10126 10127 /* 10128 * Free existing address filters and optionally install new ones 10129 */ 10130 static void perf_addr_filters_splice(struct perf_event *event, 10131 struct list_head *head) 10132 { 10133 unsigned long flags; 10134 LIST_HEAD(list); 10135 10136 if (!has_addr_filter(event)) 10137 return; 10138 10139 /* don't bother with children, they don't have their own filters */ 10140 if (event->parent) 10141 return; 10142 10143 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); 10144 10145 list_splice_init(&event->addr_filters.list, &list); 10146 if (head) 10147 list_splice(head, &event->addr_filters.list); 10148 10149 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); 10150 10151 free_filters_list(&list); 10152 } 10153 10154 /* 10155 * Scan through mm's vmas and see if one of them matches the 10156 * @filter; if so, adjust filter's address range. 10157 * Called with mm::mmap_lock down for reading. 
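 * Only the first vma that maps the filter's file and overlaps its range is
 * taken into account; see perf_addr_filter_vma_adjust().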
10158 */ 10159 static void perf_addr_filter_apply(struct perf_addr_filter *filter, 10160 struct mm_struct *mm, 10161 struct perf_addr_filter_range *fr) 10162 { 10163 struct vm_area_struct *vma; 10164 10165 for (vma = mm->mmap; vma; vma = vma->vm_next) { 10166 if (!vma->vm_file) 10167 continue; 10168 10169 if (perf_addr_filter_vma_adjust(filter, vma, fr)) 10170 return; 10171 } 10172 } 10173 10174 /* 10175 * Update event's address range filters based on the 10176 * task's existing mappings, if any. 10177 */ 10178 static void perf_event_addr_filters_apply(struct perf_event *event) 10179 { 10180 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); 10181 struct task_struct *task = READ_ONCE(event->ctx->task); 10182 struct perf_addr_filter *filter; 10183 struct mm_struct *mm = NULL; 10184 unsigned int count = 0; 10185 unsigned long flags; 10186 10187 /* 10188 * We may observe TASK_TOMBSTONE, which means that the event tear-down 10189 * will stop on the parent's child_mutex that our caller is also holding 10190 */ 10191 if (task == TASK_TOMBSTONE) 10192 return; 10193 10194 if (ifh->nr_file_filters) { 10195 mm = get_task_mm(event->ctx->task); 10196 if (!mm) 10197 goto restart; 10198 10199 mmap_read_lock(mm); 10200 } 10201 10202 raw_spin_lock_irqsave(&ifh->lock, flags); 10203 list_for_each_entry(filter, &ifh->list, entry) { 10204 if (filter->path.dentry) { 10205 /* 10206 * Adjust base offset if the filter is associated to a 10207 * binary that needs to be mapped: 10208 */ 10209 event->addr_filter_ranges[count].start = 0; 10210 event->addr_filter_ranges[count].size = 0; 10211 10212 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); 10213 } else { 10214 event->addr_filter_ranges[count].start = filter->offset; 10215 event->addr_filter_ranges[count].size = filter->size; 10216 } 10217 10218 count++; 10219 } 10220 10221 event->addr_filters_gen++; 10222 raw_spin_unlock_irqrestore(&ifh->lock, flags); 10223 10224 if (ifh->nr_file_filters) { 10225 mmap_read_unlock(mm); 10226 10227 mmput(mm); 10228 } 10229 10230 restart: 10231 perf_event_stop(event, 1); 10232 } 10233 10234 /* 10235 * Address range filtering: limiting the data to certain 10236 * instruction address ranges. Filters are ioctl()ed to us from 10237 * userspace as ascii strings. 10238 * 10239 * Filter string format: 10240 * 10241 * ACTION RANGE_SPEC 10242 * where ACTION is one of the 10243 * * "filter": limit the trace to this region 10244 * * "start": start tracing from this address 10245 * * "stop": stop tracing at this address/region; 10246 * RANGE_SPEC is 10247 * * for kernel addresses: <start address>[/<size>] 10248 * * for object files: <start address>[/<size>]@</path/to/object/file> 10249 * 10250 * if <size> is not specified or is zero, the range is treated as a single 10251 * address; not valid for ACTION=="filter". 
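 *
 * Examples (illustrative only):
 *	"filter 0x72/0x10@/bin/noploop"    - trace 16 bytes of object code
 *					     starting at file offset 0x72 in
 *					     /bin/noploop
 *	"start 0xffffffff81000000/0x4000"  - start tracing in this kernel
 *					     address range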
10252 */ 10253 enum { 10254 IF_ACT_NONE = -1, 10255 IF_ACT_FILTER, 10256 IF_ACT_START, 10257 IF_ACT_STOP, 10258 IF_SRC_FILE, 10259 IF_SRC_KERNEL, 10260 IF_SRC_FILEADDR, 10261 IF_SRC_KERNELADDR, 10262 }; 10263 10264 enum { 10265 IF_STATE_ACTION = 0, 10266 IF_STATE_SOURCE, 10267 IF_STATE_END, 10268 }; 10269 10270 static const match_table_t if_tokens = { 10271 { IF_ACT_FILTER, "filter" }, 10272 { IF_ACT_START, "start" }, 10273 { IF_ACT_STOP, "stop" }, 10274 { IF_SRC_FILE, "%u/%u@%s" }, 10275 { IF_SRC_KERNEL, "%u/%u" }, 10276 { IF_SRC_FILEADDR, "%u@%s" }, 10277 { IF_SRC_KERNELADDR, "%u" }, 10278 { IF_ACT_NONE, NULL }, 10279 }; 10280 10281 /* 10282 * Address filter string parser 10283 */ 10284 static int 10285 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, 10286 struct list_head *filters) 10287 { 10288 struct perf_addr_filter *filter = NULL; 10289 char *start, *orig, *filename = NULL; 10290 substring_t args[MAX_OPT_ARGS]; 10291 int state = IF_STATE_ACTION, token; 10292 unsigned int kernel = 0; 10293 int ret = -EINVAL; 10294 10295 orig = fstr = kstrdup(fstr, GFP_KERNEL); 10296 if (!fstr) 10297 return -ENOMEM; 10298 10299 while ((start = strsep(&fstr, " ,\n")) != NULL) { 10300 static const enum perf_addr_filter_action_t actions[] = { 10301 [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER, 10302 [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START, 10303 [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP, 10304 }; 10305 ret = -EINVAL; 10306 10307 if (!*start) 10308 continue; 10309 10310 /* filter definition begins */ 10311 if (state == IF_STATE_ACTION) { 10312 filter = perf_addr_filter_new(event, filters); 10313 if (!filter) 10314 goto fail; 10315 } 10316 10317 token = match_token(start, if_tokens, args); 10318 switch (token) { 10319 case IF_ACT_FILTER: 10320 case IF_ACT_START: 10321 case IF_ACT_STOP: 10322 if (state != IF_STATE_ACTION) 10323 goto fail; 10324 10325 filter->action = actions[token]; 10326 state = IF_STATE_SOURCE; 10327 break; 10328 10329 case IF_SRC_KERNELADDR: 10330 case IF_SRC_KERNEL: 10331 kernel = 1; 10332 fallthrough; 10333 10334 case IF_SRC_FILEADDR: 10335 case IF_SRC_FILE: 10336 if (state != IF_STATE_SOURCE) 10337 goto fail; 10338 10339 *args[0].to = 0; 10340 ret = kstrtoul(args[0].from, 0, &filter->offset); 10341 if (ret) 10342 goto fail; 10343 10344 if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) { 10345 *args[1].to = 0; 10346 ret = kstrtoul(args[1].from, 0, &filter->size); 10347 if (ret) 10348 goto fail; 10349 } 10350 10351 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { 10352 int fpos = token == IF_SRC_FILE ? 2 : 1; 10353 10354 kfree(filename); 10355 filename = match_strdup(&args[fpos]); 10356 if (!filename) { 10357 ret = -ENOMEM; 10358 goto fail; 10359 } 10360 } 10361 10362 state = IF_STATE_END; 10363 break; 10364 10365 default: 10366 goto fail; 10367 } 10368 10369 /* 10370 * Filter definition is fully parsed, validate and install it. 10371 * Make sure that it doesn't contradict itself or the event's 10372 * attribute. 10373 */ 10374 if (state == IF_STATE_END) { 10375 ret = -EINVAL; 10376 if (kernel && event->attr.exclude_kernel) 10377 goto fail; 10378 10379 /* 10380 * ACTION "filter" must have a non-zero length region 10381 * specified. 
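			 * A bare address such as "filter 0x72@/bin/noploop"
			 * is rejected here, while the same form is accepted
			 * for "start" and "stop".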
10382 */ 10383 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER && 10384 !filter->size) 10385 goto fail; 10386 10387 if (!kernel) { 10388 if (!filename) 10389 goto fail; 10390 10391 /* 10392 * For now, we only support file-based filters 10393 * in per-task events; doing so for CPU-wide 10394 * events requires additional context switching 10395 * trickery, since same object code will be 10396 * mapped at different virtual addresses in 10397 * different processes. 10398 */ 10399 ret = -EOPNOTSUPP; 10400 if (!event->ctx->task) 10401 goto fail; 10402 10403 /* look up the path and grab its inode */ 10404 ret = kern_path(filename, LOOKUP_FOLLOW, 10405 &filter->path); 10406 if (ret) 10407 goto fail; 10408 10409 ret = -EINVAL; 10410 if (!filter->path.dentry || 10411 !S_ISREG(d_inode(filter->path.dentry) 10412 ->i_mode)) 10413 goto fail; 10414 10415 event->addr_filters.nr_file_filters++; 10416 } 10417 10418 /* ready to consume more filters */ 10419 state = IF_STATE_ACTION; 10420 filter = NULL; 10421 } 10422 } 10423 10424 if (state != IF_STATE_ACTION) 10425 goto fail; 10426 10427 kfree(filename); 10428 kfree(orig); 10429 10430 return 0; 10431 10432 fail: 10433 kfree(filename); 10434 free_filters_list(filters); 10435 kfree(orig); 10436 10437 return ret; 10438 } 10439 10440 static int 10441 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) 10442 { 10443 LIST_HEAD(filters); 10444 int ret; 10445 10446 /* 10447 * Since this is called in perf_ioctl() path, we're already holding 10448 * ctx::mutex. 10449 */ 10450 lockdep_assert_held(&event->ctx->mutex); 10451 10452 if (WARN_ON_ONCE(event->parent)) 10453 return -EINVAL; 10454 10455 ret = perf_event_parse_addr_filter(event, filter_str, &filters); 10456 if (ret) 10457 goto fail_clear_files; 10458 10459 ret = event->pmu->addr_filters_validate(&filters); 10460 if (ret) 10461 goto fail_free_filters; 10462 10463 /* remove existing filters, if any */ 10464 perf_addr_filters_splice(event, &filters); 10465 10466 /* install new filters */ 10467 perf_event_for_each_child(event, perf_event_addr_filters_apply); 10468 10469 return ret; 10470 10471 fail_free_filters: 10472 free_filters_list(&filters); 10473 10474 fail_clear_files: 10475 event->addr_filters.nr_file_filters = 0; 10476 10477 return ret; 10478 } 10479 10480 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 10481 { 10482 int ret = -EINVAL; 10483 char *filter_str; 10484 10485 filter_str = strndup_user(arg, PAGE_SIZE); 10486 if (IS_ERR(filter_str)) 10487 return PTR_ERR(filter_str); 10488 10489 #ifdef CONFIG_EVENT_TRACING 10490 if (perf_event_is_tracing(event)) { 10491 struct perf_event_context *ctx = event->ctx; 10492 10493 /* 10494 * Beware, here be dragons!! 10495 * 10496 * the tracepoint muck will deadlock against ctx->mutex, but 10497 * the tracepoint stuff does not actually need it. So 10498 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we 10499 * already have a reference on ctx. 10500 * 10501 * This can result in event getting moved to a different ctx, 10502 * but that does not affect the tracepoint state. 
10503 */ 10504 mutex_unlock(&ctx->mutex); 10505 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 10506 mutex_lock(&ctx->mutex); 10507 } else 10508 #endif 10509 if (has_addr_filter(event)) 10510 ret = perf_event_set_addr_filter(event, filter_str); 10511 10512 kfree(filter_str); 10513 return ret; 10514 } 10515 10516 /* 10517 * hrtimer based swevent callback 10518 */ 10519 10520 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 10521 { 10522 enum hrtimer_restart ret = HRTIMER_RESTART; 10523 struct perf_sample_data data; 10524 struct pt_regs *regs; 10525 struct perf_event *event; 10526 u64 period; 10527 10528 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 10529 10530 if (event->state != PERF_EVENT_STATE_ACTIVE) 10531 return HRTIMER_NORESTART; 10532 10533 event->pmu->read(event); 10534 10535 perf_sample_data_init(&data, 0, event->hw.last_period); 10536 regs = get_irq_regs(); 10537 10538 if (regs && !perf_exclude_event(event, regs)) { 10539 if (!(event->attr.exclude_idle && is_idle_task(current))) 10540 if (__perf_event_overflow(event, 1, &data, regs)) 10541 ret = HRTIMER_NORESTART; 10542 } 10543 10544 period = max_t(u64, 10000, event->hw.sample_period); 10545 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 10546 10547 return ret; 10548 } 10549 10550 static void perf_swevent_start_hrtimer(struct perf_event *event) 10551 { 10552 struct hw_perf_event *hwc = &event->hw; 10553 s64 period; 10554 10555 if (!is_sampling_event(event)) 10556 return; 10557 10558 period = local64_read(&hwc->period_left); 10559 if (period) { 10560 if (period < 0) 10561 period = 10000; 10562 10563 local64_set(&hwc->period_left, 0); 10564 } else { 10565 period = max_t(u64, 10000, hwc->sample_period); 10566 } 10567 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 10568 HRTIMER_MODE_REL_PINNED_HARD); 10569 } 10570 10571 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 10572 { 10573 struct hw_perf_event *hwc = &event->hw; 10574 10575 if (is_sampling_event(event)) { 10576 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 10577 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 10578 10579 hrtimer_cancel(&hwc->hrtimer); 10580 } 10581 } 10582 10583 static void perf_swevent_init_hrtimer(struct perf_event *event) 10584 { 10585 struct hw_perf_event *hwc = &event->hw; 10586 10587 if (!is_sampling_event(event)) 10588 return; 10589 10590 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 10591 hwc->hrtimer.function = perf_swevent_hrtimer; 10592 10593 /* 10594 * Since hrtimers have a fixed rate, we can do a static freq->period 10595 * mapping and avoid the whole period adjust feedback stuff. 
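 *
 * E.g. attr.sample_freq = 4000 simply becomes a fixed period of
 * NSEC_PER_SEC / 4000 = 250000 ns between hrtimer expirations.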
10596 */ 10597 if (event->attr.freq) { 10598 long freq = event->attr.sample_freq; 10599 10600 event->attr.sample_period = NSEC_PER_SEC / freq; 10601 hwc->sample_period = event->attr.sample_period; 10602 local64_set(&hwc->period_left, hwc->sample_period); 10603 hwc->last_period = hwc->sample_period; 10604 event->attr.freq = 0; 10605 } 10606 } 10607 10608 /* 10609 * Software event: cpu wall time clock 10610 */ 10611 10612 static void cpu_clock_event_update(struct perf_event *event) 10613 { 10614 s64 prev; 10615 u64 now; 10616 10617 now = local_clock(); 10618 prev = local64_xchg(&event->hw.prev_count, now); 10619 local64_add(now - prev, &event->count); 10620 } 10621 10622 static void cpu_clock_event_start(struct perf_event *event, int flags) 10623 { 10624 local64_set(&event->hw.prev_count, local_clock()); 10625 perf_swevent_start_hrtimer(event); 10626 } 10627 10628 static void cpu_clock_event_stop(struct perf_event *event, int flags) 10629 { 10630 perf_swevent_cancel_hrtimer(event); 10631 cpu_clock_event_update(event); 10632 } 10633 10634 static int cpu_clock_event_add(struct perf_event *event, int flags) 10635 { 10636 if (flags & PERF_EF_START) 10637 cpu_clock_event_start(event, flags); 10638 perf_event_update_userpage(event); 10639 10640 return 0; 10641 } 10642 10643 static void cpu_clock_event_del(struct perf_event *event, int flags) 10644 { 10645 cpu_clock_event_stop(event, flags); 10646 } 10647 10648 static void cpu_clock_event_read(struct perf_event *event) 10649 { 10650 cpu_clock_event_update(event); 10651 } 10652 10653 static int cpu_clock_event_init(struct perf_event *event) 10654 { 10655 if (event->attr.type != PERF_TYPE_SOFTWARE) 10656 return -ENOENT; 10657 10658 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 10659 return -ENOENT; 10660 10661 /* 10662 * no branch sampling for software events 10663 */ 10664 if (has_branch_stack(event)) 10665 return -EOPNOTSUPP; 10666 10667 perf_swevent_init_hrtimer(event); 10668 10669 return 0; 10670 } 10671 10672 static struct pmu perf_cpu_clock = { 10673 .task_ctx_nr = perf_sw_context, 10674 10675 .capabilities = PERF_PMU_CAP_NO_NMI, 10676 10677 .event_init = cpu_clock_event_init, 10678 .add = cpu_clock_event_add, 10679 .del = cpu_clock_event_del, 10680 .start = cpu_clock_event_start, 10681 .stop = cpu_clock_event_stop, 10682 .read = cpu_clock_event_read, 10683 }; 10684 10685 /* 10686 * Software event: task time clock 10687 */ 10688 10689 static void task_clock_event_update(struct perf_event *event, u64 now) 10690 { 10691 u64 prev; 10692 s64 delta; 10693 10694 prev = local64_xchg(&event->hw.prev_count, now); 10695 delta = now - prev; 10696 local64_add(delta, &event->count); 10697 } 10698 10699 static void task_clock_event_start(struct perf_event *event, int flags) 10700 { 10701 local64_set(&event->hw.prev_count, event->ctx->time); 10702 perf_swevent_start_hrtimer(event); 10703 } 10704 10705 static void task_clock_event_stop(struct perf_event *event, int flags) 10706 { 10707 perf_swevent_cancel_hrtimer(event); 10708 task_clock_event_update(event, event->ctx->time); 10709 } 10710 10711 static int task_clock_event_add(struct perf_event *event, int flags) 10712 { 10713 if (flags & PERF_EF_START) 10714 task_clock_event_start(event, flags); 10715 perf_event_update_userpage(event); 10716 10717 return 0; 10718 } 10719 10720 static void task_clock_event_del(struct perf_event *event, int flags) 10721 { 10722 task_clock_event_stop(event, PERF_EF_UPDATE); 10723 } 10724 10725 static void task_clock_event_read(struct perf_event *event) 10726 { 10727 u64 now 
= perf_clock(); 10728 u64 delta = now - event->ctx->timestamp; 10729 u64 time = event->ctx->time + delta; 10730 10731 task_clock_event_update(event, time); 10732 } 10733 10734 static int task_clock_event_init(struct perf_event *event) 10735 { 10736 if (event->attr.type != PERF_TYPE_SOFTWARE) 10737 return -ENOENT; 10738 10739 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 10740 return -ENOENT; 10741 10742 /* 10743 * no branch sampling for software events 10744 */ 10745 if (has_branch_stack(event)) 10746 return -EOPNOTSUPP; 10747 10748 perf_swevent_init_hrtimer(event); 10749 10750 return 0; 10751 } 10752 10753 static struct pmu perf_task_clock = { 10754 .task_ctx_nr = perf_sw_context, 10755 10756 .capabilities = PERF_PMU_CAP_NO_NMI, 10757 10758 .event_init = task_clock_event_init, 10759 .add = task_clock_event_add, 10760 .del = task_clock_event_del, 10761 .start = task_clock_event_start, 10762 .stop = task_clock_event_stop, 10763 .read = task_clock_event_read, 10764 }; 10765 10766 static void perf_pmu_nop_void(struct pmu *pmu) 10767 { 10768 } 10769 10770 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) 10771 { 10772 } 10773 10774 static int perf_pmu_nop_int(struct pmu *pmu) 10775 { 10776 return 0; 10777 } 10778 10779 static int perf_event_nop_int(struct perf_event *event, u64 value) 10780 { 10781 return 0; 10782 } 10783 10784 static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 10785 10786 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 10787 { 10788 __this_cpu_write(nop_txn_flags, flags); 10789 10790 if (flags & ~PERF_PMU_TXN_ADD) 10791 return; 10792 10793 perf_pmu_disable(pmu); 10794 } 10795 10796 static int perf_pmu_commit_txn(struct pmu *pmu) 10797 { 10798 unsigned int flags = __this_cpu_read(nop_txn_flags); 10799 10800 __this_cpu_write(nop_txn_flags, 0); 10801 10802 if (flags & ~PERF_PMU_TXN_ADD) 10803 return 0; 10804 10805 perf_pmu_enable(pmu); 10806 return 0; 10807 } 10808 10809 static void perf_pmu_cancel_txn(struct pmu *pmu) 10810 { 10811 unsigned int flags = __this_cpu_read(nop_txn_flags); 10812 10813 __this_cpu_write(nop_txn_flags, 0); 10814 10815 if (flags & ~PERF_PMU_TXN_ADD) 10816 return; 10817 10818 perf_pmu_enable(pmu); 10819 } 10820 10821 static int perf_event_idx_default(struct perf_event *event) 10822 { 10823 return 0; 10824 } 10825 10826 /* 10827 * Ensures all contexts with the same task_ctx_nr have the same 10828 * pmu_cpu_context too. 10829 */ 10830 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 10831 { 10832 struct pmu *pmu; 10833 10834 if (ctxn < 0) 10835 return NULL; 10836 10837 list_for_each_entry(pmu, &pmus, entry) { 10838 if (pmu->task_ctx_nr == ctxn) 10839 return pmu->pmu_cpu_context; 10840 } 10841 10842 return NULL; 10843 } 10844 10845 static void free_pmu_context(struct pmu *pmu) 10846 { 10847 /* 10848 * Static contexts such as perf_sw_context have a global lifetime 10849 * and may be shared between different PMUs. Avoid freeing them 10850 * when a single PMU is going away. 
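 *
 * E.g. perf_cpu_clock and perf_task_clock above both use
 * perf_sw_context and therefore share one pmu_cpu_context;
 * unregistering one of them must not free the other's context.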
10851 */ 10852 if (pmu->task_ctx_nr > perf_invalid_context) 10853 return; 10854 10855 free_percpu(pmu->pmu_cpu_context); 10856 } 10857 10858 /* 10859 * Let userspace know that this PMU supports address range filtering: 10860 */ 10861 static ssize_t nr_addr_filters_show(struct device *dev, 10862 struct device_attribute *attr, 10863 char *page) 10864 { 10865 struct pmu *pmu = dev_get_drvdata(dev); 10866 10867 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); 10868 } 10869 DEVICE_ATTR_RO(nr_addr_filters); 10870 10871 static struct idr pmu_idr; 10872 10873 static ssize_t 10874 type_show(struct device *dev, struct device_attribute *attr, char *page) 10875 { 10876 struct pmu *pmu = dev_get_drvdata(dev); 10877 10878 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 10879 } 10880 static DEVICE_ATTR_RO(type); 10881 10882 static ssize_t 10883 perf_event_mux_interval_ms_show(struct device *dev, 10884 struct device_attribute *attr, 10885 char *page) 10886 { 10887 struct pmu *pmu = dev_get_drvdata(dev); 10888 10889 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 10890 } 10891 10892 static DEFINE_MUTEX(mux_interval_mutex); 10893 10894 static ssize_t 10895 perf_event_mux_interval_ms_store(struct device *dev, 10896 struct device_attribute *attr, 10897 const char *buf, size_t count) 10898 { 10899 struct pmu *pmu = dev_get_drvdata(dev); 10900 int timer, cpu, ret; 10901 10902 ret = kstrtoint(buf, 0, &timer); 10903 if (ret) 10904 return ret; 10905 10906 if (timer < 1) 10907 return -EINVAL; 10908 10909 /* same value, noting to do */ 10910 if (timer == pmu->hrtimer_interval_ms) 10911 return count; 10912 10913 mutex_lock(&mux_interval_mutex); 10914 pmu->hrtimer_interval_ms = timer; 10915 10916 /* update all cpuctx for this PMU */ 10917 cpus_read_lock(); 10918 for_each_online_cpu(cpu) { 10919 struct perf_cpu_context *cpuctx; 10920 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 10921 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 10922 10923 cpu_function_call(cpu, 10924 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 10925 } 10926 cpus_read_unlock(); 10927 mutex_unlock(&mux_interval_mutex); 10928 10929 return count; 10930 } 10931 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 10932 10933 static struct attribute *pmu_dev_attrs[] = { 10934 &dev_attr_type.attr, 10935 &dev_attr_perf_event_mux_interval_ms.attr, 10936 NULL, 10937 }; 10938 ATTRIBUTE_GROUPS(pmu_dev); 10939 10940 static int pmu_bus_running; 10941 static struct bus_type pmu_bus = { 10942 .name = "event_source", 10943 .dev_groups = pmu_dev_groups, 10944 }; 10945 10946 static void pmu_dev_release(struct device *dev) 10947 { 10948 kfree(dev); 10949 } 10950 10951 static int pmu_dev_alloc(struct pmu *pmu) 10952 { 10953 int ret = -ENOMEM; 10954 10955 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 10956 if (!pmu->dev) 10957 goto out; 10958 10959 pmu->dev->groups = pmu->attr_groups; 10960 device_initialize(pmu->dev); 10961 ret = dev_set_name(pmu->dev, "%s", pmu->name); 10962 if (ret) 10963 goto free_dev; 10964 10965 dev_set_drvdata(pmu->dev, pmu); 10966 pmu->dev->bus = &pmu_bus; 10967 pmu->dev->release = pmu_dev_release; 10968 ret = device_add(pmu->dev); 10969 if (ret) 10970 goto free_dev; 10971 10972 /* For PMUs with address filters, throw in an extra attribute: */ 10973 if (pmu->nr_addr_filters) 10974 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters); 10975 10976 if (ret) 10977 goto del_dev; 10978 10979 if (pmu->attr_update) 10980 ret = 
sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update); 10981 10982 if (ret) 10983 goto del_dev; 10984 10985 out: 10986 return ret; 10987 10988 del_dev: 10989 device_del(pmu->dev); 10990 10991 free_dev: 10992 put_device(pmu->dev); 10993 goto out; 10994 } 10995 10996 static struct lock_class_key cpuctx_mutex; 10997 static struct lock_class_key cpuctx_lock; 10998 10999 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 11000 { 11001 int cpu, ret, max = PERF_TYPE_MAX; 11002 11003 mutex_lock(&pmus_lock); 11004 ret = -ENOMEM; 11005 pmu->pmu_disable_count = alloc_percpu(int); 11006 if (!pmu->pmu_disable_count) 11007 goto unlock; 11008 11009 pmu->type = -1; 11010 if (!name) 11011 goto skip_type; 11012 pmu->name = name; 11013 11014 if (type != PERF_TYPE_SOFTWARE) { 11015 if (type >= 0) 11016 max = type; 11017 11018 ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); 11019 if (ret < 0) 11020 goto free_pdc; 11021 11022 WARN_ON(type >= 0 && ret != type); 11023 11024 type = ret; 11025 } 11026 pmu->type = type; 11027 11028 if (pmu_bus_running) { 11029 ret = pmu_dev_alloc(pmu); 11030 if (ret) 11031 goto free_idr; 11032 } 11033 11034 skip_type: 11035 if (pmu->task_ctx_nr == perf_hw_context) { 11036 static int hw_context_taken = 0; 11037 11038 /* 11039 * Other than systems with heterogeneous CPUs, it never makes 11040 * sense for two PMUs to share perf_hw_context. PMUs which are 11041 * uncore must use perf_invalid_context. 11042 */ 11043 if (WARN_ON_ONCE(hw_context_taken && 11044 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS))) 11045 pmu->task_ctx_nr = perf_invalid_context; 11046 11047 hw_context_taken = 1; 11048 } 11049 11050 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 11051 if (pmu->pmu_cpu_context) 11052 goto got_cpu_context; 11053 11054 ret = -ENOMEM; 11055 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 11056 if (!pmu->pmu_cpu_context) 11057 goto free_dev; 11058 11059 for_each_possible_cpu(cpu) { 11060 struct perf_cpu_context *cpuctx; 11061 11062 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 11063 __perf_event_init_context(&cpuctx->ctx); 11064 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 11065 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 11066 cpuctx->ctx.pmu = pmu; 11067 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); 11068 11069 __perf_mux_hrtimer_init(cpuctx, cpu); 11070 11071 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); 11072 cpuctx->heap = cpuctx->heap_default; 11073 } 11074 11075 got_cpu_context: 11076 if (!pmu->start_txn) { 11077 if (pmu->pmu_enable) { 11078 /* 11079 * If we have pmu_enable/pmu_disable calls, install 11080 * transaction stubs that use that to try and batch 11081 * hardware accesses. 11082 */ 11083 pmu->start_txn = perf_pmu_start_txn; 11084 pmu->commit_txn = perf_pmu_commit_txn; 11085 pmu->cancel_txn = perf_pmu_cancel_txn; 11086 } else { 11087 pmu->start_txn = perf_pmu_nop_txn; 11088 pmu->commit_txn = perf_pmu_nop_int; 11089 pmu->cancel_txn = perf_pmu_nop_void; 11090 } 11091 } 11092 11093 if (!pmu->pmu_enable) { 11094 pmu->pmu_enable = perf_pmu_nop_void; 11095 pmu->pmu_disable = perf_pmu_nop_void; 11096 } 11097 11098 if (!pmu->check_period) 11099 pmu->check_period = perf_event_nop_int; 11100 11101 if (!pmu->event_idx) 11102 pmu->event_idx = perf_event_idx_default; 11103 11104 /* 11105 * Ensure the TYPE_SOFTWARE PMUs are at the head of the list, 11106 * since these cannot be in the IDR. This way the linear search 11107 * is fast, provided a valid software event is provided. 
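 *
 * (The linear search in question is the fallback list walk in
 * perf_init_event() for types that are not found in pmu_idr.)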
11108 */ 11109 if (type == PERF_TYPE_SOFTWARE || !name) 11110 list_add_rcu(&pmu->entry, &pmus); 11111 else 11112 list_add_tail_rcu(&pmu->entry, &pmus); 11113 11114 atomic_set(&pmu->exclusive_cnt, 0); 11115 ret = 0; 11116 unlock: 11117 mutex_unlock(&pmus_lock); 11118 11119 return ret; 11120 11121 free_dev: 11122 device_del(pmu->dev); 11123 put_device(pmu->dev); 11124 11125 free_idr: 11126 if (pmu->type != PERF_TYPE_SOFTWARE) 11127 idr_remove(&pmu_idr, pmu->type); 11128 11129 free_pdc: 11130 free_percpu(pmu->pmu_disable_count); 11131 goto unlock; 11132 } 11133 EXPORT_SYMBOL_GPL(perf_pmu_register); 11134 11135 void perf_pmu_unregister(struct pmu *pmu) 11136 { 11137 mutex_lock(&pmus_lock); 11138 list_del_rcu(&pmu->entry); 11139 11140 /* 11141 * We dereference the pmu list under both SRCU and regular RCU, so 11142 * synchronize against both of those. 11143 */ 11144 synchronize_srcu(&pmus_srcu); 11145 synchronize_rcu(); 11146 11147 free_percpu(pmu->pmu_disable_count); 11148 if (pmu->type != PERF_TYPE_SOFTWARE) 11149 idr_remove(&pmu_idr, pmu->type); 11150 if (pmu_bus_running) { 11151 if (pmu->nr_addr_filters) 11152 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); 11153 device_del(pmu->dev); 11154 put_device(pmu->dev); 11155 } 11156 free_pmu_context(pmu); 11157 mutex_unlock(&pmus_lock); 11158 } 11159 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 11160 11161 static inline bool has_extended_regs(struct perf_event *event) 11162 { 11163 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || 11164 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); 11165 } 11166 11167 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 11168 { 11169 struct perf_event_context *ctx = NULL; 11170 int ret; 11171 11172 if (!try_module_get(pmu->module)) 11173 return -ENODEV; 11174 11175 /* 11176 * A number of pmu->event_init() methods iterate the sibling_list to, 11177 * for example, validate if the group fits on the PMU. Therefore, 11178 * if this is a sibling event, acquire the ctx->mutex to protect 11179 * the sibling_list. 11180 */ 11181 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { 11182 /* 11183 * This ctx->mutex can nest when we're called through 11184 * inheritance. See the perf_event_ctx_lock_nested() comment. 11185 */ 11186 ctx = perf_event_ctx_lock_nested(event->group_leader, 11187 SINGLE_DEPTH_NESTING); 11188 BUG_ON(!ctx); 11189 } 11190 11191 event->pmu = pmu; 11192 ret = pmu->event_init(event); 11193 11194 if (ctx) 11195 perf_event_ctx_unlock(event->group_leader, ctx); 11196 11197 if (!ret) { 11198 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && 11199 has_extended_regs(event)) 11200 ret = -EOPNOTSUPP; 11201 11202 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && 11203 event_has_any_exclude_flag(event)) 11204 ret = -EINVAL; 11205 11206 if (ret && event->destroy) 11207 event->destroy(event); 11208 } 11209 11210 if (ret) 11211 module_put(pmu->module); 11212 11213 return ret; 11214 } 11215 11216 static struct pmu *perf_init_event(struct perf_event *event) 11217 { 11218 bool extended_type = false; 11219 int idx, type, ret; 11220 struct pmu *pmu; 11221 11222 idx = srcu_read_lock(&pmus_srcu); 11223 11224 /* Try parent's PMU first: */ 11225 if (event->parent && event->parent->pmu) { 11226 pmu = event->parent->pmu; 11227 ret = perf_try_init_event(pmu, event); 11228 if (!ret) 11229 goto unlock; 11230 } 11231 11232 /* 11233 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE 11234 * are often aliases for PERF_TYPE_RAW. 
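 *
 * They may also carry an extended PMU type in the config bits above
 * PERF_PMU_TYPE_SHIFT; the shift below recovers it so that such an
 * event is looked up on that specific PMU rather than on PERF_TYPE_RAW.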
11235 */ 11236 type = event->attr.type; 11237 if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) { 11238 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; 11239 if (!type) { 11240 type = PERF_TYPE_RAW; 11241 } else { 11242 extended_type = true; 11243 event->attr.config &= PERF_HW_EVENT_MASK; 11244 } 11245 } 11246 11247 again: 11248 rcu_read_lock(); 11249 pmu = idr_find(&pmu_idr, type); 11250 rcu_read_unlock(); 11251 if (pmu) { 11252 if (event->attr.type != type && type != PERF_TYPE_RAW && 11253 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)) 11254 goto fail; 11255 11256 ret = perf_try_init_event(pmu, event); 11257 if (ret == -ENOENT && event->attr.type != type && !extended_type) { 11258 type = event->attr.type; 11259 goto again; 11260 } 11261 11262 if (ret) 11263 pmu = ERR_PTR(ret); 11264 11265 goto unlock; 11266 } 11267 11268 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) { 11269 ret = perf_try_init_event(pmu, event); 11270 if (!ret) 11271 goto unlock; 11272 11273 if (ret != -ENOENT) { 11274 pmu = ERR_PTR(ret); 11275 goto unlock; 11276 } 11277 } 11278 fail: 11279 pmu = ERR_PTR(-ENOENT); 11280 unlock: 11281 srcu_read_unlock(&pmus_srcu, idx); 11282 11283 return pmu; 11284 } 11285 11286 static void attach_sb_event(struct perf_event *event) 11287 { 11288 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); 11289 11290 raw_spin_lock(&pel->lock); 11291 list_add_rcu(&event->sb_list, &pel->list); 11292 raw_spin_unlock(&pel->lock); 11293 } 11294 11295 /* 11296 * We keep a list of all !task (and therefore per-cpu) events 11297 * that need to receive side-band records. 11298 * 11299 * This avoids having to scan all the various PMU per-cpu contexts 11300 * looking for them. 11301 */ 11302 static void account_pmu_sb_event(struct perf_event *event) 11303 { 11304 if (is_sb_event(event)) 11305 attach_sb_event(event); 11306 } 11307 11308 static void account_event_cpu(struct perf_event *event, int cpu) 11309 { 11310 if (event->parent) 11311 return; 11312 11313 if (is_cgroup_event(event)) 11314 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 11315 } 11316 11317 /* Freq events need the tick to stay alive (see perf_event_task_tick). 
*/ 11318 static void account_freq_event_nohz(void) 11319 { 11320 #ifdef CONFIG_NO_HZ_FULL 11321 /* Lock so we don't race with concurrent unaccount */ 11322 spin_lock(&nr_freq_lock); 11323 if (atomic_inc_return(&nr_freq_events) == 1) 11324 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS); 11325 spin_unlock(&nr_freq_lock); 11326 #endif 11327 } 11328 11329 static void account_freq_event(void) 11330 { 11331 if (tick_nohz_full_enabled()) 11332 account_freq_event_nohz(); 11333 else 11334 atomic_inc(&nr_freq_events); 11335 } 11336 11337 11338 static void account_event(struct perf_event *event) 11339 { 11340 bool inc = false; 11341 11342 if (event->parent) 11343 return; 11344 11345 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) 11346 inc = true; 11347 if (event->attr.mmap || event->attr.mmap_data) 11348 atomic_inc(&nr_mmap_events); 11349 if (event->attr.build_id) 11350 atomic_inc(&nr_build_id_events); 11351 if (event->attr.comm) 11352 atomic_inc(&nr_comm_events); 11353 if (event->attr.namespaces) 11354 atomic_inc(&nr_namespaces_events); 11355 if (event->attr.cgroup) 11356 atomic_inc(&nr_cgroup_events); 11357 if (event->attr.task) 11358 atomic_inc(&nr_task_events); 11359 if (event->attr.freq) 11360 account_freq_event(); 11361 if (event->attr.context_switch) { 11362 atomic_inc(&nr_switch_events); 11363 inc = true; 11364 } 11365 if (has_branch_stack(event)) 11366 inc = true; 11367 if (is_cgroup_event(event)) 11368 inc = true; 11369 if (event->attr.ksymbol) 11370 atomic_inc(&nr_ksymbol_events); 11371 if (event->attr.bpf_event) 11372 atomic_inc(&nr_bpf_events); 11373 if (event->attr.text_poke) 11374 atomic_inc(&nr_text_poke_events); 11375 11376 if (inc) { 11377 /* 11378 * We need the mutex here because static_branch_enable() 11379 * must complete *before* the perf_sched_count increment 11380 * becomes visible. 11381 */ 11382 if (atomic_inc_not_zero(&perf_sched_count)) 11383 goto enabled; 11384 11385 mutex_lock(&perf_sched_mutex); 11386 if (!atomic_read(&perf_sched_count)) { 11387 static_branch_enable(&perf_sched_events); 11388 /* 11389 * Guarantee that all CPUs observe they key change and 11390 * call the perf scheduling hooks before proceeding to 11391 * install events that need them. 11392 */ 11393 synchronize_rcu(); 11394 } 11395 /* 11396 * Now that we have waited for the sync_sched(), allow further 11397 * increments to by-pass the mutex. 11398 */ 11399 atomic_inc(&perf_sched_count); 11400 mutex_unlock(&perf_sched_mutex); 11401 } 11402 enabled: 11403 11404 account_event_cpu(event, event->cpu); 11405 11406 account_pmu_sb_event(event); 11407 } 11408 11409 /* 11410 * Allocate and initialize an event structure 11411 */ 11412 static struct perf_event * 11413 perf_event_alloc(struct perf_event_attr *attr, int cpu, 11414 struct task_struct *task, 11415 struct perf_event *group_leader, 11416 struct perf_event *parent_event, 11417 perf_overflow_handler_t overflow_handler, 11418 void *context, int cgroup_fd) 11419 { 11420 struct pmu *pmu; 11421 struct perf_event *event; 11422 struct hw_perf_event *hwc; 11423 long err = -EINVAL; 11424 int node; 11425 11426 if ((unsigned)cpu >= nr_cpu_ids) { 11427 if (!task || cpu != -1) 11428 return ERR_PTR(-EINVAL); 11429 } 11430 if (attr->sigtrap && !task) { 11431 /* Requires a task: avoid signalling random tasks. */ 11432 return ERR_PTR(-EINVAL); 11433 } 11434 11435 node = (cpu >= 0) ? 
cpu_to_node(cpu) : -1; 11436 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, 11437 node); 11438 if (!event) 11439 return ERR_PTR(-ENOMEM); 11440 11441 /* 11442 * Single events are their own group leaders, with an 11443 * empty sibling list: 11444 */ 11445 if (!group_leader) 11446 group_leader = event; 11447 11448 mutex_init(&event->child_mutex); 11449 INIT_LIST_HEAD(&event->child_list); 11450 11451 INIT_LIST_HEAD(&event->event_entry); 11452 INIT_LIST_HEAD(&event->sibling_list); 11453 INIT_LIST_HEAD(&event->active_list); 11454 init_event_group(event); 11455 INIT_LIST_HEAD(&event->rb_entry); 11456 INIT_LIST_HEAD(&event->active_entry); 11457 INIT_LIST_HEAD(&event->addr_filters.list); 11458 INIT_HLIST_NODE(&event->hlist_entry); 11459 11460 11461 init_waitqueue_head(&event->waitq); 11462 event->pending_disable = -1; 11463 init_irq_work(&event->pending, perf_pending_event); 11464 11465 mutex_init(&event->mmap_mutex); 11466 raw_spin_lock_init(&event->addr_filters.lock); 11467 11468 atomic_long_set(&event->refcount, 1); 11469 event->cpu = cpu; 11470 event->attr = *attr; 11471 event->group_leader = group_leader; 11472 event->pmu = NULL; 11473 event->oncpu = -1; 11474 11475 event->parent = parent_event; 11476 11477 event->ns = get_pid_ns(task_active_pid_ns(current)); 11478 event->id = atomic64_inc_return(&perf_event_id); 11479 11480 event->state = PERF_EVENT_STATE_INACTIVE; 11481 11482 if (event->attr.sigtrap) 11483 atomic_set(&event->event_limit, 1); 11484 11485 if (task) { 11486 event->attach_state = PERF_ATTACH_TASK; 11487 /* 11488 * XXX pmu::event_init needs to know what task to account to 11489 * and we cannot use the ctx information because we need the 11490 * pmu before we get a ctx. 11491 */ 11492 event->hw.target = get_task_struct(task); 11493 } 11494 11495 event->clock = &local_clock; 11496 if (parent_event) 11497 event->clock = parent_event->clock; 11498 11499 if (!overflow_handler && parent_event) { 11500 overflow_handler = parent_event->overflow_handler; 11501 context = parent_event->overflow_handler_context; 11502 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) 11503 if (overflow_handler == bpf_overflow_handler) { 11504 struct bpf_prog *prog = parent_event->prog; 11505 11506 bpf_prog_inc(prog); 11507 event->prog = prog; 11508 event->orig_overflow_handler = 11509 parent_event->orig_overflow_handler; 11510 } 11511 #endif 11512 } 11513 11514 if (overflow_handler) { 11515 event->overflow_handler = overflow_handler; 11516 event->overflow_handler_context = context; 11517 } else if (is_write_backward(event)){ 11518 event->overflow_handler = perf_event_output_backward; 11519 event->overflow_handler_context = NULL; 11520 } else { 11521 event->overflow_handler = perf_event_output_forward; 11522 event->overflow_handler_context = NULL; 11523 } 11524 11525 perf_event__state_init(event); 11526 11527 pmu = NULL; 11528 11529 hwc = &event->hw; 11530 hwc->sample_period = attr->sample_period; 11531 if (attr->freq && attr->sample_freq) 11532 hwc->sample_period = 1; 11533 hwc->last_period = hwc->sample_period; 11534 11535 local64_set(&hwc->period_left, hwc->sample_period); 11536 11537 /* 11538 * We currently do not support PERF_SAMPLE_READ on inherited events. 11539 * See perf_output_read(). 
11540 */ 11541 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) 11542 goto err_ns; 11543 11544 if (!has_branch_stack(event)) 11545 event->attr.branch_sample_type = 0; 11546 11547 pmu = perf_init_event(event); 11548 if (IS_ERR(pmu)) { 11549 err = PTR_ERR(pmu); 11550 goto err_ns; 11551 } 11552 11553 /* 11554 * Disallow uncore-cgroup events, they don't make sense as the cgroup will 11555 * be different on other CPUs in the uncore mask. 11556 */ 11557 if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) { 11558 err = -EINVAL; 11559 goto err_pmu; 11560 } 11561 11562 if (event->attr.aux_output && 11563 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { 11564 err = -EOPNOTSUPP; 11565 goto err_pmu; 11566 } 11567 11568 if (cgroup_fd != -1) { 11569 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 11570 if (err) 11571 goto err_pmu; 11572 } 11573 11574 err = exclusive_event_init(event); 11575 if (err) 11576 goto err_pmu; 11577 11578 if (has_addr_filter(event)) { 11579 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, 11580 sizeof(struct perf_addr_filter_range), 11581 GFP_KERNEL); 11582 if (!event->addr_filter_ranges) { 11583 err = -ENOMEM; 11584 goto err_per_task; 11585 } 11586 11587 /* 11588 * Clone the parent's vma offsets: they are valid until exec() 11589 * even if the mm is not shared with the parent. 11590 */ 11591 if (event->parent) { 11592 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); 11593 11594 raw_spin_lock_irq(&ifh->lock); 11595 memcpy(event->addr_filter_ranges, 11596 event->parent->addr_filter_ranges, 11597 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range)); 11598 raw_spin_unlock_irq(&ifh->lock); 11599 } 11600 11601 /* force hw sync on the address filters */ 11602 event->addr_filters_gen = 1; 11603 } 11604 11605 if (!event->parent) { 11606 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 11607 err = get_callchain_buffers(attr->sample_max_stack); 11608 if (err) 11609 goto err_addr_filters; 11610 } 11611 } 11612 11613 err = security_perf_event_alloc(event); 11614 if (err) 11615 goto err_callchain_buffer; 11616 11617 /* symmetric to unaccount_event() in _free_event() */ 11618 account_event(event); 11619 11620 return event; 11621 11622 err_callchain_buffer: 11623 if (!event->parent) { 11624 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 11625 put_callchain_buffers(); 11626 } 11627 err_addr_filters: 11628 kfree(event->addr_filter_ranges); 11629 11630 err_per_task: 11631 exclusive_event_destroy(event); 11632 11633 err_pmu: 11634 if (is_cgroup_event(event)) 11635 perf_detach_cgroup(event); 11636 if (event->destroy) 11637 event->destroy(event); 11638 module_put(pmu->module); 11639 err_ns: 11640 if (event->ns) 11641 put_pid_ns(event->ns); 11642 if (event->hw.target) 11643 put_task_struct(event->hw.target); 11644 kmem_cache_free(perf_event_cache, event); 11645 11646 return ERR_PTR(err); 11647 } 11648 11649 static int perf_copy_attr(struct perf_event_attr __user *uattr, 11650 struct perf_event_attr *attr) 11651 { 11652 u32 size; 11653 int ret; 11654 11655 /* Zero the full structure, so that a short copy will be nice. 
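 * An older userspace passing a smaller attr thus gets the tail
 * zero-filled, while a larger-than-known attr is only accepted by
 * copy_struct_from_user() below when all its trailing bytes are zero.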
*/ 11656 memset(attr, 0, sizeof(*attr)); 11657 11658 ret = get_user(size, &uattr->size); 11659 if (ret) 11660 return ret; 11661 11662 /* ABI compatibility quirk: */ 11663 if (!size) 11664 size = PERF_ATTR_SIZE_VER0; 11665 if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE) 11666 goto err_size; 11667 11668 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 11669 if (ret) { 11670 if (ret == -E2BIG) 11671 goto err_size; 11672 return ret; 11673 } 11674 11675 attr->size = size; 11676 11677 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) 11678 return -EINVAL; 11679 11680 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 11681 return -EINVAL; 11682 11683 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 11684 return -EINVAL; 11685 11686 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 11687 u64 mask = attr->branch_sample_type; 11688 11689 /* only using defined bits */ 11690 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 11691 return -EINVAL; 11692 11693 /* at least one branch bit must be set */ 11694 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 11695 return -EINVAL; 11696 11697 /* propagate priv level, when not set for branch */ 11698 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 11699 11700 /* exclude_kernel checked on syscall entry */ 11701 if (!attr->exclude_kernel) 11702 mask |= PERF_SAMPLE_BRANCH_KERNEL; 11703 11704 if (!attr->exclude_user) 11705 mask |= PERF_SAMPLE_BRANCH_USER; 11706 11707 if (!attr->exclude_hv) 11708 mask |= PERF_SAMPLE_BRANCH_HV; 11709 /* 11710 * adjust user setting (for HW filter setup) 11711 */ 11712 attr->branch_sample_type = mask; 11713 } 11714 /* privileged levels capture (kernel, hv): check permissions */ 11715 if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) { 11716 ret = perf_allow_kernel(attr); 11717 if (ret) 11718 return ret; 11719 } 11720 } 11721 11722 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 11723 ret = perf_reg_validate(attr->sample_regs_user); 11724 if (ret) 11725 return ret; 11726 } 11727 11728 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 11729 if (!arch_perf_have_user_stack_dump()) 11730 return -ENOSYS; 11731 11732 /* 11733 * We have __u32 type for the size, but so far 11734 * we can only use __u16 as maximum due to the 11735 * __u16 sample size limit. 
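 *
 * E.g. a sample_stack_user of 8192 is accepted, whereas 65535
 * (>= USHRT_MAX) or any value not a multiple of 8 is rejected.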
11736 */ 11737 if (attr->sample_stack_user >= USHRT_MAX) 11738 return -EINVAL; 11739 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 11740 return -EINVAL; 11741 } 11742 11743 if (!attr->sample_max_stack) 11744 attr->sample_max_stack = sysctl_perf_event_max_stack; 11745 11746 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 11747 ret = perf_reg_validate(attr->sample_regs_intr); 11748 11749 #ifndef CONFIG_CGROUP_PERF 11750 if (attr->sample_type & PERF_SAMPLE_CGROUP) 11751 return -EINVAL; 11752 #endif 11753 if ((attr->sample_type & PERF_SAMPLE_WEIGHT) && 11754 (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) 11755 return -EINVAL; 11756 11757 if (!attr->inherit && attr->inherit_thread) 11758 return -EINVAL; 11759 11760 if (attr->remove_on_exec && attr->enable_on_exec) 11761 return -EINVAL; 11762 11763 if (attr->sigtrap && !attr->remove_on_exec) 11764 return -EINVAL; 11765 11766 out: 11767 return ret; 11768 11769 err_size: 11770 put_user(sizeof(*attr), &uattr->size); 11771 ret = -E2BIG; 11772 goto out; 11773 } 11774 11775 static int 11776 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 11777 { 11778 struct perf_buffer *rb = NULL; 11779 int ret = -EINVAL; 11780 11781 if (!output_event) 11782 goto set; 11783 11784 /* don't allow circular references */ 11785 if (event == output_event) 11786 goto out; 11787 11788 /* 11789 * Don't allow cross-cpu buffers 11790 */ 11791 if (output_event->cpu != event->cpu) 11792 goto out; 11793 11794 /* 11795 * If its not a per-cpu rb, it must be the same task. 11796 */ 11797 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 11798 goto out; 11799 11800 /* 11801 * Mixing clocks in the same buffer is trouble you don't need. 11802 */ 11803 if (output_event->clock != event->clock) 11804 goto out; 11805 11806 /* 11807 * Either writing ring buffer from beginning or from end. 11808 * Mixing is not allowed. 
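 *
 * I.e. an event created with attr.write_backward cannot redirect
 * its output into a forward-writing buffer, and vice versa.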
11809 */ 11810 if (is_write_backward(output_event) != is_write_backward(event)) 11811 goto out; 11812 11813 /* 11814 * If both events generate aux data, they must be on the same PMU 11815 */ 11816 if (has_aux(event) && has_aux(output_event) && 11817 event->pmu != output_event->pmu) 11818 goto out; 11819 11820 set: 11821 mutex_lock(&event->mmap_mutex); 11822 /* Can't redirect output if we've got an active mmap() */ 11823 if (atomic_read(&event->mmap_count)) 11824 goto unlock; 11825 11826 if (output_event) { 11827 /* get the rb we want to redirect to */ 11828 rb = ring_buffer_get(output_event); 11829 if (!rb) 11830 goto unlock; 11831 } 11832 11833 ring_buffer_attach(event, rb); 11834 11835 ret = 0; 11836 unlock: 11837 mutex_unlock(&event->mmap_mutex); 11838 11839 out: 11840 return ret; 11841 } 11842 11843 static void mutex_lock_double(struct mutex *a, struct mutex *b) 11844 { 11845 if (b < a) 11846 swap(a, b); 11847 11848 mutex_lock(a); 11849 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 11850 } 11851 11852 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 11853 { 11854 bool nmi_safe = false; 11855 11856 switch (clk_id) { 11857 case CLOCK_MONOTONIC: 11858 event->clock = &ktime_get_mono_fast_ns; 11859 nmi_safe = true; 11860 break; 11861 11862 case CLOCK_MONOTONIC_RAW: 11863 event->clock = &ktime_get_raw_fast_ns; 11864 nmi_safe = true; 11865 break; 11866 11867 case CLOCK_REALTIME: 11868 event->clock = &ktime_get_real_ns; 11869 break; 11870 11871 case CLOCK_BOOTTIME: 11872 event->clock = &ktime_get_boottime_ns; 11873 break; 11874 11875 case CLOCK_TAI: 11876 event->clock = &ktime_get_clocktai_ns; 11877 break; 11878 11879 default: 11880 return -EINVAL; 11881 } 11882 11883 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 11884 return -EINVAL; 11885 11886 return 0; 11887 } 11888 11889 /* 11890 * Variation on perf_event_ctx_lock_nested(), except we take two context 11891 * mutexes. 11892 */ 11893 static struct perf_event_context * 11894 __perf_event_ctx_lock_double(struct perf_event *group_leader, 11895 struct perf_event_context *ctx) 11896 { 11897 struct perf_event_context *gctx; 11898 11899 again: 11900 rcu_read_lock(); 11901 gctx = READ_ONCE(group_leader->ctx); 11902 if (!refcount_inc_not_zero(&gctx->refcount)) { 11903 rcu_read_unlock(); 11904 goto again; 11905 } 11906 rcu_read_unlock(); 11907 11908 mutex_lock_double(&gctx->mutex, &ctx->mutex); 11909 11910 if (group_leader->ctx != gctx) { 11911 mutex_unlock(&ctx->mutex); 11912 mutex_unlock(&gctx->mutex); 11913 put_ctx(gctx); 11914 goto again; 11915 } 11916 11917 return gctx; 11918 } 11919 11920 /** 11921 * sys_perf_event_open - open a performance event, associate it to a task/cpu 11922 * 11923 * @attr_uptr: event_id type attributes for monitoring/sampling 11924 * @pid: target pid 11925 * @cpu: target cpu 11926 * @group_fd: group leader event fd 11927 */ 11928 SYSCALL_DEFINE5(perf_event_open, 11929 struct perf_event_attr __user *, attr_uptr, 11930 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 11931 { 11932 struct perf_event *group_leader = NULL, *output_event = NULL; 11933 struct perf_event *event, *sibling; 11934 struct perf_event_attr attr; 11935 struct perf_event_context *ctx, *gctx; 11936 struct file *event_file = NULL; 11937 struct fd group = {NULL, 0}; 11938 struct task_struct *task = NULL; 11939 struct pmu *pmu; 11940 int event_fd; 11941 int move_group = 0; 11942 int err; 11943 int f_flags = O_RDWR; 11944 int cgroup_fd = -1; 11945 11946 /* for future expandability... 
*/ 11947 if (flags & ~PERF_FLAG_ALL) 11948 return -EINVAL; 11949 11950 /* Do we allow access to perf_event_open(2) ? */ 11951 err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); 11952 if (err) 11953 return err; 11954 11955 err = perf_copy_attr(attr_uptr, &attr); 11956 if (err) 11957 return err; 11958 11959 if (!attr.exclude_kernel) { 11960 err = perf_allow_kernel(&attr); 11961 if (err) 11962 return err; 11963 } 11964 11965 if (attr.namespaces) { 11966 if (!perfmon_capable()) 11967 return -EACCES; 11968 } 11969 11970 if (attr.freq) { 11971 if (attr.sample_freq > sysctl_perf_event_sample_rate) 11972 return -EINVAL; 11973 } else { 11974 if (attr.sample_period & (1ULL << 63)) 11975 return -EINVAL; 11976 } 11977 11978 /* Only privileged users can get physical addresses */ 11979 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) { 11980 err = perf_allow_kernel(&attr); 11981 if (err) 11982 return err; 11983 } 11984 11985 /* REGS_INTR can leak data, lockdown must prevent this */ 11986 if (attr.sample_type & PERF_SAMPLE_REGS_INTR) { 11987 err = security_locked_down(LOCKDOWN_PERF); 11988 if (err) 11989 return err; 11990 } 11991 11992 /* 11993 * In cgroup mode, the pid argument is used to pass the fd 11994 * opened to the cgroup directory in cgroupfs. The cpu argument 11995 * designates the cpu on which to monitor threads from that 11996 * cgroup. 11997 */ 11998 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 11999 return -EINVAL; 12000 12001 if (flags & PERF_FLAG_FD_CLOEXEC) 12002 f_flags |= O_CLOEXEC; 12003 12004 event_fd = get_unused_fd_flags(f_flags); 12005 if (event_fd < 0) 12006 return event_fd; 12007 12008 if (group_fd != -1) { 12009 err = perf_fget_light(group_fd, &group); 12010 if (err) 12011 goto err_fd; 12012 group_leader = group.file->private_data; 12013 if (flags & PERF_FLAG_FD_OUTPUT) 12014 output_event = group_leader; 12015 if (flags & PERF_FLAG_FD_NO_GROUP) 12016 group_leader = NULL; 12017 } 12018 12019 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 12020 task = find_lively_task_by_vpid(pid); 12021 if (IS_ERR(task)) { 12022 err = PTR_ERR(task); 12023 goto err_group_fd; 12024 } 12025 } 12026 12027 if (task && group_leader && 12028 group_leader->attr.inherit != attr.inherit) { 12029 err = -EINVAL; 12030 goto err_task; 12031 } 12032 12033 if (flags & PERF_FLAG_PID_CGROUP) 12034 cgroup_fd = pid; 12035 12036 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 12037 NULL, NULL, cgroup_fd); 12038 if (IS_ERR(event)) { 12039 err = PTR_ERR(event); 12040 goto err_task; 12041 } 12042 12043 if (is_sampling_event(event)) { 12044 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 12045 err = -EOPNOTSUPP; 12046 goto err_alloc; 12047 } 12048 } 12049 12050 /* 12051 * Special case software events and allow them to be part of 12052 * any hardware group. 12053 */ 12054 pmu = event->pmu; 12055 12056 if (attr.use_clockid) { 12057 err = perf_event_set_clock(event, attr.clockid); 12058 if (err) 12059 goto err_alloc; 12060 } 12061 12062 if (pmu->task_ctx_nr == perf_sw_context) 12063 event->event_caps |= PERF_EV_CAP_SOFTWARE; 12064 12065 if (group_leader) { 12066 if (is_software_event(event) && 12067 !in_software_context(group_leader)) { 12068 /* 12069 * If the event is a sw event, but the group_leader 12070 * is on hw context. 12071 * 12072 * Allow the addition of software events to hw 12073 * groups, this is safe because software events 12074 * never fail to schedule. 
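 *
 * E.g. a PERF_COUNT_SW_PAGE_FAULTS event grouped under a hardware
 * cycles leader is simply placed in the leader's hardware context
 * instead of a software context.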
12075 */ 12076 pmu = group_leader->ctx->pmu; 12077 } else if (!is_software_event(event) && 12078 is_software_event(group_leader) && 12079 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { 12080 /* 12081 * In case the group is a pure software group, and we 12082 * try to add a hardware event, move the whole group to 12083 * the hardware context. 12084 */ 12085 move_group = 1; 12086 } 12087 } 12088 12089 /* 12090 * Get the target context (task or percpu): 12091 */ 12092 ctx = find_get_context(pmu, task, event); 12093 if (IS_ERR(ctx)) { 12094 err = PTR_ERR(ctx); 12095 goto err_alloc; 12096 } 12097 12098 /* 12099 * Look up the group leader (we will attach this event to it): 12100 */ 12101 if (group_leader) { 12102 err = -EINVAL; 12103 12104 /* 12105 * Do not allow a recursive hierarchy (this new sibling 12106 * becoming part of another group-sibling): 12107 */ 12108 if (group_leader->group_leader != group_leader) 12109 goto err_context; 12110 12111 /* All events in a group should have the same clock */ 12112 if (group_leader->clock != event->clock) 12113 goto err_context; 12114 12115 /* 12116 * Make sure we're both events for the same CPU; 12117 * grouping events for different CPUs is broken; since 12118 * you can never concurrently schedule them anyhow. 12119 */ 12120 if (group_leader->cpu != event->cpu) 12121 goto err_context; 12122 12123 /* 12124 * Make sure we're both on the same task, or both 12125 * per-CPU events. 12126 */ 12127 if (group_leader->ctx->task != ctx->task) 12128 goto err_context; 12129 12130 /* 12131 * Do not allow to attach to a group in a different task 12132 * or CPU context. If we're moving SW events, we'll fix 12133 * this up later, so allow that. 12134 */ 12135 if (!move_group && group_leader->ctx != ctx) 12136 goto err_context; 12137 12138 /* 12139 * Only a group leader can be exclusive or pinned 12140 */ 12141 if (attr.exclusive || attr.pinned) 12142 goto err_context; 12143 } 12144 12145 if (output_event) { 12146 err = perf_event_set_output(event, output_event); 12147 if (err) 12148 goto err_context; 12149 } 12150 12151 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 12152 f_flags); 12153 if (IS_ERR(event_file)) { 12154 err = PTR_ERR(event_file); 12155 event_file = NULL; 12156 goto err_context; 12157 } 12158 12159 if (task) { 12160 err = down_read_interruptible(&task->signal->exec_update_lock); 12161 if (err) 12162 goto err_file; 12163 12164 /* 12165 * Preserve ptrace permission check for backwards compatibility. 12166 * 12167 * We must hold exec_update_lock across this and any potential 12168 * perf_install_in_context() call for this new event to 12169 * serialize against exec() altering our credentials (and the 12170 * perf_event_exit_task() that could imply). 12171 */ 12172 err = -EACCES; 12173 if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) 12174 goto err_cred; 12175 } 12176 12177 if (move_group) { 12178 gctx = __perf_event_ctx_lock_double(group_leader, ctx); 12179 12180 if (gctx->task == TASK_TOMBSTONE) { 12181 err = -ESRCH; 12182 goto err_locked; 12183 } 12184 12185 /* 12186 * Check if we raced against another sys_perf_event_open() call 12187 * moving the software group underneath us. 12188 */ 12189 if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { 12190 /* 12191 * If someone moved the group out from under us, check 12192 * if this new event wound up on the same ctx, if so 12193 * its the regular !move_group case, otherwise fail. 
12194 */
12195 if (gctx != ctx) {
12196 err = -EINVAL;
12197 goto err_locked;
12198 } else {
12199 perf_event_ctx_unlock(group_leader, gctx);
12200 move_group = 0;
12201 }
12202 }
12203
12204 /*
12205 * Failure to create exclusive events returns -EBUSY.
12206 */
12207 err = -EBUSY;
12208 if (!exclusive_event_installable(group_leader, ctx))
12209 goto err_locked;
12210
12211 for_each_sibling_event(sibling, group_leader) {
12212 if (!exclusive_event_installable(sibling, ctx))
12213 goto err_locked;
12214 }
12215 } else {
12216 mutex_lock(&ctx->mutex);
12217 }
12218
12219 if (ctx->task == TASK_TOMBSTONE) {
12220 err = -ESRCH;
12221 goto err_locked;
12222 }
12223
12224 if (!perf_event_validate_size(event)) {
12225 err = -E2BIG;
12226 goto err_locked;
12227 }
12228
12229 if (!task) {
12230 /*
12231 * Check if the @cpu we're creating an event for is online.
12232 *
12233 * We use the perf_cpu_context::ctx::mutex to serialize against
12234 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
12235 */
12236 struct perf_cpu_context *cpuctx =
12237 container_of(ctx, struct perf_cpu_context, ctx);
12238
12239 if (!cpuctx->online) {
12240 err = -ENODEV;
12241 goto err_locked;
12242 }
12243 }
12244
12245 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
12246 err = -EINVAL;
12247 goto err_locked;
12248 }
12249
12250 /*
12251 * Must be under the same ctx::mutex as perf_install_in_context(),
12252 * because we need to serialize with concurrent event creation.
12253 */
12254 if (!exclusive_event_installable(event, ctx)) {
12255 err = -EBUSY;
12256 goto err_locked;
12257 }
12258
12259 WARN_ON_ONCE(ctx->parent_ctx);
12260
12261 /*
12262 * This is the point of no return; we cannot fail hereafter. This is
12263 * where we start modifying current state.
12264 */
12265
12266 if (move_group) {
12267 /*
12268 * See perf_event_ctx_lock() for comments on the details
12269 * of swizzling perf_event::ctx.
12270 */
12271 perf_remove_from_context(group_leader, 0);
12272 put_ctx(gctx);
12273
12274 for_each_sibling_event(sibling, group_leader) {
12275 perf_remove_from_context(sibling, 0);
12276 put_ctx(gctx);
12277 }
12278
12279 /*
12280 * Wait for everybody to stop referencing the events through
12281 * the old lists, before installing it on new lists.
12282 */
12283 synchronize_rcu();
12284
12285 /*
12286 * Install the group siblings before the group leader.
12287 *
12288 * Because a group leader will try and install the entire group
12289 * (through the sibling list, which is still intact), we can
12290 * end up with siblings installed in the wrong context.
12291 *
12292 * By installing siblings first we NO-OP because they're not
12293 * reachable through the group lists.
12294 */
12295 for_each_sibling_event(sibling, group_leader) {
12296 perf_event__state_init(sibling);
12297 perf_install_in_context(ctx, sibling, sibling->cpu);
12298 get_ctx(ctx);
12299 }
12300
12301 /*
12302 * Removing from the context ends up with a disabled
12303 * event. What we want here is an event in its initial
12304 * startup state, ready to be added into a new context.
12305 */
12306 perf_event__state_init(group_leader);
12307 perf_install_in_context(ctx, group_leader, group_leader->cpu);
12308 get_ctx(ctx);
12309 }
12310
12311 /*
12312 * Precalculate sample_data sizes; do while holding ctx::mutex such
12313 * that we're serialized against further additions and before
12314 * perf_install_in_context() which is the point the event is active and
12315 * can use these values.
12316 */ 12317 perf_event__header_size(event); 12318 perf_event__id_header_size(event); 12319 12320 event->owner = current; 12321 12322 perf_install_in_context(ctx, event, event->cpu); 12323 perf_unpin_context(ctx); 12324 12325 if (move_group) 12326 perf_event_ctx_unlock(group_leader, gctx); 12327 mutex_unlock(&ctx->mutex); 12328 12329 if (task) { 12330 up_read(&task->signal->exec_update_lock); 12331 put_task_struct(task); 12332 } 12333 12334 mutex_lock(¤t->perf_event_mutex); 12335 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 12336 mutex_unlock(¤t->perf_event_mutex); 12337 12338 /* 12339 * Drop the reference on the group_event after placing the 12340 * new event on the sibling_list. This ensures destruction 12341 * of the group leader will find the pointer to itself in 12342 * perf_group_detach(). 12343 */ 12344 fdput(group); 12345 fd_install(event_fd, event_file); 12346 return event_fd; 12347 12348 err_locked: 12349 if (move_group) 12350 perf_event_ctx_unlock(group_leader, gctx); 12351 mutex_unlock(&ctx->mutex); 12352 err_cred: 12353 if (task) 12354 up_read(&task->signal->exec_update_lock); 12355 err_file: 12356 fput(event_file); 12357 err_context: 12358 perf_unpin_context(ctx); 12359 put_ctx(ctx); 12360 err_alloc: 12361 /* 12362 * If event_file is set, the fput() above will have called ->release() 12363 * and that will take care of freeing the event. 12364 */ 12365 if (!event_file) 12366 free_event(event); 12367 err_task: 12368 if (task) 12369 put_task_struct(task); 12370 err_group_fd: 12371 fdput(group); 12372 err_fd: 12373 put_unused_fd(event_fd); 12374 return err; 12375 } 12376 12377 /** 12378 * perf_event_create_kernel_counter 12379 * 12380 * @attr: attributes of the counter to create 12381 * @cpu: cpu in which the counter is bound 12382 * @task: task to profile (NULL for percpu) 12383 */ 12384 struct perf_event * 12385 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 12386 struct task_struct *task, 12387 perf_overflow_handler_t overflow_handler, 12388 void *context) 12389 { 12390 struct perf_event_context *ctx; 12391 struct perf_event *event; 12392 int err; 12393 12394 /* 12395 * Grouping is not supported for kernel events, neither is 'AUX', 12396 * make sure the caller's intentions are adjusted. 12397 */ 12398 if (attr->aux_output) 12399 return ERR_PTR(-EINVAL); 12400 12401 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 12402 overflow_handler, context, -1); 12403 if (IS_ERR(event)) { 12404 err = PTR_ERR(event); 12405 goto err; 12406 } 12407 12408 /* Mark owner so we could distinguish it from user events. */ 12409 event->owner = TASK_TOMBSTONE; 12410 12411 /* 12412 * Get the target context (task or percpu): 12413 */ 12414 ctx = find_get_context(event->pmu, task, event); 12415 if (IS_ERR(ctx)) { 12416 err = PTR_ERR(ctx); 12417 goto err_free; 12418 } 12419 12420 WARN_ON_ONCE(ctx->parent_ctx); 12421 mutex_lock(&ctx->mutex); 12422 if (ctx->task == TASK_TOMBSTONE) { 12423 err = -ESRCH; 12424 goto err_unlock; 12425 } 12426 12427 if (!task) { 12428 /* 12429 * Check if the @cpu we're creating an event for is online. 12430 * 12431 * We use the perf_cpu_context::ctx::mutex to serialize against 12432 * the hotplug notifiers. See perf_event_{init,exit}_cpu(). 
12433 */ 12434 struct perf_cpu_context *cpuctx = 12435 container_of(ctx, struct perf_cpu_context, ctx); 12436 if (!cpuctx->online) { 12437 err = -ENODEV; 12438 goto err_unlock; 12439 } 12440 } 12441 12442 if (!exclusive_event_installable(event, ctx)) { 12443 err = -EBUSY; 12444 goto err_unlock; 12445 } 12446 12447 perf_install_in_context(ctx, event, event->cpu); 12448 perf_unpin_context(ctx); 12449 mutex_unlock(&ctx->mutex); 12450 12451 return event; 12452 12453 err_unlock: 12454 mutex_unlock(&ctx->mutex); 12455 perf_unpin_context(ctx); 12456 put_ctx(ctx); 12457 err_free: 12458 free_event(event); 12459 err: 12460 return ERR_PTR(err); 12461 } 12462 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 12463 12464 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 12465 { 12466 struct perf_event_context *src_ctx; 12467 struct perf_event_context *dst_ctx; 12468 struct perf_event *event, *tmp; 12469 LIST_HEAD(events); 12470 12471 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 12472 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 12473 12474 /* 12475 * See perf_event_ctx_lock() for comments on the details 12476 * of swizzling perf_event::ctx. 12477 */ 12478 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); 12479 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 12480 event_entry) { 12481 perf_remove_from_context(event, 0); 12482 unaccount_event_cpu(event, src_cpu); 12483 put_ctx(src_ctx); 12484 list_add(&event->migrate_entry, &events); 12485 } 12486 12487 /* 12488 * Wait for the events to quiesce before re-instating them. 12489 */ 12490 synchronize_rcu(); 12491 12492 /* 12493 * Re-instate events in 2 passes. 12494 * 12495 * Skip over group leaders and only install siblings on this first 12496 * pass, siblings will not get enabled without a leader, however a 12497 * leader will enable its siblings, even if those are still on the old 12498 * context. 12499 */ 12500 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 12501 if (event->group_leader == event) 12502 continue; 12503 12504 list_del(&event->migrate_entry); 12505 if (event->state >= PERF_EVENT_STATE_OFF) 12506 event->state = PERF_EVENT_STATE_INACTIVE; 12507 account_event_cpu(event, dst_cpu); 12508 perf_install_in_context(dst_ctx, event, dst_cpu); 12509 get_ctx(dst_ctx); 12510 } 12511 12512 /* 12513 * Once all the siblings are setup properly, install the group leaders 12514 * to make it go. 
12515 */
12516 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
12517 list_del(&event->migrate_entry);
12518 if (event->state >= PERF_EVENT_STATE_OFF)
12519 event->state = PERF_EVENT_STATE_INACTIVE;
12520 account_event_cpu(event, dst_cpu);
12521 perf_install_in_context(dst_ctx, event, dst_cpu);
12522 get_ctx(dst_ctx);
12523 }
12524 mutex_unlock(&dst_ctx->mutex);
12525 mutex_unlock(&src_ctx->mutex);
12526 }
12527 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
12528
12529 static void sync_child_event(struct perf_event *child_event)
12530 {
12531 struct perf_event *parent_event = child_event->parent;
12532 u64 child_val;
12533
12534 if (child_event->attr.inherit_stat) {
12535 struct task_struct *task = child_event->ctx->task;
12536
12537 if (task && task != TASK_TOMBSTONE)
12538 perf_event_read_event(child_event, task);
12539 }
12540
12541 child_val = perf_event_count(child_event);
12542
12543 /*
12544 * Add back the child's count to the parent's count:
12545 */
12546 atomic64_add(child_val, &parent_event->child_count);
12547 atomic64_add(child_event->total_time_enabled,
12548 &parent_event->child_total_time_enabled);
12549 atomic64_add(child_event->total_time_running,
12550 &parent_event->child_total_time_running);
12551 }
12552
12553 static void
12554 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
12555 {
12556 struct perf_event *parent_event = event->parent;
12557 unsigned long detach_flags = 0;
12558
12559 if (parent_event) {
12560 /*
12561 * Do not destroy the 'original' grouping; because of the
12562 * context switch optimization the original events could've
12563 * ended up in a random child task.
12564 *
12565 * If we were to destroy the original group, all group related
12566 * operations would cease to function properly after this
12567 * random child dies.
12568 *
12569 * Do destroy all inherited groups, we don't care about those
12570 * and being thorough is better.
12571 */
12572 detach_flags = DETACH_GROUP | DETACH_CHILD;
12573 mutex_lock(&parent_event->child_mutex);
12574 }
12575
12576 perf_remove_from_context(event, detach_flags);
12577
12578 raw_spin_lock_irq(&ctx->lock);
12579 if (event->state > PERF_EVENT_STATE_EXIT)
12580 perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
12581 raw_spin_unlock_irq(&ctx->lock);
12582
12583 /*
12584 * Child events can be freed.
12585 */
12586 if (parent_event) {
12587 mutex_unlock(&parent_event->child_mutex);
12588 /*
12589 * Kick perf_poll() for is_event_hup();
12590 */
12591 perf_event_wakeup(parent_event);
12592 free_event(event);
12593 put_event(parent_event);
12594 return;
12595 }
12596
12597 /*
12598 * Parent events are governed by their filedesc, retain them.
12599 */
12600 perf_event_wakeup(event);
12601 }
12602
12603 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
12604 {
12605 struct perf_event_context *child_ctx, *clone_ctx = NULL;
12606 struct perf_event *child_event, *next;
12607
12608 WARN_ON_ONCE(child != current);
12609
12610 child_ctx = perf_pin_task_context(child, ctxn);
12611 if (!child_ctx)
12612 return;
12613
12614 /*
12615 * In order to reduce the amount of trickiness in ctx tear-down, we hold
12616 * ctx::mutex over the entire thing. This serializes against almost
12617 * everything that wants to access the ctx.
12618 *
12619 * The exception is sys_perf_event_open() /
12620 * perf_event_create_kernel_counter() which does find_get_context()
12621 * without ctx::mutex (it cannot because of the move_group double mutex
12622 * lock thing).

static void sync_child_event(struct perf_event *child_event)
{
        struct perf_event *parent_event = child_event->parent;
        u64 child_val;

        if (child_event->attr.inherit_stat) {
                struct task_struct *task = child_event->ctx->task;

                if (task && task != TASK_TOMBSTONE)
                        perf_event_read_event(child_event, task);
        }

        child_val = perf_event_count(child_event);

        /*
         * Add back the child's count to the parent's count:
         */
        atomic64_add(child_val, &parent_event->child_count);
        atomic64_add(child_event->total_time_enabled,
                     &parent_event->child_total_time_enabled);
        atomic64_add(child_event->total_time_running,
                     &parent_event->child_total_time_running);
}
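
/*
 * Note: the child totals accumulated above are what readers of the parent
 * event end up seeing; perf_event_count() (earlier in this file) sums the
 * event's own count with child_count, so a read() on the parent fd reflects
 * exited children as well:
 *
 *      total = event->count + event->child_count;      /* conceptually */
 */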

static void
perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
{
        struct perf_event *parent_event = event->parent;
        unsigned long detach_flags = 0;

        if (parent_event) {
                /*
                 * Do not destroy the 'original' grouping; because of the
                 * context switch optimization the original events could've
                 * ended up in a random child task.
                 *
                 * If we were to destroy the original group, all group related
                 * operations would cease to function properly after this
                 * random child dies.
                 *
                 * Do destroy all inherited groups; we don't care about those
                 * and being thorough is better.
                 */
                detach_flags = DETACH_GROUP | DETACH_CHILD;
                mutex_lock(&parent_event->child_mutex);
        }

        perf_remove_from_context(event, detach_flags);

        raw_spin_lock_irq(&ctx->lock);
        if (event->state > PERF_EVENT_STATE_EXIT)
                perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
        raw_spin_unlock_irq(&ctx->lock);

        /*
         * Child events can be freed.
         */
        if (parent_event) {
                mutex_unlock(&parent_event->child_mutex);
                /*
                 * Kick perf_poll() for is_event_hup().
                 */
                perf_event_wakeup(parent_event);
                free_event(event);
                put_event(parent_event);
                return;
        }

        /*
         * Parent events are governed by their filedesc, retain them.
         */
        perf_event_wakeup(event);
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
        struct perf_event_context *child_ctx, *clone_ctx = NULL;
        struct perf_event *child_event, *next;

        WARN_ON_ONCE(child != current);

        child_ctx = perf_pin_task_context(child, ctxn);
        if (!child_ctx)
                return;

        /*
         * In order to reduce the amount of trickiness in ctx tear-down, we
         * hold ctx::mutex over the entire thing. This serializes against
         * almost everything that wants to access the ctx.
         *
         * The exception is sys_perf_event_open() /
         * perf_event_create_kernel_counter() which does find_get_context()
         * without ctx::mutex (it cannot because of the move_group double
         * mutex lock thing). See the comments in perf_install_in_context().
         */
        mutex_lock(&child_ctx->mutex);

        /*
         * In a single ctx::lock section, de-schedule the events and detach the
         * context from the task such that we cannot ever get it scheduled back
         * in.
         */
        raw_spin_lock_irq(&child_ctx->lock);
        task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);

        /*
         * Now that the context is inactive, destroy the task <-> ctx relation
         * and mark the context dead.
         */
        RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
        put_ctx(child_ctx); /* cannot be last */
        WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
        put_task_struct(current); /* cannot be last */

        clone_ctx = unclone_ctx(child_ctx);
        raw_spin_unlock_irq(&child_ctx->lock);

        if (clone_ctx)
                put_ctx(clone_ctx);

        /*
         * Report the task dead after unscheduling the events so that we
         * won't get any samples after PERF_RECORD_EXIT. We can however still
         * get a few PERF_RECORD_READ events.
         */
        perf_event_task(child, child_ctx, 0);

        list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
                perf_event_exit_event(child_event, child_ctx);

        mutex_unlock(&child_ctx->mutex);

        put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 *
 * Can be called with exec_update_lock held when called from
 * setup_new_exec().
 */
void perf_event_exit_task(struct task_struct *child)
{
        struct perf_event *event, *tmp;
        int ctxn;

        mutex_lock(&child->perf_event_mutex);
        list_for_each_entry_safe(event, tmp, &child->perf_event_list,
                                 owner_entry) {
                list_del_init(&event->owner_entry);

                /*
                 * Ensure the list deletion is visible before we clear
                 * the owner; this closes a race against perf_release(),
                 * where we need to serialize on the owner->perf_event_mutex.
                 */
                smp_store_release(&event->owner, NULL);
        }
        mutex_unlock(&child->perf_event_mutex);

        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);

        /*
         * The perf_event_exit_task_context() calls perf_event_task()
         * with the child's task_ctx, which generates EXIT events for
         * child contexts and sets child->perf_event_ctxp[] to NULL.
         * At this point we need to send EXIT events to cpu contexts.
         */
        perf_event_task(child, NULL, 0);
}

static void perf_free_event(struct perf_event *event,
                            struct perf_event_context *ctx)
{
        struct perf_event *parent = event->parent;

        if (WARN_ON_ONCE(!parent))
                return;

        mutex_lock(&parent->child_mutex);
        list_del_init(&event->child_list);
        mutex_unlock(&parent->child_mutex);

        put_event(parent);

        raw_spin_lock_irq(&ctx->lock);
        perf_group_detach(event);
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
        free_event(event);
}

/*
 * Free a context as created by inheritance by perf_event_init_task() below,
 * used by fork() in case of failure.
 *
 * Even though the task has never lived, the context and events have been
 * exposed through the child_list, so we must take care tearing it all down.
 */
void perf_event_free_task(struct task_struct *task)
{
        struct perf_event_context *ctx;
        struct perf_event *event, *tmp;
        int ctxn;

        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (!ctx)
                        continue;

                mutex_lock(&ctx->mutex);
                raw_spin_lock_irq(&ctx->lock);
                /*
                 * Destroy the task <-> ctx relation and mark the context dead.
                 *
                 * This is important because even though the task hasn't been
                 * exposed yet the context has been (through child_list).
                 */
                RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
                WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
                put_task_struct(task); /* cannot be last */
                raw_spin_unlock_irq(&ctx->lock);

                list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
                        perf_free_event(event, ctx);

                mutex_unlock(&ctx->mutex);

                /*
                 * perf_event_release_kernel() could've stolen some of our
                 * child events and still have them on its free_list. In that
                 * case we must wait for these events to have been freed (in
                 * particular all their references to this task must've been
                 * dropped).
                 *
                 * Without this, copy_process() will unconditionally free this
                 * task (irrespective of its reference count) and
                 * _free_event()'s put_task_struct(event->hw.target) will be a
                 * use-after-free.
                 *
                 * Wait for all events to drop their context reference.
                 */
                wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1);
                put_ctx(ctx); /* must be last */
        }
}

void perf_event_delayed_put(struct task_struct *task)
{
        int ctxn;

        for_each_task_context_nr(ctxn)
                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

struct file *perf_event_get(unsigned int fd)
{
        struct file *file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (file->f_op != &perf_fops) {
                fput(file);
                return ERR_PTR(-EBADF);
        }

        return file;
}

const struct perf_event *perf_get_event(struct file *file)
{
        if (file->f_op != &perf_fops)
                return ERR_PTR(-EINVAL);

        return file->private_data;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
        if (!event)
                return ERR_PTR(-EINVAL);

        return &event->attr;
}
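
/*
 * Illustrative sketch of how a kernel-side caller (e.g. the BPF subsystem)
 * can resolve a perf event fd using the two helpers above; the error
 * handling mirrors their ERR_PTR() conventions:
 *
 *      struct file *file = perf_event_get(fd);
 *      const struct perf_event *event;
 *
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 *
 *      event = perf_get_event(file);
 *      if (IS_ERR(event)) {
 *              fput(file);
 *              return PTR_ERR(event);
 *      }
 *      ... use perf_event_attrs(event) ...
 *      fput(file);
 */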

/*
 * Inherit an event from parent task to child task.
 *
 * Returns:
 *  - valid pointer on success
 *  - NULL for orphaned events
 *  - IS_ERR() on error
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
              struct task_struct *parent,
              struct perf_event_context *parent_ctx,
              struct task_struct *child,
              struct perf_event *group_leader,
              struct perf_event_context *child_ctx)
{
        enum perf_event_state parent_state = parent_event->state;
        struct perf_event *child_event;
        unsigned long flags;

        /*
         * Instead of creating recursive hierarchies of events,
         * we link inherited events back to the original parent,
         * which is guaranteed to have a filp, and use that as the
         * reference count:
         */
        if (parent_event->parent)
                parent_event = parent_event->parent;

        child_event = perf_event_alloc(&parent_event->attr,
                                       parent_event->cpu,
                                       child,
                                       group_leader, parent_event,
                                       NULL, NULL, -1);
        if (IS_ERR(child_event))
                return child_event;

        if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) &&
            !child_ctx->task_ctx_data) {
                struct pmu *pmu = child_event->pmu;

                child_ctx->task_ctx_data = alloc_task_ctx_data(pmu);
                if (!child_ctx->task_ctx_data) {
                        free_event(child_event);
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * is_orphaned_event() and list_add_tail(&parent_event->child_list)
         * must be under the same lock in order to serialize against
         * perf_event_release_kernel(), such that either we must observe
         * is_orphaned_event() or they will observe us on the child_list.
         */
        mutex_lock(&parent_event->child_mutex);
        if (is_orphaned_event(parent_event) ||
            !atomic_long_inc_not_zero(&parent_event->refcount)) {
                mutex_unlock(&parent_event->child_mutex);
                /* task_ctx_data is freed with child_ctx */
                free_event(child_event);
                return NULL;
        }

        get_ctx(child_ctx);

        /*
         * Make the child state follow the state of the parent event,
         * not its attr.disabled bit. We hold the parent's mutex,
         * so we won't race with perf_event_{en, dis}able_family.
         */
        if (parent_state >= PERF_EVENT_STATE_INACTIVE)
                child_event->state = PERF_EVENT_STATE_INACTIVE;
        else
                child_event->state = PERF_EVENT_STATE_OFF;

        if (parent_event->attr.freq) {
                u64 sample_period = parent_event->hw.sample_period;
                struct hw_perf_event *hwc = &child_event->hw;

                hwc->sample_period = sample_period;
                hwc->last_period = sample_period;

                local64_set(&hwc->period_left, sample_period);
        }

        child_event->ctx = child_ctx;
        child_event->overflow_handler = parent_event->overflow_handler;
        child_event->overflow_handler_context
                = parent_event->overflow_handler_context;

        /*
         * Precalculate sample_data sizes.
         */
        perf_event__header_size(child_event);
        perf_event__id_header_size(child_event);

        /*
         * Link it up in the child's context:
         */
        raw_spin_lock_irqsave(&child_ctx->lock, flags);
        add_event_to_ctx(child_event, child_ctx);
        child_event->attach_state |= PERF_ATTACH_CHILD;
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

        /*
         * Link this into the parent event's child list.
         */
        list_add_tail(&child_event->child_list, &parent_event->child_list);
        mutex_unlock(&parent_event->child_mutex);

        return child_event;
}

/*
 * Inherits an event group.
 *
 * This will quietly suppress orphaned events; !inherit_event() is not an error.
 * This matches with perf_event_release_kernel() removing all child events.
 *
 * Returns:
 *  - 0 on success
 *  - <0 on error
 */
static int inherit_group(struct perf_event *parent_event,
                         struct task_struct *parent,
                         struct perf_event_context *parent_ctx,
                         struct task_struct *child,
                         struct perf_event_context *child_ctx)
{
        struct perf_event *leader;
        struct perf_event *sub;
        struct perf_event *child_ctr;

        leader = inherit_event(parent_event, parent, parent_ctx,
                               child, NULL, child_ctx);
        if (IS_ERR(leader))
                return PTR_ERR(leader);
        /*
         * @leader can be NULL here because of is_orphaned_event(). In this
         * case inherit_event() will create individual events, similar to what
         * perf_group_detach() would do anyway.
         */
        for_each_sibling_event(sub, parent_event) {
                child_ctr = inherit_event(sub, parent, parent_ctx,
                                          child, leader, child_ctx);
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);

                if (sub->aux_event == parent_event && child_ctr &&
                    !perf_get_aux_event(child_ctr, leader))
                        return -EINVAL;
        }
        return 0;
}
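
/*
 * For orientation (user-space view, not kernel code; the syscall invocation
 * below is illustrative): the inheritance machinery in this file is what
 * services an event opened with attr.inherit = 1, e.g.:
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_INSTRUCTIONS,
 *              .size           = sizeof(attr),
 *              .inherit        = 1,
 *      };
 *      int fd = syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
 *
 * Each fork() of the measured task then gets a child event whose counts are
 * fed back to this parent via sync_child_event() above.
 */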

/*
 * Creates the child task context and tries to inherit the event-group.
 *
 * Clears @inherited_all on !attr.inherit or error. Note that we'll leave
 * inherited_all set when we 'fail' to inherit an orphaned event; this is
 * consistent with perf_event_release_kernel() removing all child events.
 *
 * Returns:
 *  - 0 on success
 *  - <0 on error
 */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
                   struct task_struct *child, int ctxn,
                   u64 clone_flags, int *inherited_all)
{
        int ret;
        struct perf_event_context *child_ctx;

        if (!event->attr.inherit ||
            (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
            /* Do not inherit if sigtrap and signal handlers were cleared. */
            (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
                *inherited_all = 0;
                return 0;
        }

        child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
                 * inherit events that have been marked for cloning.
                 * First allocate and initialize a context for the
                 * child.
                 */
                child_ctx = alloc_perf_context(parent_ctx->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;

                child->perf_event_ctxp[ctxn] = child_ctx;
        }

        ret = inherit_group(event, parent, parent_ctx,
                            child, child_ctx);

        if (ret)
                *inherited_all = 0;

        return ret;
}

/*
 * Initialize the perf_event context in task_struct.
 */
static int perf_event_init_context(struct task_struct *child, int ctxn,
                                   u64 clone_flags)
{
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
        unsigned long flags;
        int ret = 0;

        if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;

        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);
        if (!parent_ctx)
                return 0;

        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * it non-NULL earlier, the only reason for it to become NULL
         * is if we exit, and since we're currently in the middle of
         * a fork we can't be exiting at the same time.
         */

        /*
         * Lock the parent list. No need to lock the child - not PID
         * hashed yet and not running, so nobody can access it.
         */
        mutex_lock(&parent_ctx->mutex);

        /*
         * We don't have to disable NMIs - we are only looking at
         * the list, not manipulating it:
         */
        perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, clone_flags,
                                         &inherited_all);
                if (ret)
                        goto out_unlock;
        }

        /*
         * We can't hold ctx->lock when iterating the ->flexible_groups list
         * due to allocations, but we need to prevent rotation because
         * rotate_ctx() will change the list from interrupt context.
         */
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 1;
        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

        perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, clone_flags,
                                         &inherited_all);
                if (ret)
                        goto out_unlock;
        }

        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 0;

        child_ctx = child->perf_event_ctxp[ctxn];

        if (child_ctx && inherited_all) {
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
                 *
                 * Note that if the parent is a clone, holding
                 * parent_ctx->lock keeps it from being uncloned.
                 */
                cloned_ctx = parent_ctx->parent_ctx;
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
                } else {
                        child_ctx->parent_ctx = parent_ctx;
                        child_ctx->parent_gen = parent_ctx->generation;
                }
                get_ctx(child_ctx->parent_ctx);
        }

        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
out_unlock:
        mutex_unlock(&parent_ctx->mutex);

        perf_unpin_context(parent_ctx);
        put_ctx(parent_ctx);

        return ret;
}

/*
 * Initialize the perf_event context in task_struct.
 */
int perf_event_init_task(struct task_struct *child, u64 clone_flags)
{
        int ctxn, ret;

        memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);

        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn, clone_flags);
                if (ret) {
                        perf_event_free_task(child);
                        return ret;
                }
        }

        return 0;
}
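
/*
 * Call-site sketch (paraphrased, with an illustrative cleanup label; the
 * exact label lives in kernel/fork.c): copy_process() invokes this during
 * fork, and a non-zero return aborts the fork; perf_event_init_task()
 * itself calls perf_event_free_task() on failure, as seen above:
 *
 *      retval = perf_event_init_task(p, clone_flags);
 *      if (retval)
 *              goto bad_fork_cleanup;          // illustrative label
 */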

static void __init perf_event_init_all_cpus(void)
{
        struct swevent_htable *swhash;
        int cpu;

        zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);

        for_each_possible_cpu(cpu) {
                swhash = &per_cpu(swevent_htable, cpu);
                mutex_init(&swhash->hlist_mutex);
                INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));

                INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
                raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));

#ifdef CONFIG_CGROUP_PERF
                INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
#endif
                INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
        }
}

static void perf_swevent_init_cpu(unsigned int cpu)
{
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

        mutex_lock(&swhash->hlist_mutex);
        if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
                struct swevent_hlist *hlist;

                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
                WARN_ON(!hlist);
                rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
        mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
        struct perf_event_context *ctx = __info;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct perf_event *event;

        raw_spin_lock(&ctx->lock);
        ctx_sched_out(ctx, cpuctx, EVENT_TIME);
        list_for_each_entry(event, &ctx->event_list, event_entry)
                __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
        raw_spin_unlock(&ctx->lock);
}

static void perf_event_exit_cpu_context(int cpu)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
        struct pmu *pmu;

        mutex_lock(&pmus_lock);
        list_for_each_entry(pmu, &pmus, entry) {
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                ctx = &cpuctx->ctx;

                mutex_lock(&ctx->mutex);
                smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
                cpuctx->online = 0;
                mutex_unlock(&ctx->mutex);
        }
        cpumask_clear_cpu(cpu, perf_online_mask);
        mutex_unlock(&pmus_lock);
}
#else

static void perf_event_exit_cpu_context(int cpu) { }

#endif

int perf_event_init_cpu(unsigned int cpu)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
        struct pmu *pmu;

        perf_swevent_init_cpu(cpu);

        mutex_lock(&pmus_lock);
        cpumask_set_cpu(cpu, perf_online_mask);
        list_for_each_entry(pmu, &pmus, entry) {
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                ctx = &cpuctx->ctx;

                mutex_lock(&ctx->mutex);
                cpuctx->online = 1;
                mutex_unlock(&ctx->mutex);
        }
        mutex_unlock(&pmus_lock);

        return 0;
}

int perf_event_exit_cpu(unsigned int cpu)
{
        perf_event_exit_cpu_context(cpu);
        return 0;
}
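
/*
 * Hotplug wiring note: besides the boot-time call in perf_event_init()
 * below, the two functions above are invoked through the CPU hotplug state
 * machine; the registration lives in kernel/cpu.c, not here. Conceptually
 * this is equivalent to:
 *
 *      cpuhp_setup_state(CPUHP_PERF_PREPARE, "perf:prepare",
 *                        perf_event_init_cpu, perf_event_exit_cpu);
 *
 * The call above illustrates the pattern and is not a quote of the actual
 * registration code.
 */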

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
        int cpu;

        for_each_online_cpu(cpu)
                perf_event_exit_cpu(cpu);

        return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
        .notifier_call = perf_reboot,
        .priority = INT_MIN,
};

void __init perf_event_init(void)
{
        int ret;

        idr_init(&pmu_idr);

        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
        perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
        perf_pmu_register(&perf_cpu_clock, NULL, -1);
        perf_pmu_register(&perf_task_clock, NULL, -1);
        perf_tp_register();
        perf_event_init_cpu(smp_processor_id());
        register_reboot_notifier(&perf_reboot_notifier);

        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

        perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);

        /*
         * Build-time assertion that we keep the data_head at the intended
         * location. IOW, validate that we got the __reserved[] size right.
         */
        BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
                     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *page)
{
        struct perf_pmu_events_attr *pmu_attr =
                container_of(attr, struct perf_pmu_events_attr, attr);

        if (pmu_attr->event_str)
                return sprintf(page, "%s\n", pmu_attr->event_str);

        return 0;
}
EXPORT_SYMBOL_GPL(perf_event_sysfs_show);

static int __init perf_event_sysfs_init(void)
{
        struct pmu *pmu;
        int ret;

        mutex_lock(&pmus_lock);

        ret = bus_register(&pmu_bus);
        if (ret)
                goto unlock;

        list_for_each_entry(pmu, &pmus, entry) {
                if (!pmu->name || pmu->type < 0)
                        continue;

                ret = pmu_dev_alloc(pmu);
                WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
        }
        pmu_bus_running = 1;
        ret = 0;

unlock:
        mutex_unlock(&pmus_lock);

        return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct perf_cgroup *jc;

        jc = kzalloc(sizeof(*jc), GFP_KERNEL);
        if (!jc)
                return ERR_PTR(-ENOMEM);

        jc->info = alloc_percpu(struct perf_cgroup_info);
        if (!jc->info) {
                kfree(jc);
                return ERR_PTR(-ENOMEM);
        }

        return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
        struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

        free_percpu(jc->info);
        kfree(jc);
}

static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
{
        perf_event_cgroup(css->cgroup);
        return 0;
}

static int __perf_cgroup_move(void *info)
{
        struct task_struct *task = info;

        rcu_read_lock();
        perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
        rcu_read_unlock();
        return 0;
}

static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *css;

        cgroup_taskset_for_each(task, css, tset)
                task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
        .css_alloc      = perf_cgroup_css_alloc,
        .css_free       = perf_cgroup_css_free,
        .css_online     = perf_cgroup_css_online,
        .attach         = perf_cgroup_attach,
        /*
         * Implicitly enable on dfl hierarchy so that perf events can
         * always be filtered by cgroup2 path as long as perf_event
         * controller is not mounted on a legacy hierarchy.
         */
        .implicit_on_dfl = true,
        .threaded       = true,
};
#endif /* CONFIG_CGROUP_PERF */
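
/*
 * Usage note (tooling side, illustrative): with the perf_event controller
 * implicitly enabled on the unified hierarchy, events can be restricted to
 * a cgroup from user space, e.g.:
 *
 *      perf stat -e cycles -a -G mygroup sleep 1
 *
 * where "mygroup" names a cgroup under the perf_event hierarchy; the
 * command and group name are examples only.
 */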