1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Performance events core code:
4 *
5 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
6 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
8 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 */
10
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/idr.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/tick.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/hugetlb.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/trace_events.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 #include <linux/namei.h>
48 #include <linux/parser.h>
49 #include <linux/sched/clock.h>
50 #include <linux/sched/mm.h>
51 #include <linux/proc_ns.h>
52 #include <linux/mount.h>
53 #include <linux/min_heap.h>
54 #include <linux/highmem.h>
55 #include <linux/pgtable.h>
56 #include <linux/buildid.h>
57 #include <linux/task_work.h>
58
59 #include "internal.h"
60
61 #include <asm/irq_regs.h>
62
63 typedef int (*remote_function_f)(void *);
64
65 struct remote_function_call {
66 struct task_struct *p;
67 remote_function_f func;
68 void *info;
69 int ret;
70 };
71
remote_function(void * data)72 static void remote_function(void *data)
73 {
74 struct remote_function_call *tfc = data;
75 struct task_struct *p = tfc->p;
76
77 if (p) {
78 /* -EAGAIN */
79 if (task_cpu(p) != smp_processor_id())
80 return;
81
82 /*
83 * Now that we're on right CPU with IRQs disabled, we can test
84 * if we hit the right task without races.
85 */
86
87 tfc->ret = -ESRCH; /* No such (running) process */
88 if (p != current)
89 return;
90 }
91
92 tfc->ret = tfc->func(tfc->info);
93 }
94
95 /**
96 * task_function_call - call a function on the cpu on which a task runs
97 * @p: the task to evaluate
98 * @func: the function to be called
99 * @info: the function call argument
100 *
101 * Calls the function @func when the task is currently running. This might
102 * be on the current CPU, which just calls the function directly. This will
103 * retry due to any failures in smp_call_function_single(), such as if the
104 * task_cpu() goes offline concurrently.
105 *
106 * returns @func return value or -ESRCH or -ENXIO when the process isn't running
107 */
108 static int
task_function_call(struct task_struct * p,remote_function_f func,void * info)109 task_function_call(struct task_struct *p, remote_function_f func, void *info)
110 {
111 struct remote_function_call data = {
112 .p = p,
113 .func = func,
114 .info = info,
115 .ret = -EAGAIN,
116 };
117 int ret;
118
119 for (;;) {
120 ret = smp_call_function_single(task_cpu(p), remote_function,
121 &data, 1);
122 if (!ret)
123 ret = data.ret;
124
125 if (ret != -EAGAIN)
126 break;
127
128 cond_resched();
129 }
130
131 return ret;
132 }
133
134 /**
135 * cpu_function_call - call a function on the cpu
136 * @cpu: target cpu to queue this function
137 * @func: the function to be called
138 * @info: the function call argument
139 *
140 * Calls the function @func on the remote cpu.
141 *
142 * returns: @func return value or -ENXIO when the cpu is offline
143 */
cpu_function_call(int cpu,remote_function_f func,void * info)144 static int cpu_function_call(int cpu, remote_function_f func, void *info)
145 {
146 struct remote_function_call data = {
147 .p = NULL,
148 .func = func,
149 .info = info,
150 .ret = -ENXIO, /* No such CPU */
151 };
152
153 smp_call_function_single(cpu, remote_function, &data, 1);
154
155 return data.ret;
156 }
157
perf_ctx_lock(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)158 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
159 struct perf_event_context *ctx)
160 {
161 raw_spin_lock(&cpuctx->ctx.lock);
162 if (ctx)
163 raw_spin_lock(&ctx->lock);
164 }
165
perf_ctx_unlock(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)166 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
167 struct perf_event_context *ctx)
168 {
169 if (ctx)
170 raw_spin_unlock(&ctx->lock);
171 raw_spin_unlock(&cpuctx->ctx.lock);
172 }
173
174 #define TASK_TOMBSTONE ((void *)-1L)
175
is_kernel_event(struct perf_event * event)176 static bool is_kernel_event(struct perf_event *event)
177 {
178 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
179 }
180
181 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
182
perf_cpu_task_ctx(void)183 struct perf_event_context *perf_cpu_task_ctx(void)
184 {
185 lockdep_assert_irqs_disabled();
186 return this_cpu_ptr(&perf_cpu_context)->task_ctx;
187 }
188
189 /*
190 * On task ctx scheduling...
191 *
192 * When !ctx->nr_events a task context will not be scheduled. This means
193 * we can disable the scheduler hooks (for performance) without leaving
194 * pending task ctx state.
195 *
196 * This however results in two special cases:
197 *
198 * - removing the last event from a task ctx; this is relatively straight
199 * forward and is done in __perf_remove_from_context.
200 *
201 * - adding the first event to a task ctx; this is tricky because we cannot
202 * rely on ctx->is_active and therefore cannot use event_function_call().
203 * See perf_install_in_context().
204 *
205 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
206 */
207
208 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
209 struct perf_event_context *, void *);
210
211 struct event_function_struct {
212 struct perf_event *event;
213 event_f func;
214 void *data;
215 };
216
event_function(void * info)217 static int event_function(void *info)
218 {
219 struct event_function_struct *efs = info;
220 struct perf_event *event = efs->event;
221 struct perf_event_context *ctx = event->ctx;
222 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
223 struct perf_event_context *task_ctx = cpuctx->task_ctx;
224 int ret = 0;
225
226 lockdep_assert_irqs_disabled();
227
228 perf_ctx_lock(cpuctx, task_ctx);
229 /*
230 * Since we do the IPI call without holding ctx->lock things can have
231 * changed, double check we hit the task we set out to hit.
232 */
233 if (ctx->task) {
234 if (ctx->task != current) {
235 ret = -ESRCH;
236 goto unlock;
237 }
238
239 /*
240 * We only use event_function_call() on established contexts,
241 * and event_function() is only ever called when active (or
242 * rather, we'll have bailed in task_function_call() or the
243 * above ctx->task != current test), therefore we must have
244 * ctx->is_active here.
245 */
246 WARN_ON_ONCE(!ctx->is_active);
247 /*
248 * And since we have ctx->is_active, cpuctx->task_ctx must
249 * match.
250 */
251 WARN_ON_ONCE(task_ctx != ctx);
252 } else {
253 WARN_ON_ONCE(&cpuctx->ctx != ctx);
254 }
255
256 efs->func(event, cpuctx, ctx, efs->data);
257 unlock:
258 perf_ctx_unlock(cpuctx, task_ctx);
259
260 return ret;
261 }
262
event_function_call(struct perf_event * event,event_f func,void * data)263 static void event_function_call(struct perf_event *event, event_f func, void *data)
264 {
265 struct perf_event_context *ctx = event->ctx;
266 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
267 struct event_function_struct efs = {
268 .event = event,
269 .func = func,
270 .data = data,
271 };
272
273 if (!event->parent) {
274 /*
275 * If this is a !child event, we must hold ctx::mutex to
276 * stabilize the event->ctx relation. See
277 * perf_event_ctx_lock().
278 */
279 lockdep_assert_held(&ctx->mutex);
280 }
281
282 if (!task) {
283 cpu_function_call(event->cpu, event_function, &efs);
284 return;
285 }
286
287 if (task == TASK_TOMBSTONE)
288 return;
289
290 again:
291 if (!task_function_call(task, event_function, &efs))
292 return;
293
294 raw_spin_lock_irq(&ctx->lock);
295 /*
296 * Reload the task pointer, it might have been changed by
297 * a concurrent perf_event_context_sched_out().
298 */
299 task = ctx->task;
300 if (task == TASK_TOMBSTONE) {
301 raw_spin_unlock_irq(&ctx->lock);
302 return;
303 }
304 if (ctx->is_active) {
305 raw_spin_unlock_irq(&ctx->lock);
306 goto again;
307 }
308 func(event, NULL, ctx, data);
309 raw_spin_unlock_irq(&ctx->lock);
310 }
311
312 /*
313 * Similar to event_function_call() + event_function(), but hard assumes IRQs
314 * are already disabled and we're on the right CPU.
315 */
event_function_local(struct perf_event * event,event_f func,void * data)316 static void event_function_local(struct perf_event *event, event_f func, void *data)
317 {
318 struct perf_event_context *ctx = event->ctx;
319 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
320 struct task_struct *task = READ_ONCE(ctx->task);
321 struct perf_event_context *task_ctx = NULL;
322
323 lockdep_assert_irqs_disabled();
324
325 if (task) {
326 if (task == TASK_TOMBSTONE)
327 return;
328
329 task_ctx = ctx;
330 }
331
332 perf_ctx_lock(cpuctx, task_ctx);
333
334 task = ctx->task;
335 if (task == TASK_TOMBSTONE)
336 goto unlock;
337
338 if (task) {
339 /*
340 * We must be either inactive or active and the right task,
341 * otherwise we're screwed, since we cannot IPI to somewhere
342 * else.
343 */
344 if (ctx->is_active) {
345 if (WARN_ON_ONCE(task != current))
346 goto unlock;
347
348 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
349 goto unlock;
350 }
351 } else {
352 WARN_ON_ONCE(&cpuctx->ctx != ctx);
353 }
354
355 func(event, cpuctx, ctx, data);
356 unlock:
357 perf_ctx_unlock(cpuctx, task_ctx);
358 }
359
360 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
361 PERF_FLAG_FD_OUTPUT |\
362 PERF_FLAG_PID_CGROUP |\
363 PERF_FLAG_FD_CLOEXEC)
364
365 /*
366 * branch priv levels that need permission checks
367 */
368 #define PERF_SAMPLE_BRANCH_PERM_PLM \
369 (PERF_SAMPLE_BRANCH_KERNEL |\
370 PERF_SAMPLE_BRANCH_HV)
371
372 enum event_type_t {
373 EVENT_FLEXIBLE = 0x1,
374 EVENT_PINNED = 0x2,
375 EVENT_TIME = 0x4,
376 /* see ctx_resched() for details */
377 EVENT_CPU = 0x8,
378 EVENT_CGROUP = 0x10,
379 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
380 };
381
382 /*
383 * perf_sched_events : >0 events exist
384 */
385
386 static void perf_sched_delayed(struct work_struct *work);
387 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
388 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
389 static DEFINE_MUTEX(perf_sched_mutex);
390 static atomic_t perf_sched_count;
391
392 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
393
394 static atomic_t nr_mmap_events __read_mostly;
395 static atomic_t nr_comm_events __read_mostly;
396 static atomic_t nr_namespaces_events __read_mostly;
397 static atomic_t nr_task_events __read_mostly;
398 static atomic_t nr_freq_events __read_mostly;
399 static atomic_t nr_switch_events __read_mostly;
400 static atomic_t nr_ksymbol_events __read_mostly;
401 static atomic_t nr_bpf_events __read_mostly;
402 static atomic_t nr_cgroup_events __read_mostly;
403 static atomic_t nr_text_poke_events __read_mostly;
404 static atomic_t nr_build_id_events __read_mostly;
405
406 static LIST_HEAD(pmus);
407 static DEFINE_MUTEX(pmus_lock);
408 static struct srcu_struct pmus_srcu;
409 static cpumask_var_t perf_online_mask;
410 static struct kmem_cache *perf_event_cache;
411
412 /*
413 * perf event paranoia level:
414 * -1 - not paranoid at all
415 * 0 - disallow raw tracepoint access for unpriv
416 * 1 - disallow cpu events for unpriv
417 * 2 - disallow kernel profiling for unpriv
418 */
419 int sysctl_perf_event_paranoid __read_mostly = 2;
420
421 /* Minimum for 512 kiB + 1 user control page */
422 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
423
424 /*
425 * max perf event sample rate
426 */
427 #define DEFAULT_MAX_SAMPLE_RATE 100000
428 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
429 #define DEFAULT_CPU_TIME_MAX_PERCENT 25
430
431 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
432
433 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
434 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
435
436 static int perf_sample_allowed_ns __read_mostly =
437 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
438
update_perf_cpu_limits(void)439 static void update_perf_cpu_limits(void)
440 {
441 u64 tmp = perf_sample_period_ns;
442
443 tmp *= sysctl_perf_cpu_time_max_percent;
444 tmp = div_u64(tmp, 100);
445 if (!tmp)
446 tmp = 1;
447
448 WRITE_ONCE(perf_sample_allowed_ns, tmp);
449 }
450
451 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
452
perf_proc_update_handler(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)453 int perf_proc_update_handler(struct ctl_table *table, int write,
454 void *buffer, size_t *lenp, loff_t *ppos)
455 {
456 int ret;
457 int perf_cpu = sysctl_perf_cpu_time_max_percent;
458 /*
459 * If throttling is disabled don't allow the write:
460 */
461 if (write && (perf_cpu == 100 || perf_cpu == 0))
462 return -EINVAL;
463
464 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
465 if (ret || !write)
466 return ret;
467
468 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
469 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
470 update_perf_cpu_limits();
471
472 return 0;
473 }
474
475 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
476
perf_cpu_time_max_percent_handler(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)477 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
478 void *buffer, size_t *lenp, loff_t *ppos)
479 {
480 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
481
482 if (ret || !write)
483 return ret;
484
485 if (sysctl_perf_cpu_time_max_percent == 100 ||
486 sysctl_perf_cpu_time_max_percent == 0) {
487 printk(KERN_WARNING
488 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
489 WRITE_ONCE(perf_sample_allowed_ns, 0);
490 } else {
491 update_perf_cpu_limits();
492 }
493
494 return 0;
495 }
496
497 /*
498 * perf samples are done in some very critical code paths (NMIs).
499 * If they take too much CPU time, the system can lock up and not
500 * get any real work done. This will drop the sample rate when
501 * we detect that events are taking too long.
502 */
503 #define NR_ACCUMULATED_SAMPLES 128
504 static DEFINE_PER_CPU(u64, running_sample_length);
505
506 static u64 __report_avg;
507 static u64 __report_allowed;
508
perf_duration_warn(struct irq_work * w)509 static void perf_duration_warn(struct irq_work *w)
510 {
511 printk_ratelimited(KERN_INFO
512 "perf: interrupt took too long (%lld > %lld), lowering "
513 "kernel.perf_event_max_sample_rate to %d\n",
514 __report_avg, __report_allowed,
515 sysctl_perf_event_sample_rate);
516 }
517
518 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
519
perf_sample_event_took(u64 sample_len_ns)520 void perf_sample_event_took(u64 sample_len_ns)
521 {
522 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
523 u64 running_len;
524 u64 avg_len;
525 u32 max;
526
527 if (max_len == 0)
528 return;
529
530 /* Decay the counter by 1 average sample. */
531 running_len = __this_cpu_read(running_sample_length);
532 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
533 running_len += sample_len_ns;
534 __this_cpu_write(running_sample_length, running_len);
535
536 /*
537 * Note: this will be biased artifically low until we have
538 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
539 * from having to maintain a count.
540 */
541 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
542 if (avg_len <= max_len)
543 return;
544
545 __report_avg = avg_len;
546 __report_allowed = max_len;
547
548 /*
549 * Compute a throttle threshold 25% below the current duration.
550 */
551 avg_len += avg_len / 4;
552 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
553 if (avg_len < max)
554 max /= (u32)avg_len;
555 else
556 max = 1;
557
558 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
559 WRITE_ONCE(max_samples_per_tick, max);
560
561 sysctl_perf_event_sample_rate = max * HZ;
562 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
563
564 if (!irq_work_queue(&perf_duration_work)) {
565 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
566 "kernel.perf_event_max_sample_rate to %d\n",
567 __report_avg, __report_allowed,
568 sysctl_perf_event_sample_rate);
569 }
570 }
571
572 static atomic64_t perf_event_id;
573
574 static void update_context_time(struct perf_event_context *ctx);
575 static u64 perf_event_time(struct perf_event *event);
576
perf_event_print_debug(void)577 void __weak perf_event_print_debug(void) { }
578
perf_clock(void)579 static inline u64 perf_clock(void)
580 {
581 return local_clock();
582 }
583
perf_event_clock(struct perf_event * event)584 static inline u64 perf_event_clock(struct perf_event *event)
585 {
586 return event->clock();
587 }
588
589 /*
590 * State based event timekeeping...
591 *
592 * The basic idea is to use event->state to determine which (if any) time
593 * fields to increment with the current delta. This means we only need to
594 * update timestamps when we change state or when they are explicitly requested
595 * (read).
596 *
597 * Event groups make things a little more complicated, but not terribly so. The
598 * rules for a group are that if the group leader is OFF the entire group is
599 * OFF, irrespecive of what the group member states are. This results in
600 * __perf_effective_state().
601 *
602 * A futher ramification is that when a group leader flips between OFF and
603 * !OFF, we need to update all group member times.
604 *
605 *
606 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
607 * need to make sure the relevant context time is updated before we try and
608 * update our timestamps.
609 */
610
611 static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event * event)612 __perf_effective_state(struct perf_event *event)
613 {
614 struct perf_event *leader = event->group_leader;
615
616 if (leader->state <= PERF_EVENT_STATE_OFF)
617 return leader->state;
618
619 return event->state;
620 }
621
622 static __always_inline void
__perf_update_times(struct perf_event * event,u64 now,u64 * enabled,u64 * running)623 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
624 {
625 enum perf_event_state state = __perf_effective_state(event);
626 u64 delta = now - event->tstamp;
627
628 *enabled = event->total_time_enabled;
629 if (state >= PERF_EVENT_STATE_INACTIVE)
630 *enabled += delta;
631
632 *running = event->total_time_running;
633 if (state >= PERF_EVENT_STATE_ACTIVE)
634 *running += delta;
635 }
636
perf_event_update_time(struct perf_event * event)637 static void perf_event_update_time(struct perf_event *event)
638 {
639 u64 now = perf_event_time(event);
640
641 __perf_update_times(event, now, &event->total_time_enabled,
642 &event->total_time_running);
643 event->tstamp = now;
644 }
645
perf_event_update_sibling_time(struct perf_event * leader)646 static void perf_event_update_sibling_time(struct perf_event *leader)
647 {
648 struct perf_event *sibling;
649
650 for_each_sibling_event(sibling, leader)
651 perf_event_update_time(sibling);
652 }
653
654 static void
perf_event_set_state(struct perf_event * event,enum perf_event_state state)655 perf_event_set_state(struct perf_event *event, enum perf_event_state state)
656 {
657 if (event->state == state)
658 return;
659
660 perf_event_update_time(event);
661 /*
662 * If a group leader gets enabled/disabled all its siblings
663 * are affected too.
664 */
665 if ((event->state < 0) ^ (state < 0))
666 perf_event_update_sibling_time(event);
667
668 WRITE_ONCE(event->state, state);
669 }
670
671 /*
672 * UP store-release, load-acquire
673 */
674
675 #define __store_release(ptr, val) \
676 do { \
677 barrier(); \
678 WRITE_ONCE(*(ptr), (val)); \
679 } while (0)
680
681 #define __load_acquire(ptr) \
682 ({ \
683 __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
684 barrier(); \
685 ___p; \
686 })
687
perf_ctx_disable(struct perf_event_context * ctx,bool cgroup)688 static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
689 {
690 struct perf_event_pmu_context *pmu_ctx;
691
692 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
693 if (cgroup && !pmu_ctx->nr_cgroups)
694 continue;
695 perf_pmu_disable(pmu_ctx->pmu);
696 }
697 }
698
perf_ctx_enable(struct perf_event_context * ctx,bool cgroup)699 static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
700 {
701 struct perf_event_pmu_context *pmu_ctx;
702
703 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
704 if (cgroup && !pmu_ctx->nr_cgroups)
705 continue;
706 perf_pmu_enable(pmu_ctx->pmu);
707 }
708 }
709
710 static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
711 static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);
712
713 #ifdef CONFIG_CGROUP_PERF
714
715 static inline bool
perf_cgroup_match(struct perf_event * event)716 perf_cgroup_match(struct perf_event *event)
717 {
718 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
719
720 /* @event doesn't care about cgroup */
721 if (!event->cgrp)
722 return true;
723
724 /* wants specific cgroup scope but @cpuctx isn't associated with any */
725 if (!cpuctx->cgrp)
726 return false;
727
728 /*
729 * Cgroup scoping is recursive. An event enabled for a cgroup is
730 * also enabled for all its descendant cgroups. If @cpuctx's
731 * cgroup is a descendant of @event's (the test covers identity
732 * case), it's a match.
733 */
734 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
735 event->cgrp->css.cgroup);
736 }
737
perf_detach_cgroup(struct perf_event * event)738 static inline void perf_detach_cgroup(struct perf_event *event)
739 {
740 css_put(&event->cgrp->css);
741 event->cgrp = NULL;
742 }
743
is_cgroup_event(struct perf_event * event)744 static inline int is_cgroup_event(struct perf_event *event)
745 {
746 return event->cgrp != NULL;
747 }
748
perf_cgroup_event_time(struct perf_event * event)749 static inline u64 perf_cgroup_event_time(struct perf_event *event)
750 {
751 struct perf_cgroup_info *t;
752
753 t = per_cpu_ptr(event->cgrp->info, event->cpu);
754 return t->time;
755 }
756
perf_cgroup_event_time_now(struct perf_event * event,u64 now)757 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
758 {
759 struct perf_cgroup_info *t;
760
761 t = per_cpu_ptr(event->cgrp->info, event->cpu);
762 if (!__load_acquire(&t->active))
763 return t->time;
764 now += READ_ONCE(t->timeoffset);
765 return now;
766 }
767
__update_cgrp_time(struct perf_cgroup_info * info,u64 now,bool adv)768 static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
769 {
770 if (adv)
771 info->time += now - info->timestamp;
772 info->timestamp = now;
773 /*
774 * see update_context_time()
775 */
776 WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
777 }
778
update_cgrp_time_from_cpuctx(struct perf_cpu_context * cpuctx,bool final)779 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
780 {
781 struct perf_cgroup *cgrp = cpuctx->cgrp;
782 struct cgroup_subsys_state *css;
783 struct perf_cgroup_info *info;
784
785 if (cgrp) {
786 u64 now = perf_clock();
787
788 for (css = &cgrp->css; css; css = css->parent) {
789 cgrp = container_of(css, struct perf_cgroup, css);
790 info = this_cpu_ptr(cgrp->info);
791
792 __update_cgrp_time(info, now, true);
793 if (final)
794 __store_release(&info->active, 0);
795 }
796 }
797 }
798
update_cgrp_time_from_event(struct perf_event * event)799 static inline void update_cgrp_time_from_event(struct perf_event *event)
800 {
801 struct perf_cgroup_info *info;
802
803 /*
804 * ensure we access cgroup data only when needed and
805 * when we know the cgroup is pinned (css_get)
806 */
807 if (!is_cgroup_event(event))
808 return;
809
810 info = this_cpu_ptr(event->cgrp->info);
811 /*
812 * Do not update time when cgroup is not active
813 */
814 if (info->active)
815 __update_cgrp_time(info, perf_clock(), true);
816 }
817
818 static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context * cpuctx)819 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
820 {
821 struct perf_event_context *ctx = &cpuctx->ctx;
822 struct perf_cgroup *cgrp = cpuctx->cgrp;
823 struct perf_cgroup_info *info;
824 struct cgroup_subsys_state *css;
825
826 /*
827 * ctx->lock held by caller
828 * ensure we do not access cgroup data
829 * unless we have the cgroup pinned (css_get)
830 */
831 if (!cgrp)
832 return;
833
834 WARN_ON_ONCE(!ctx->nr_cgroups);
835
836 for (css = &cgrp->css; css; css = css->parent) {
837 cgrp = container_of(css, struct perf_cgroup, css);
838 info = this_cpu_ptr(cgrp->info);
839 __update_cgrp_time(info, ctx->timestamp, false);
840 __store_release(&info->active, 1);
841 }
842 }
843
844 /*
845 * reschedule events based on the cgroup constraint of task.
846 */
perf_cgroup_switch(struct task_struct * task)847 static void perf_cgroup_switch(struct task_struct *task)
848 {
849 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
850 struct perf_cgroup *cgrp;
851
852 /*
853 * cpuctx->cgrp is set when the first cgroup event enabled,
854 * and is cleared when the last cgroup event disabled.
855 */
856 if (READ_ONCE(cpuctx->cgrp) == NULL)
857 return;
858
859 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
860
861 cgrp = perf_cgroup_from_task(task, NULL);
862 if (READ_ONCE(cpuctx->cgrp) == cgrp)
863 return;
864
865 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
866 perf_ctx_disable(&cpuctx->ctx, true);
867
868 ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
869 /*
870 * must not be done before ctxswout due
871 * to update_cgrp_time_from_cpuctx() in
872 * ctx_sched_out()
873 */
874 cpuctx->cgrp = cgrp;
875 /*
876 * set cgrp before ctxsw in to allow
877 * perf_cgroup_set_timestamp() in ctx_sched_in()
878 * to not have to pass task around
879 */
880 ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
881
882 perf_ctx_enable(&cpuctx->ctx, true);
883 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
884 }
885
perf_cgroup_ensure_storage(struct perf_event * event,struct cgroup_subsys_state * css)886 static int perf_cgroup_ensure_storage(struct perf_event *event,
887 struct cgroup_subsys_state *css)
888 {
889 struct perf_cpu_context *cpuctx;
890 struct perf_event **storage;
891 int cpu, heap_size, ret = 0;
892
893 /*
894 * Allow storage to have sufficent space for an iterator for each
895 * possibly nested cgroup plus an iterator for events with no cgroup.
896 */
897 for (heap_size = 1; css; css = css->parent)
898 heap_size++;
899
900 for_each_possible_cpu(cpu) {
901 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
902 if (heap_size <= cpuctx->heap_size)
903 continue;
904
905 storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
906 GFP_KERNEL, cpu_to_node(cpu));
907 if (!storage) {
908 ret = -ENOMEM;
909 break;
910 }
911
912 raw_spin_lock_irq(&cpuctx->ctx.lock);
913 if (cpuctx->heap_size < heap_size) {
914 swap(cpuctx->heap, storage);
915 if (storage == cpuctx->heap_default)
916 storage = NULL;
917 cpuctx->heap_size = heap_size;
918 }
919 raw_spin_unlock_irq(&cpuctx->ctx.lock);
920
921 kfree(storage);
922 }
923
924 return ret;
925 }
926
perf_cgroup_connect(int fd,struct perf_event * event,struct perf_event_attr * attr,struct perf_event * group_leader)927 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
928 struct perf_event_attr *attr,
929 struct perf_event *group_leader)
930 {
931 struct perf_cgroup *cgrp;
932 struct cgroup_subsys_state *css;
933 struct fd f = fdget(fd);
934 int ret = 0;
935
936 if (!f.file)
937 return -EBADF;
938
939 css = css_tryget_online_from_dir(f.file->f_path.dentry,
940 &perf_event_cgrp_subsys);
941 if (IS_ERR(css)) {
942 ret = PTR_ERR(css);
943 goto out;
944 }
945
946 ret = perf_cgroup_ensure_storage(event, css);
947 if (ret)
948 goto out;
949
950 cgrp = container_of(css, struct perf_cgroup, css);
951 event->cgrp = cgrp;
952
953 /*
954 * all events in a group must monitor
955 * the same cgroup because a task belongs
956 * to only one perf cgroup at a time
957 */
958 if (group_leader && group_leader->cgrp != cgrp) {
959 perf_detach_cgroup(event);
960 ret = -EINVAL;
961 }
962 out:
963 fdput(f);
964 return ret;
965 }
966
967 static inline void
perf_cgroup_event_enable(struct perf_event * event,struct perf_event_context * ctx)968 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
969 {
970 struct perf_cpu_context *cpuctx;
971
972 if (!is_cgroup_event(event))
973 return;
974
975 event->pmu_ctx->nr_cgroups++;
976
977 /*
978 * Because cgroup events are always per-cpu events,
979 * @ctx == &cpuctx->ctx.
980 */
981 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
982
983 if (ctx->nr_cgroups++)
984 return;
985
986 cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
987 }
988
989 static inline void
perf_cgroup_event_disable(struct perf_event * event,struct perf_event_context * ctx)990 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
991 {
992 struct perf_cpu_context *cpuctx;
993
994 if (!is_cgroup_event(event))
995 return;
996
997 event->pmu_ctx->nr_cgroups--;
998
999 /*
1000 * Because cgroup events are always per-cpu events,
1001 * @ctx == &cpuctx->ctx.
1002 */
1003 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1004
1005 if (--ctx->nr_cgroups)
1006 return;
1007
1008 cpuctx->cgrp = NULL;
1009 }
1010
1011 #else /* !CONFIG_CGROUP_PERF */
1012
1013 static inline bool
perf_cgroup_match(struct perf_event * event)1014 perf_cgroup_match(struct perf_event *event)
1015 {
1016 return true;
1017 }
1018
perf_detach_cgroup(struct perf_event * event)1019 static inline void perf_detach_cgroup(struct perf_event *event)
1020 {}
1021
is_cgroup_event(struct perf_event * event)1022 static inline int is_cgroup_event(struct perf_event *event)
1023 {
1024 return 0;
1025 }
1026
update_cgrp_time_from_event(struct perf_event * event)1027 static inline void update_cgrp_time_from_event(struct perf_event *event)
1028 {
1029 }
1030
update_cgrp_time_from_cpuctx(struct perf_cpu_context * cpuctx,bool final)1031 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
1032 bool final)
1033 {
1034 }
1035
perf_cgroup_connect(pid_t pid,struct perf_event * event,struct perf_event_attr * attr,struct perf_event * group_leader)1036 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1037 struct perf_event_attr *attr,
1038 struct perf_event *group_leader)
1039 {
1040 return -EINVAL;
1041 }
1042
1043 static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context * cpuctx)1044 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
1045 {
1046 }
1047
perf_cgroup_event_time(struct perf_event * event)1048 static inline u64 perf_cgroup_event_time(struct perf_event *event)
1049 {
1050 return 0;
1051 }
1052
perf_cgroup_event_time_now(struct perf_event * event,u64 now)1053 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
1054 {
1055 return 0;
1056 }
1057
1058 static inline void
perf_cgroup_event_enable(struct perf_event * event,struct perf_event_context * ctx)1059 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1060 {
1061 }
1062
1063 static inline void
perf_cgroup_event_disable(struct perf_event * event,struct perf_event_context * ctx)1064 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1065 {
1066 }
1067
perf_cgroup_switch(struct task_struct * task)1068 static void perf_cgroup_switch(struct task_struct *task)
1069 {
1070 }
1071 #endif
1072
1073 /*
1074 * set default to be dependent on timer tick just
1075 * like original code
1076 */
1077 #define PERF_CPU_HRTIMER (1000 / HZ)
1078 /*
1079 * function must be called with interrupts disabled
1080 */
perf_mux_hrtimer_handler(struct hrtimer * hr)1081 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1082 {
1083 struct perf_cpu_pmu_context *cpc;
1084 bool rotations;
1085
1086 lockdep_assert_irqs_disabled();
1087
1088 cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
1089 rotations = perf_rotate_context(cpc);
1090
1091 raw_spin_lock(&cpc->hrtimer_lock);
1092 if (rotations)
1093 hrtimer_forward_now(hr, cpc->hrtimer_interval);
1094 else
1095 cpc->hrtimer_active = 0;
1096 raw_spin_unlock(&cpc->hrtimer_lock);
1097
1098 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
1099 }
1100
__perf_mux_hrtimer_init(struct perf_cpu_pmu_context * cpc,int cpu)1101 static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
1102 {
1103 struct hrtimer *timer = &cpc->hrtimer;
1104 struct pmu *pmu = cpc->epc.pmu;
1105 u64 interval;
1106
1107 /*
1108 * check default is sane, if not set then force to
1109 * default interval (1/tick)
1110 */
1111 interval = pmu->hrtimer_interval_ms;
1112 if (interval < 1)
1113 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1114
1115 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1116
1117 raw_spin_lock_init(&cpc->hrtimer_lock);
1118 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1119 timer->function = perf_mux_hrtimer_handler;
1120 }
1121
perf_mux_hrtimer_restart(struct perf_cpu_pmu_context * cpc)1122 static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
1123 {
1124 struct hrtimer *timer = &cpc->hrtimer;
1125 unsigned long flags;
1126
1127 raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
1128 if (!cpc->hrtimer_active) {
1129 cpc->hrtimer_active = 1;
1130 hrtimer_forward_now(timer, cpc->hrtimer_interval);
1131 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
1132 }
1133 raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);
1134
1135 return 0;
1136 }
1137
perf_mux_hrtimer_restart_ipi(void * arg)1138 static int perf_mux_hrtimer_restart_ipi(void *arg)
1139 {
1140 return perf_mux_hrtimer_restart(arg);
1141 }
1142
perf_pmu_disable(struct pmu * pmu)1143 void perf_pmu_disable(struct pmu *pmu)
1144 {
1145 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1146 if (!(*count)++)
1147 pmu->pmu_disable(pmu);
1148 }
1149
perf_pmu_enable(struct pmu * pmu)1150 void perf_pmu_enable(struct pmu *pmu)
1151 {
1152 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1153 if (!--(*count))
1154 pmu->pmu_enable(pmu);
1155 }
1156
perf_assert_pmu_disabled(struct pmu * pmu)1157 static void perf_assert_pmu_disabled(struct pmu *pmu)
1158 {
1159 WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
1160 }
1161
get_ctx(struct perf_event_context * ctx)1162 static void get_ctx(struct perf_event_context *ctx)
1163 {
1164 refcount_inc(&ctx->refcount);
1165 }
1166
alloc_task_ctx_data(struct pmu * pmu)1167 static void *alloc_task_ctx_data(struct pmu *pmu)
1168 {
1169 if (pmu->task_ctx_cache)
1170 return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
1171
1172 return NULL;
1173 }
1174
free_task_ctx_data(struct pmu * pmu,void * task_ctx_data)1175 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
1176 {
1177 if (pmu->task_ctx_cache && task_ctx_data)
1178 kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
1179 }
1180
free_ctx(struct rcu_head * head)1181 static void free_ctx(struct rcu_head *head)
1182 {
1183 struct perf_event_context *ctx;
1184
1185 ctx = container_of(head, struct perf_event_context, rcu_head);
1186 kfree(ctx);
1187 }
1188
put_ctx(struct perf_event_context * ctx)1189 static void put_ctx(struct perf_event_context *ctx)
1190 {
1191 if (refcount_dec_and_test(&ctx->refcount)) {
1192 if (ctx->parent_ctx)
1193 put_ctx(ctx->parent_ctx);
1194 if (ctx->task && ctx->task != TASK_TOMBSTONE)
1195 put_task_struct(ctx->task);
1196 call_rcu(&ctx->rcu_head, free_ctx);
1197 }
1198 }
1199
1200 /*
1201 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1202 * perf_pmu_migrate_context() we need some magic.
1203 *
1204 * Those places that change perf_event::ctx will hold both
1205 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1206 *
1207 * Lock ordering is by mutex address. There are two other sites where
1208 * perf_event_context::mutex nests and those are:
1209 *
1210 * - perf_event_exit_task_context() [ child , 0 ]
1211 * perf_event_exit_event()
1212 * put_event() [ parent, 1 ]
1213 *
1214 * - perf_event_init_context() [ parent, 0 ]
1215 * inherit_task_group()
1216 * inherit_group()
1217 * inherit_event()
1218 * perf_event_alloc()
1219 * perf_init_event()
1220 * perf_try_init_event() [ child , 1 ]
1221 *
1222 * While it appears there is an obvious deadlock here -- the parent and child
1223 * nesting levels are inverted between the two. This is in fact safe because
1224 * life-time rules separate them. That is an exiting task cannot fork, and a
1225 * spawning task cannot (yet) exit.
1226 *
1227 * But remember that these are parent<->child context relations, and
1228 * migration does not affect children, therefore these two orderings should not
1229 * interact.
1230 *
1231 * The change in perf_event::ctx does not affect children (as claimed above)
1232 * because the sys_perf_event_open() case will install a new event and break
1233 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1234 * concerned with cpuctx and that doesn't have children.
1235 *
1236 * The places that change perf_event::ctx will issue:
1237 *
1238 * perf_remove_from_context();
1239 * synchronize_rcu();
1240 * perf_install_in_context();
1241 *
1242 * to affect the change. The remove_from_context() + synchronize_rcu() should
1243 * quiesce the event, after which we can install it in the new location. This
1244 * means that only external vectors (perf_fops, prctl) can perturb the event
1245 * while in transit. Therefore all such accessors should also acquire
1246 * perf_event_context::mutex to serialize against this.
1247 *
1248 * However; because event->ctx can change while we're waiting to acquire
1249 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1250 * function.
1251 *
1252 * Lock order:
1253 * exec_update_lock
1254 * task_struct::perf_event_mutex
1255 * perf_event_context::mutex
1256 * perf_event::child_mutex;
1257 * perf_event_context::lock
1258 * perf_event::mmap_mutex
1259 * mmap_lock
1260 * perf_addr_filters_head::lock
1261 *
1262 * cpu_hotplug_lock
1263 * pmus_lock
1264 * cpuctx->mutex / perf_event_context::mutex
1265 */
1266 static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event * event,int nesting)1267 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1268 {
1269 struct perf_event_context *ctx;
1270
1271 again:
1272 rcu_read_lock();
1273 ctx = READ_ONCE(event->ctx);
1274 if (!refcount_inc_not_zero(&ctx->refcount)) {
1275 rcu_read_unlock();
1276 goto again;
1277 }
1278 rcu_read_unlock();
1279
1280 mutex_lock_nested(&ctx->mutex, nesting);
1281 if (event->ctx != ctx) {
1282 mutex_unlock(&ctx->mutex);
1283 put_ctx(ctx);
1284 goto again;
1285 }
1286
1287 return ctx;
1288 }
1289
1290 static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event * event)1291 perf_event_ctx_lock(struct perf_event *event)
1292 {
1293 return perf_event_ctx_lock_nested(event, 0);
1294 }
1295
perf_event_ctx_unlock(struct perf_event * event,struct perf_event_context * ctx)1296 static void perf_event_ctx_unlock(struct perf_event *event,
1297 struct perf_event_context *ctx)
1298 {
1299 mutex_unlock(&ctx->mutex);
1300 put_ctx(ctx);
1301 }
1302
1303 /*
1304 * This must be done under the ctx->lock, such as to serialize against
1305 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1306 * calling scheduler related locks and ctx->lock nests inside those.
1307 */
1308 static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context * ctx)1309 unclone_ctx(struct perf_event_context *ctx)
1310 {
1311 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1312
1313 lockdep_assert_held(&ctx->lock);
1314
1315 if (parent_ctx)
1316 ctx->parent_ctx = NULL;
1317 ctx->generation++;
1318
1319 return parent_ctx;
1320 }
1321
perf_event_pid_type(struct perf_event * event,struct task_struct * p,enum pid_type type)1322 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1323 enum pid_type type)
1324 {
1325 u32 nr;
1326 /*
1327 * only top level events have the pid namespace they were created in
1328 */
1329 if (event->parent)
1330 event = event->parent;
1331
1332 nr = __task_pid_nr_ns(p, type, event->ns);
1333 /* avoid -1 if it is idle thread or runs in another ns */
1334 if (!nr && !pid_alive(p))
1335 nr = -1;
1336 return nr;
1337 }
1338
perf_event_pid(struct perf_event * event,struct task_struct * p)1339 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1340 {
1341 return perf_event_pid_type(event, p, PIDTYPE_TGID);
1342 }
1343
perf_event_tid(struct perf_event * event,struct task_struct * p)1344 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1345 {
1346 return perf_event_pid_type(event, p, PIDTYPE_PID);
1347 }
1348
1349 /*
1350 * If we inherit events we want to return the parent event id
1351 * to userspace.
1352 */
primary_event_id(struct perf_event * event)1353 static u64 primary_event_id(struct perf_event *event)
1354 {
1355 u64 id = event->id;
1356
1357 if (event->parent)
1358 id = event->parent->id;
1359
1360 return id;
1361 }
1362
1363 /*
1364 * Get the perf_event_context for a task and lock it.
1365 *
1366 * This has to cope with the fact that until it is locked,
1367 * the context could get moved to another task.
1368 */
1369 static struct perf_event_context *
perf_lock_task_context(struct task_struct * task,unsigned long * flags)1370 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
1371 {
1372 struct perf_event_context *ctx;
1373
1374 retry:
1375 /*
1376 * One of the few rules of preemptible RCU is that one cannot do
1377 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1378 * part of the read side critical section was irqs-enabled -- see
1379 * rcu_read_unlock_special().
1380 *
1381 * Since ctx->lock nests under rq->lock we must ensure the entire read
1382 * side critical section has interrupts disabled.
1383 */
1384 local_irq_save(*flags);
1385 rcu_read_lock();
1386 ctx = rcu_dereference(task->perf_event_ctxp);
1387 if (ctx) {
1388 /*
1389 * If this context is a clone of another, it might
1390 * get swapped for another underneath us by
1391 * perf_event_task_sched_out, though the
1392 * rcu_read_lock() protects us from any context
1393 * getting freed. Lock the context and check if it
1394 * got swapped before we could get the lock, and retry
1395 * if so. If we locked the right context, then it
1396 * can't get swapped on us any more.
1397 */
1398 raw_spin_lock(&ctx->lock);
1399 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
1400 raw_spin_unlock(&ctx->lock);
1401 rcu_read_unlock();
1402 local_irq_restore(*flags);
1403 goto retry;
1404 }
1405
1406 if (ctx->task == TASK_TOMBSTONE ||
1407 !refcount_inc_not_zero(&ctx->refcount)) {
1408 raw_spin_unlock(&ctx->lock);
1409 ctx = NULL;
1410 } else {
1411 WARN_ON_ONCE(ctx->task != task);
1412 }
1413 }
1414 rcu_read_unlock();
1415 if (!ctx)
1416 local_irq_restore(*flags);
1417 return ctx;
1418 }
1419
1420 /*
1421 * Get the context for a task and increment its pin_count so it
1422 * can't get swapped to another task. This also increments its
1423 * reference count so that the context can't get freed.
1424 */
1425 static struct perf_event_context *
perf_pin_task_context(struct task_struct * task)1426 perf_pin_task_context(struct task_struct *task)
1427 {
1428 struct perf_event_context *ctx;
1429 unsigned long flags;
1430
1431 ctx = perf_lock_task_context(task, &flags);
1432 if (ctx) {
1433 ++ctx->pin_count;
1434 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1435 }
1436 return ctx;
1437 }
1438
perf_unpin_context(struct perf_event_context * ctx)1439 static void perf_unpin_context(struct perf_event_context *ctx)
1440 {
1441 unsigned long flags;
1442
1443 raw_spin_lock_irqsave(&ctx->lock, flags);
1444 --ctx->pin_count;
1445 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1446 }
1447
1448 /*
1449 * Update the record of the current time in a context.
1450 */
__update_context_time(struct perf_event_context * ctx,bool adv)1451 static void __update_context_time(struct perf_event_context *ctx, bool adv)
1452 {
1453 u64 now = perf_clock();
1454
1455 lockdep_assert_held(&ctx->lock);
1456
1457 if (adv)
1458 ctx->time += now - ctx->timestamp;
1459 ctx->timestamp = now;
1460
1461 /*
1462 * The above: time' = time + (now - timestamp), can be re-arranged
1463 * into: time` = now + (time - timestamp), which gives a single value
1464 * offset to compute future time without locks on.
1465 *
1466 * See perf_event_time_now(), which can be used from NMI context where
1467 * it's (obviously) not possible to acquire ctx->lock in order to read
1468 * both the above values in a consistent manner.
1469 */
1470 WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
1471 }
1472
update_context_time(struct perf_event_context * ctx)1473 static void update_context_time(struct perf_event_context *ctx)
1474 {
1475 __update_context_time(ctx, true);
1476 }
1477
perf_event_time(struct perf_event * event)1478 static u64 perf_event_time(struct perf_event *event)
1479 {
1480 struct perf_event_context *ctx = event->ctx;
1481
1482 if (unlikely(!ctx))
1483 return 0;
1484
1485 if (is_cgroup_event(event))
1486 return perf_cgroup_event_time(event);
1487
1488 return ctx->time;
1489 }
1490
perf_event_time_now(struct perf_event * event,u64 now)1491 static u64 perf_event_time_now(struct perf_event *event, u64 now)
1492 {
1493 struct perf_event_context *ctx = event->ctx;
1494
1495 if (unlikely(!ctx))
1496 return 0;
1497
1498 if (is_cgroup_event(event))
1499 return perf_cgroup_event_time_now(event, now);
1500
1501 if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
1502 return ctx->time;
1503
1504 now += READ_ONCE(ctx->timeoffset);
1505 return now;
1506 }
1507
get_event_type(struct perf_event * event)1508 static enum event_type_t get_event_type(struct perf_event *event)
1509 {
1510 struct perf_event_context *ctx = event->ctx;
1511 enum event_type_t event_type;
1512
1513 lockdep_assert_held(&ctx->lock);
1514
1515 /*
1516 * It's 'group type', really, because if our group leader is
1517 * pinned, so are we.
1518 */
1519 if (event->group_leader != event)
1520 event = event->group_leader;
1521
1522 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1523 if (!ctx->task)
1524 event_type |= EVENT_CPU;
1525
1526 return event_type;
1527 }
1528
1529 /*
1530 * Helper function to initialize event group nodes.
1531 */
init_event_group(struct perf_event * event)1532 static void init_event_group(struct perf_event *event)
1533 {
1534 RB_CLEAR_NODE(&event->group_node);
1535 event->group_index = 0;
1536 }
1537
1538 /*
1539 * Extract pinned or flexible groups from the context
1540 * based on event attrs bits.
1541 */
1542 static struct perf_event_groups *
get_event_groups(struct perf_event * event,struct perf_event_context * ctx)1543 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
1544 {
1545 if (event->attr.pinned)
1546 return &ctx->pinned_groups;
1547 else
1548 return &ctx->flexible_groups;
1549 }
1550
1551 /*
1552 * Helper function to initializes perf_event_group trees.
1553 */
perf_event_groups_init(struct perf_event_groups * groups)1554 static void perf_event_groups_init(struct perf_event_groups *groups)
1555 {
1556 groups->tree = RB_ROOT;
1557 groups->index = 0;
1558 }
1559
event_cgroup(const struct perf_event * event)1560 static inline struct cgroup *event_cgroup(const struct perf_event *event)
1561 {
1562 struct cgroup *cgroup = NULL;
1563
1564 #ifdef CONFIG_CGROUP_PERF
1565 if (event->cgrp)
1566 cgroup = event->cgrp->css.cgroup;
1567 #endif
1568
1569 return cgroup;
1570 }
1571
1572 /*
1573 * Compare function for event groups;
1574 *
1575 * Implements complex key that first sorts by CPU and then by virtual index
1576 * which provides ordering when rotating groups for the same CPU.
1577 */
1578 static __always_inline int
perf_event_groups_cmp(const int left_cpu,const struct pmu * left_pmu,const struct cgroup * left_cgroup,const u64 left_group_index,const struct perf_event * right)1579 perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
1580 const struct cgroup *left_cgroup, const u64 left_group_index,
1581 const struct perf_event *right)
1582 {
1583 if (left_cpu < right->cpu)
1584 return -1;
1585 if (left_cpu > right->cpu)
1586 return 1;
1587
1588 if (left_pmu) {
1589 if (left_pmu < right->pmu_ctx->pmu)
1590 return -1;
1591 if (left_pmu > right->pmu_ctx->pmu)
1592 return 1;
1593 }
1594
1595 #ifdef CONFIG_CGROUP_PERF
1596 {
1597 const struct cgroup *right_cgroup = event_cgroup(right);
1598
1599 if (left_cgroup != right_cgroup) {
1600 if (!left_cgroup) {
1601 /*
1602 * Left has no cgroup but right does, no
1603 * cgroups come first.
1604 */
1605 return -1;
1606 }
1607 if (!right_cgroup) {
1608 /*
1609 * Right has no cgroup but left does, no
1610 * cgroups come first.
1611 */
1612 return 1;
1613 }
1614 /* Two dissimilar cgroups, order by id. */
1615 if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
1616 return -1;
1617
1618 return 1;
1619 }
1620 }
1621 #endif
1622
1623 if (left_group_index < right->group_index)
1624 return -1;
1625 if (left_group_index > right->group_index)
1626 return 1;
1627
1628 return 0;
1629 }
1630
1631 #define __node_2_pe(node) \
1632 rb_entry((node), struct perf_event, group_node)
1633
__group_less(struct rb_node * a,const struct rb_node * b)1634 static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
1635 {
1636 struct perf_event *e = __node_2_pe(a);
1637 return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
1638 e->group_index, __node_2_pe(b)) < 0;
1639 }
1640
1641 struct __group_key {
1642 int cpu;
1643 struct pmu *pmu;
1644 struct cgroup *cgroup;
1645 };
1646
__group_cmp(const void * key,const struct rb_node * node)1647 static inline int __group_cmp(const void *key, const struct rb_node *node)
1648 {
1649 const struct __group_key *a = key;
1650 const struct perf_event *b = __node_2_pe(node);
1651
1652 /* partial/subtree match: @cpu, @pmu, @cgroup; ignore: @group_index */
1653 return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
1654 }
1655
1656 static inline int
__group_cmp_ignore_cgroup(const void * key,const struct rb_node * node)1657 __group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
1658 {
1659 const struct __group_key *a = key;
1660 const struct perf_event *b = __node_2_pe(node);
1661
1662 /* partial/subtree match: @cpu, @pmu, ignore: @cgroup, @group_index */
1663 return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
1664 b->group_index, b);
1665 }
1666
1667 /*
1668 * Insert @event into @groups' tree; using
1669 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1670 * as key. This places it last inside the {cpu,pmu,cgroup} subtree.
1671 */
1672 static void
perf_event_groups_insert(struct perf_event_groups * groups,struct perf_event * event)1673 perf_event_groups_insert(struct perf_event_groups *groups,
1674 struct perf_event *event)
1675 {
1676 event->group_index = ++groups->index;
1677
1678 rb_add(&event->group_node, &groups->tree, __group_less);
1679 }
1680
1681 /*
1682 * Helper function to insert event into the pinned or flexible groups.
1683 */
1684 static void
add_event_to_groups(struct perf_event * event,struct perf_event_context * ctx)1685 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1686 {
1687 struct perf_event_groups *groups;
1688
1689 groups = get_event_groups(event, ctx);
1690 perf_event_groups_insert(groups, event);
1691 }
1692
1693 /*
1694 * Delete a group from a tree.
1695 */
1696 static void
perf_event_groups_delete(struct perf_event_groups * groups,struct perf_event * event)1697 perf_event_groups_delete(struct perf_event_groups *groups,
1698 struct perf_event *event)
1699 {
1700 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1701 RB_EMPTY_ROOT(&groups->tree));
1702
1703 rb_erase(&event->group_node, &groups->tree);
1704 init_event_group(event);
1705 }
1706
1707 /*
1708 * Helper function to delete event from its groups.
1709 */
1710 static void
del_event_from_groups(struct perf_event * event,struct perf_event_context * ctx)1711 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1712 {
1713 struct perf_event_groups *groups;
1714
1715 groups = get_event_groups(event, ctx);
1716 perf_event_groups_delete(groups, event);
1717 }
1718
1719 /*
1720 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1721 */
1722 static struct perf_event *
perf_event_groups_first(struct perf_event_groups * groups,int cpu,struct pmu * pmu,struct cgroup * cgrp)1723 perf_event_groups_first(struct perf_event_groups *groups, int cpu,
1724 struct pmu *pmu, struct cgroup *cgrp)
1725 {
1726 struct __group_key key = {
1727 .cpu = cpu,
1728 .pmu = pmu,
1729 .cgroup = cgrp,
1730 };
1731 struct rb_node *node;
1732
1733 node = rb_find_first(&key, &groups->tree, __group_cmp);
1734 if (node)
1735 return __node_2_pe(node);
1736
1737 return NULL;
1738 }
1739
1740 static struct perf_event *
perf_event_groups_next(struct perf_event * event,struct pmu * pmu)1741 perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
1742 {
1743 struct __group_key key = {
1744 .cpu = event->cpu,
1745 .pmu = pmu,
1746 .cgroup = event_cgroup(event),
1747 };
1748 struct rb_node *next;
1749
1750 next = rb_next_match(&key, &event->group_node, __group_cmp);
1751 if (next)
1752 return __node_2_pe(next);
1753
1754 return NULL;
1755 }
1756
1757 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
1758 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1759 event; event = perf_event_groups_next(event, pmu))
1760
1761 /*
1762 * Iterate through the whole groups tree.
1763 */
1764 #define perf_event_groups_for_each(event, groups) \
1765 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1766 typeof(*event), group_node); event; \
1767 event = rb_entry_safe(rb_next(&event->group_node), \
1768 typeof(*event), group_node))
1769
1770 /*
1771 * Add an event from the lists for its context.
1772 * Must be called with ctx->mutex and ctx->lock held.
1773 */
1774 static void
list_add_event(struct perf_event * event,struct perf_event_context * ctx)1775 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1776 {
1777 lockdep_assert_held(&ctx->lock);
1778
1779 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1780 event->attach_state |= PERF_ATTACH_CONTEXT;
1781
1782 event->tstamp = perf_event_time(event);
1783
1784 /*
1785 * If we're a stand alone event or group leader, we go to the context
1786 * list, group events are kept attached to the group so that
1787 * perf_group_detach can, at all times, locate all siblings.
1788 */
1789 if (event->group_leader == event) {
1790 event->group_caps = event->event_caps;
1791 add_event_to_groups(event, ctx);
1792 }
1793
1794 list_add_rcu(&event->event_entry, &ctx->event_list);
1795 ctx->nr_events++;
1796 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
1797 ctx->nr_user++;
1798 if (event->attr.inherit_stat)
1799 ctx->nr_stat++;
1800
1801 if (event->state > PERF_EVENT_STATE_OFF)
1802 perf_cgroup_event_enable(event, ctx);
1803
1804 ctx->generation++;
1805 event->pmu_ctx->nr_events++;
1806 }
1807
1808 /*
1809 * Initialize event state based on the perf_event_attr::disabled.
1810 */
perf_event__state_init(struct perf_event * event)1811 static inline void perf_event__state_init(struct perf_event *event)
1812 {
1813 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1814 PERF_EVENT_STATE_INACTIVE;
1815 }
1816
__perf_event_read_size(u64 read_format,int nr_siblings)1817 static int __perf_event_read_size(u64 read_format, int nr_siblings)
1818 {
1819 int entry = sizeof(u64); /* value */
1820 int size = 0;
1821 int nr = 1;
1822
1823 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1824 size += sizeof(u64);
1825
1826 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1827 size += sizeof(u64);
1828
1829 if (read_format & PERF_FORMAT_ID)
1830 entry += sizeof(u64);
1831
1832 if (read_format & PERF_FORMAT_LOST)
1833 entry += sizeof(u64);
1834
1835 if (read_format & PERF_FORMAT_GROUP) {
1836 nr += nr_siblings;
1837 size += sizeof(u64);
1838 }
1839
1840 /*
1841 * Since perf_event_validate_size() limits this to 16k and inhibits
1842 * adding more siblings, this will never overflow.
1843 */
1844 return size + nr * entry;
1845 }
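
/*
 * Worked example (editor's illustration, not part of the kernel source):
 * for read_format = PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID on a leader with two
 * siblings, the function above computes:
 *
 *	nr    = 1 + 2 = 3		(leader plus siblings)
 *	size  = 8 + 8 + 8 = 24		(time_enabled, time_running, nr)
 *	entry = 8 + 8 = 16		(value, id)
 *	total = 24 + 3 * 16 = 72 bytes
 *
 * which matches the layout userspace gets back from read():
 *
 *	struct {
 *		__u64 nr;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *		struct { __u64 value, id; } values[3];
 *	};
 */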
1846
1847 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1848 {
1849 struct perf_sample_data *data;
1850 u16 size = 0;
1851
1852 if (sample_type & PERF_SAMPLE_IP)
1853 size += sizeof(data->ip);
1854
1855 if (sample_type & PERF_SAMPLE_ADDR)
1856 size += sizeof(data->addr);
1857
1858 if (sample_type & PERF_SAMPLE_PERIOD)
1859 size += sizeof(data->period);
1860
1861 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
1862 size += sizeof(data->weight.full);
1863
1864 if (sample_type & PERF_SAMPLE_READ)
1865 size += event->read_size;
1866
1867 if (sample_type & PERF_SAMPLE_DATA_SRC)
1868 size += sizeof(data->data_src.val);
1869
1870 if (sample_type & PERF_SAMPLE_TRANSACTION)
1871 size += sizeof(data->txn);
1872
1873 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1874 size += sizeof(data->phys_addr);
1875
1876 if (sample_type & PERF_SAMPLE_CGROUP)
1877 size += sizeof(data->cgroup);
1878
1879 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1880 size += sizeof(data->data_page_size);
1881
1882 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1883 size += sizeof(data->code_page_size);
1884
1885 event->header_size = size;
1886 }
1887
1888 /*
1889 * Called at perf_event creation and when events are attached/detached from a
1890 * group.
1891 */
1892 static void perf_event__header_size(struct perf_event *event)
1893 {
1894 event->read_size =
1895 __perf_event_read_size(event->attr.read_format,
1896 event->group_leader->nr_siblings);
1897 __perf_event_header_size(event, event->attr.sample_type);
1898 }
1899
1900 static void perf_event__id_header_size(struct perf_event *event)
1901 {
1902 struct perf_sample_data *data;
1903 u64 sample_type = event->attr.sample_type;
1904 u16 size = 0;
1905
1906 if (sample_type & PERF_SAMPLE_TID)
1907 size += sizeof(data->tid_entry);
1908
1909 if (sample_type & PERF_SAMPLE_TIME)
1910 size += sizeof(data->time);
1911
1912 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1913 size += sizeof(data->id);
1914
1915 if (sample_type & PERF_SAMPLE_ID)
1916 size += sizeof(data->id);
1917
1918 if (sample_type & PERF_SAMPLE_STREAM_ID)
1919 size += sizeof(data->stream_id);
1920
1921 if (sample_type & PERF_SAMPLE_CPU)
1922 size += sizeof(data->cpu_entry);
1923
1924 event->id_header_size = size;
1925 }
1926
1927 /*
1928 * Check that adding an event to the group does not result in anybody
1929 * overflowing the 64k event limit imposed by the output buffer.
1930 *
1931 * Specifically, check that the read_size for the event does not exceed 16k,
1932 * read_size being the one term that grows with groups size. Since read_size
1933 * depends on per-event read_format, also (re)check the existing events.
1934 *
1935 * This leaves 48k for the constant size fields and things like callchains,
1936 * branch stacks and register sets.
1937 */
1938 static bool perf_event_validate_size(struct perf_event *event)
1939 {
1940 struct perf_event *sibling, *group_leader = event->group_leader;
1941
1942 if (__perf_event_read_size(event->attr.read_format,
1943 group_leader->nr_siblings + 1) > 16*1024)
1944 return false;
1945
1946 if (__perf_event_read_size(group_leader->attr.read_format,
1947 group_leader->nr_siblings + 1) > 16*1024)
1948 return false;
1949
1950 /*
1951 * When creating a new group leader, group_leader->ctx is initialized
1952 * after the size has been validated, but we cannot safely use
1953 * for_each_sibling_event() until group_leader->ctx is set. A new group
1954 * leader cannot have any siblings yet, so we can safely skip checking
1955 * the non-existent siblings.
1956 */
1957 if (event == group_leader)
1958 return true;
1959
1960 for_each_sibling_event(sibling, group_leader) {
1961 if (__perf_event_read_size(sibling->attr.read_format,
1962 group_leader->nr_siblings + 1) > 16*1024)
1963 return false;
1964 }
1965
1966 return true;
1967 }
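
/*
 * Worked example (editor's illustration): with
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_LOST,
 * each entry is 3 * 8 = 24 bytes and the fixed part is 8 bytes (nr), so the
 * 16k cap above allows roughly (16384 - 8) / 24 ~= 682 events in one group
 * before perf_event_validate_size() starts refusing new siblings.
 */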
1968
1969 static void perf_group_attach(struct perf_event *event)
1970 {
1971 struct perf_event *group_leader = event->group_leader, *pos;
1972
1973 lockdep_assert_held(&event->ctx->lock);
1974
1975 /*
1976 * We can have double attach due to group movement (move_group) in
1977 * perf_event_open().
1978 */
1979 if (event->attach_state & PERF_ATTACH_GROUP)
1980 return;
1981
1982 event->attach_state |= PERF_ATTACH_GROUP;
1983
1984 if (group_leader == event)
1985 return;
1986
1987 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1988
1989 group_leader->group_caps &= event->event_caps;
1990
1991 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
1992 group_leader->nr_siblings++;
1993 group_leader->group_generation++;
1994
1995 perf_event__header_size(group_leader);
1996
1997 for_each_sibling_event(pos, group_leader)
1998 perf_event__header_size(pos);
1999 }
2000
2001 /*
2002 * Remove an event from the lists for its context.
2003 * Must be called with ctx->mutex and ctx->lock held.
2004 */
2005 static void
2006 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
2007 {
2008 WARN_ON_ONCE(event->ctx != ctx);
2009 lockdep_assert_held(&ctx->lock);
2010
2011 /*
2012 * We can have double detach due to exit/hot-unplug + close.
2013 */
2014 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
2015 return;
2016
2017 event->attach_state &= ~PERF_ATTACH_CONTEXT;
2018
2019 ctx->nr_events--;
2020 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
2021 ctx->nr_user--;
2022 if (event->attr.inherit_stat)
2023 ctx->nr_stat--;
2024
2025 list_del_rcu(&event->event_entry);
2026
2027 if (event->group_leader == event)
2028 del_event_from_groups(event, ctx);
2029
2030 /*
2031 * If the event was in error state, then keep it
2032 * that way, otherwise bogus counts will be
2033 * returned on read(). The only way to get out
2034 * of error state is by explicitly re-enabling
2035 * the event.
2036 */
2037 if (event->state > PERF_EVENT_STATE_OFF) {
2038 perf_cgroup_event_disable(event, ctx);
2039 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2040 }
2041
2042 ctx->generation++;
2043 event->pmu_ctx->nr_events--;
2044 }
2045
2046 static int
2047 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
2048 {
2049 if (!has_aux(aux_event))
2050 return 0;
2051
2052 if (!event->pmu->aux_output_match)
2053 return 0;
2054
2055 return event->pmu->aux_output_match(aux_event);
2056 }
2057
2058 static void put_event(struct perf_event *event);
2059 static void event_sched_out(struct perf_event *event,
2060 struct perf_event_context *ctx);
2061
2062 static void perf_put_aux_event(struct perf_event *event)
2063 {
2064 struct perf_event_context *ctx = event->ctx;
2065 struct perf_event *iter;
2066
2067 /*
2068 * If the event uses an aux_event, tear down the link.
2069 */
2070 if (event->aux_event) {
2071 iter = event->aux_event;
2072 event->aux_event = NULL;
2073 put_event(iter);
2074 return;
2075 }
2076
2077 /*
2078 * If the event is an aux_event, tear down all links to
2079 * it from other events.
2080 */
2081 for_each_sibling_event(iter, event->group_leader) {
2082 if (iter->aux_event != event)
2083 continue;
2084
2085 iter->aux_event = NULL;
2086 put_event(event);
2087
2088 /*
2089 * If it's ACTIVE, schedule it out and put it into ERROR
2090 * state so that we don't try to schedule it again. Note
2091 * that perf_event_enable() will clear the ERROR status.
2092 */
2093 event_sched_out(iter, ctx);
2094 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2095 }
2096 }
2097
2098 static bool perf_need_aux_event(struct perf_event *event)
2099 {
2100 return !!event->attr.aux_output || !!event->attr.aux_sample_size;
2101 }
2102
2103 static int perf_get_aux_event(struct perf_event *event,
2104 struct perf_event *group_leader)
2105 {
2106 /*
2107 * Our group leader must be an aux event if we want to be
2108 * an aux_output. This way, the aux event will precede its
2109 * aux_output events in the group, and therefore will always
2110 * schedule first.
2111 */
2112 if (!group_leader)
2113 return 0;
2114
2115 /*
2116 * aux_output and aux_sample_size are mutually exclusive.
2117 */
2118 if (event->attr.aux_output && event->attr.aux_sample_size)
2119 return 0;
2120
2121 if (event->attr.aux_output &&
2122 !perf_aux_output_match(event, group_leader))
2123 return 0;
2124
2125 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
2126 return 0;
2127
2128 if (!atomic_long_inc_not_zero(&group_leader->refcount))
2129 return 0;
2130
2131 /*
2132 * Link aux_outputs to their aux event; this is undone in
2133 * perf_group_detach() by perf_put_aux_event(). When the
2134 * group is torn down, the aux_output events lose their
2135 * link to the aux_event and can't schedule any more.
2136 */
2137 event->aux_event = group_leader;
2138
2139 return 1;
2140 }
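
/*
 * Illustrative userspace sketch (editor's example, not part of this file):
 * aux_output grouping is requested by opening the AUX-capable event first
 * and then opening the aux_output event into the same group. The PMU type
 * below is an assumption - it would normally be read from
 * /sys/bus/event_source/devices/<pmu>/type (e.g. intel_pt).
 *
 *	struct perf_event_attr aux = {
 *		.size	= sizeof(aux),
 *		.type	= intel_pt_type,	// hypothetical, read from sysfs
 *	};
 *	int leader = syscall(__NR_perf_event_open, &aux, 0, -1, -1, 0);
 *
 *	struct perf_event_attr ev = {
 *		.size		= sizeof(ev),
 *		.type		= PERF_TYPE_RAW,
 *		.config		= some_precise_event,	// hypothetical
 *		.aux_output	= 1,	// ask to be linked to the AUX leader
 *	};
 *	int fd = syscall(__NR_perf_event_open, &ev, 0, -1, leader, 0);
 *
 * perf_get_aux_event() above then links 'fd' to 'leader', so the aux event
 * always schedules first.
 */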
2141
2142 static inline struct list_head *get_event_list(struct perf_event *event)
2143 {
2144 return event->attr.pinned ? &event->pmu_ctx->pinned_active :
2145 &event->pmu_ctx->flexible_active;
2146 }
2147
2148 /*
2149 * Events that have PERF_EV_CAP_SIBLING require being part of a group and
2150 * cannot exist on their own; schedule them out and move them into the ERROR
2151 * state. Also see _perf_event_enable(); it will not be able to recover
2152 * this ERROR state.
2153 */
2154 static inline void perf_remove_sibling_event(struct perf_event *event)
2155 {
2156 event_sched_out(event, event->ctx);
2157 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2158 }
2159
2160 static void perf_group_detach(struct perf_event *event)
2161 {
2162 struct perf_event *leader = event->group_leader;
2163 struct perf_event *sibling, *tmp;
2164 struct perf_event_context *ctx = event->ctx;
2165
2166 lockdep_assert_held(&ctx->lock);
2167
2168 /*
2169 * We can have double detach due to exit/hot-unplug + close.
2170 */
2171 if (!(event->attach_state & PERF_ATTACH_GROUP))
2172 return;
2173
2174 event->attach_state &= ~PERF_ATTACH_GROUP;
2175
2176 perf_put_aux_event(event);
2177
2178 /*
2179 * If this is a sibling, remove it from its group.
2180 */
2181 if (leader != event) {
2182 list_del_init(&event->sibling_list);
2183 event->group_leader->nr_siblings--;
2184 event->group_leader->group_generation++;
2185 goto out;
2186 }
2187
2188 /*
2189 * If this was a group event with sibling events then
2190 * upgrade the siblings to singleton events by adding them
2191 * to whatever list we are on.
2192 */
2193 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
2194
2195 if (sibling->event_caps & PERF_EV_CAP_SIBLING)
2196 perf_remove_sibling_event(sibling);
2197
2198 sibling->group_leader = sibling;
2199 list_del_init(&sibling->sibling_list);
2200
2201 /* Inherit group flags from the previous leader */
2202 sibling->group_caps = event->group_caps;
2203
2204 if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
2205 add_event_to_groups(sibling, event->ctx);
2206
2207 if (sibling->state == PERF_EVENT_STATE_ACTIVE)
2208 list_add_tail(&sibling->active_list, get_event_list(sibling));
2209 }
2210
2211 WARN_ON_ONCE(sibling->ctx != event->ctx);
2212 }
2213
2214 out:
2215 for_each_sibling_event(tmp, leader)
2216 perf_event__header_size(tmp);
2217
2218 perf_event__header_size(leader);
2219 }
2220
2221 static void sync_child_event(struct perf_event *child_event);
2222
2223 static void perf_child_detach(struct perf_event *event)
2224 {
2225 struct perf_event *parent_event = event->parent;
2226
2227 if (!(event->attach_state & PERF_ATTACH_CHILD))
2228 return;
2229
2230 event->attach_state &= ~PERF_ATTACH_CHILD;
2231
2232 if (WARN_ON_ONCE(!parent_event))
2233 return;
2234
2235 lockdep_assert_held(&parent_event->child_mutex);
2236
2237 sync_child_event(event);
2238 list_del_init(&event->child_list);
2239 }
2240
2241 static bool is_orphaned_event(struct perf_event *event)
2242 {
2243 return event->state == PERF_EVENT_STATE_DEAD;
2244 }
2245
2246 static inline int
2247 event_filter_match(struct perf_event *event)
2248 {
2249 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
2250 perf_cgroup_match(event);
2251 }
2252
2253 static void
2254 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
2255 {
2256 struct perf_event_pmu_context *epc = event->pmu_ctx;
2257 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2258 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
2259
2260 // XXX cpc serialization, probably per-cpu IRQ disabled
2261
2262 WARN_ON_ONCE(event->ctx != ctx);
2263 lockdep_assert_held(&ctx->lock);
2264
2265 if (event->state != PERF_EVENT_STATE_ACTIVE)
2266 return;
2267
2268 /*
2269 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2270 * we can schedule events _OUT_ individually through things like
2271 * __perf_remove_from_context().
2272 */
2273 list_del_init(&event->active_list);
2274
2275 perf_pmu_disable(event->pmu);
2276
2277 event->pmu->del(event, 0);
2278 event->oncpu = -1;
2279
2280 if (event->pending_disable) {
2281 event->pending_disable = 0;
2282 perf_cgroup_event_disable(event, ctx);
2283 state = PERF_EVENT_STATE_OFF;
2284 }
2285
2286 if (event->pending_sigtrap) {
2287 bool dec = true;
2288
2289 event->pending_sigtrap = 0;
2290 if (state != PERF_EVENT_STATE_OFF &&
2291 !event->pending_work) {
2292 event->pending_work = 1;
2293 dec = false;
2294 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
2295 task_work_add(current, &event->pending_task, TWA_RESUME);
2296 }
2297 if (dec)
2298 local_dec(&event->ctx->nr_pending);
2299 }
2300
2301 perf_event_set_state(event, state);
2302
2303 if (!is_software_event(event))
2304 cpc->active_oncpu--;
2305 if (event->attr.freq && event->attr.sample_freq)
2306 ctx->nr_freq--;
2307 if (event->attr.exclusive || !cpc->active_oncpu)
2308 cpc->exclusive = 0;
2309
2310 perf_pmu_enable(event->pmu);
2311 }
2312
2313 static void
2314 group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
2315 {
2316 struct perf_event *event;
2317
2318 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2319 return;
2320
2321 perf_assert_pmu_disabled(group_event->pmu_ctx->pmu);
2322
2323 event_sched_out(group_event, ctx);
2324
2325 /*
2326 * Schedule out siblings (if any):
2327 */
2328 for_each_sibling_event(event, group_event)
2329 event_sched_out(event, ctx);
2330 }
2331
2332 #define DETACH_GROUP 0x01UL
2333 #define DETACH_CHILD 0x02UL
2334 #define DETACH_DEAD 0x04UL
2335
2336 /*
2337 * Cross CPU call to remove a performance event
2338 *
2339 * We disable the event on the hardware level first. After that we
2340 * remove it from the context list.
2341 */
2342 static void
2343 __perf_remove_from_context(struct perf_event *event,
2344 struct perf_cpu_context *cpuctx,
2345 struct perf_event_context *ctx,
2346 void *info)
2347 {
2348 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
2349 unsigned long flags = (unsigned long)info;
2350
2351 if (ctx->is_active & EVENT_TIME) {
2352 update_context_time(ctx);
2353 update_cgrp_time_from_cpuctx(cpuctx, false);
2354 }
2355
2356 /*
2357 * Ensure event_sched_out() switches to OFF; at the very least
2358 * this avoids raising perf_pending_task() at this time.
2359 */
2360 if (flags & DETACH_DEAD)
2361 event->pending_disable = 1;
2362 event_sched_out(event, ctx);
2363 if (flags & DETACH_GROUP)
2364 perf_group_detach(event);
2365 if (flags & DETACH_CHILD)
2366 perf_child_detach(event);
2367 list_del_event(event, ctx);
2368 if (flags & DETACH_DEAD)
2369 event->state = PERF_EVENT_STATE_DEAD;
2370
2371 if (!pmu_ctx->nr_events) {
2372 pmu_ctx->rotate_necessary = 0;
2373
2374 if (ctx->task && ctx->is_active) {
2375 struct perf_cpu_pmu_context *cpc;
2376
2377 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
2378 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
2379 cpc->task_epc = NULL;
2380 }
2381 }
2382
2383 if (!ctx->nr_events && ctx->is_active) {
2384 if (ctx == &cpuctx->ctx)
2385 update_cgrp_time_from_cpuctx(cpuctx, true);
2386
2387 ctx->is_active = 0;
2388 if (ctx->task) {
2389 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2390 cpuctx->task_ctx = NULL;
2391 }
2392 }
2393 }
2394
2395 /*
2396 * Remove the event from a task's (or a CPU's) list of events.
2397 *
2398 * If event->ctx is a cloned context, callers must make sure that
2399 * every task struct that event->ctx->task could possibly point to
2400 * remains valid. This is OK when called from perf_release since
2401 * that only calls us on the top-level context, which can't be a clone.
2402 * When called from perf_event_exit_task, it's OK because the
2403 * context has been detached from its task.
2404 */
2405 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
2406 {
2407 struct perf_event_context *ctx = event->ctx;
2408
2409 lockdep_assert_held(&ctx->mutex);
2410
2411 /*
2412 * Because of perf_event_exit_task(), perf_remove_from_context() ought
2413 * to work in the face of TASK_TOMBSTONE, unlike every other
2414 * event_function_call() user.
2415 */
2416 raw_spin_lock_irq(&ctx->lock);
2417 if (!ctx->is_active) {
2418 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
2419 ctx, (void *)flags);
2420 raw_spin_unlock_irq(&ctx->lock);
2421 return;
2422 }
2423 raw_spin_unlock_irq(&ctx->lock);
2424
2425 event_function_call(event, __perf_remove_from_context, (void *)flags);
2426 }
2427
2428 /*
2429 * Cross CPU call to disable a performance event
2430 */
2431 static void __perf_event_disable(struct perf_event *event,
2432 struct perf_cpu_context *cpuctx,
2433 struct perf_event_context *ctx,
2434 void *info)
2435 {
2436 if (event->state < PERF_EVENT_STATE_INACTIVE)
2437 return;
2438
2439 if (ctx->is_active & EVENT_TIME) {
2440 update_context_time(ctx);
2441 update_cgrp_time_from_event(event);
2442 }
2443
2444 perf_pmu_disable(event->pmu_ctx->pmu);
2445
2446 if (event == event->group_leader)
2447 group_sched_out(event, ctx);
2448 else
2449 event_sched_out(event, ctx);
2450
2451 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2452 perf_cgroup_event_disable(event, ctx);
2453
2454 perf_pmu_enable(event->pmu_ctx->pmu);
2455 }
2456
2457 /*
2458 * Disable an event.
2459 *
2460 * If event->ctx is a cloned context, callers must make sure that
2461 * every task struct that event->ctx->task could possibly point to
2462 * remains valid. This condition is satisfied when called through
2463 * perf_event_for_each_child or perf_event_for_each because they
2464 * hold the top-level event's child_mutex, so any descendant that
2465 * goes to exit will block in perf_event_exit_event().
2466 *
2467 * When called from perf_pending_irq it's OK because event->ctx
2468 * is the current context on this CPU and preemption is disabled,
2469 * hence we can't get into perf_event_task_sched_out for this context.
2470 */
2471 static void _perf_event_disable(struct perf_event *event)
2472 {
2473 struct perf_event_context *ctx = event->ctx;
2474
2475 raw_spin_lock_irq(&ctx->lock);
2476 if (event->state <= PERF_EVENT_STATE_OFF) {
2477 raw_spin_unlock_irq(&ctx->lock);
2478 return;
2479 }
2480 raw_spin_unlock_irq(&ctx->lock);
2481
2482 event_function_call(event, __perf_event_disable, NULL);
2483 }
2484
2485 void perf_event_disable_local(struct perf_event *event)
2486 {
2487 event_function_local(event, __perf_event_disable, NULL);
2488 }
2489
2490 /*
2491 * Strictly speaking kernel users cannot create groups and therefore this
2492 * interface does not need the perf_event_ctx_lock() magic.
2493 */
2494 void perf_event_disable(struct perf_event *event)
2495 {
2496 struct perf_event_context *ctx;
2497
2498 ctx = perf_event_ctx_lock(event);
2499 _perf_event_disable(event);
2500 perf_event_ctx_unlock(event, ctx);
2501 }
2502 EXPORT_SYMBOL_GPL(perf_event_disable);
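
/*
 * Minimal in-kernel usage sketch (editor's example, error handling mostly
 * omitted): a counter created from kernel code is toggled with the exported
 * perf_event_{disable,enable}() pair and released when done.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *		.pinned	= 1,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, raw_smp_processor_id(),
 *					      NULL, NULL, NULL);
 *	if (!IS_ERR(ev)) {
 *		perf_event_disable(ev);		// stop counting
 *		perf_event_enable(ev);		// resume counting
 *		perf_event_release_kernel(ev);
 *	}
 */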
2503
2504 void perf_event_disable_inatomic(struct perf_event *event)
2505 {
2506 event->pending_disable = 1;
2507 irq_work_queue(&event->pending_irq);
2508 }
2509
2510 #define MAX_INTERRUPTS (~0ULL)
2511
2512 static void perf_log_throttle(struct perf_event *event, int enable);
2513 static void perf_log_itrace_start(struct perf_event *event);
2514
2515 static int
2516 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
2517 {
2518 struct perf_event_pmu_context *epc = event->pmu_ctx;
2519 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2520 int ret = 0;
2521
2522 WARN_ON_ONCE(event->ctx != ctx);
2523
2524 lockdep_assert_held(&ctx->lock);
2525
2526 if (event->state <= PERF_EVENT_STATE_OFF)
2527 return 0;
2528
2529 WRITE_ONCE(event->oncpu, smp_processor_id());
2530 /*
2531 * Order event::oncpu write to happen before the ACTIVE state is
2532 * visible. This allows perf_event_{stop,read}() to observe the correct
2533 * ->oncpu if it sees ACTIVE.
2534 */
2535 smp_wmb();
2536 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
2537
2538 /*
2539 * Unthrottle events: since we were just scheduled in we might have missed
2540 * several ticks already, and for a heavily scheduling task there is little
2541 * guarantee it'll get a tick in a timely manner.
2542 */
2543 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2544 perf_log_throttle(event, 1);
2545 event->hw.interrupts = 0;
2546 }
2547
2548 perf_pmu_disable(event->pmu);
2549
2550 perf_log_itrace_start(event);
2551
2552 if (event->pmu->add(event, PERF_EF_START)) {
2553 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2554 event->oncpu = -1;
2555 ret = -EAGAIN;
2556 goto out;
2557 }
2558
2559 if (!is_software_event(event))
2560 cpc->active_oncpu++;
2561 if (event->attr.freq && event->attr.sample_freq)
2562 ctx->nr_freq++;
2563
2564 if (event->attr.exclusive)
2565 cpc->exclusive = 1;
2566
2567 out:
2568 perf_pmu_enable(event->pmu);
2569
2570 return ret;
2571 }
2572
2573 static int
2574 group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
2575 {
2576 struct perf_event *event, *partial_group = NULL;
2577 struct pmu *pmu = group_event->pmu_ctx->pmu;
2578
2579 if (group_event->state == PERF_EVENT_STATE_OFF)
2580 return 0;
2581
2582 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2583
2584 if (event_sched_in(group_event, ctx))
2585 goto error;
2586
2587 /*
2588 * Schedule in siblings as one group (if any):
2589 */
2590 for_each_sibling_event(event, group_event) {
2591 if (event_sched_in(event, ctx)) {
2592 partial_group = event;
2593 goto group_error;
2594 }
2595 }
2596
2597 if (!pmu->commit_txn(pmu))
2598 return 0;
2599
2600 group_error:
2601 /*
2602 * Groups can be scheduled in as one unit only, so undo any
2603 * partial group before returning:
2604 * The events up to the failed event are scheduled out normally.
2605 */
2606 for_each_sibling_event(event, group_event) {
2607 if (event == partial_group)
2608 break;
2609
2610 event_sched_out(event, ctx);
2611 }
2612 event_sched_out(group_event, ctx);
2613
2614 error:
2615 pmu->cancel_txn(pmu);
2616 return -EAGAIN;
2617 }
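
/*
 * The transaction around group_sched_in() gives the PMU one shot at
 * validating the whole group. The call order, as implemented above, is:
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	pmu->add(leader, PERF_EF_START);	// via event_sched_in()
 *	pmu->add(sibling, PERF_EF_START);	// once per sibling
 *	if (pmu->commit_txn(pmu))		// non-zero: group does not fit
 *		pmu->cancel_txn(pmu);		// after scheduling everything back out
 *
 * A PMU that cannot batch-check its resources may implement the txn hooks
 * as no-ops and reject over-committed groups from ->add() instead.
 */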
2618
2619 /*
2620 * Work out whether we can put this event group on the CPU now.
2621 */
2622 static int group_can_go_on(struct perf_event *event, int can_add_hw)
2623 {
2624 struct perf_event_pmu_context *epc = event->pmu_ctx;
2625 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2626
2627 /*
2628 * Groups consisting entirely of software events can always go on.
2629 */
2630 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2631 return 1;
2632 /*
2633 * If an exclusive group is already on, no other hardware
2634 * events can go on.
2635 */
2636 if (cpc->exclusive)
2637 return 0;
2638 /*
2639 * If this group is exclusive and there are already
2640 * events on the CPU, it can't go on.
2641 */
2642 if (event->attr.exclusive && !list_empty(get_event_list(event)))
2643 return 0;
2644 /*
2645 * Otherwise, try to add it if all previous groups were able
2646 * to go on.
2647 */
2648 return can_add_hw;
2649 }
2650
2651 static void add_event_to_ctx(struct perf_event *event,
2652 struct perf_event_context *ctx)
2653 {
2654 list_add_event(event, ctx);
2655 perf_group_attach(event);
2656 }
2657
2658 static void task_ctx_sched_out(struct perf_event_context *ctx,
2659 enum event_type_t event_type)
2660 {
2661 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2662
2663 if (!cpuctx->task_ctx)
2664 return;
2665
2666 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2667 return;
2668
2669 ctx_sched_out(ctx, event_type);
2670 }
2671
2672 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2673 struct perf_event_context *ctx)
2674 {
2675 ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
2676 if (ctx)
2677 ctx_sched_in(ctx, EVENT_PINNED);
2678 ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
2679 if (ctx)
2680 ctx_sched_in(ctx, EVENT_FLEXIBLE);
2681 }
2682
2683 /*
2684 * We want to maintain the following priority of scheduling:
2685 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2686 * - task pinned (EVENT_PINNED)
2687 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2688 * - task flexible (EVENT_FLEXIBLE).
2689 *
2690 * In order to avoid unscheduling and scheduling back in everything every
2691 * time an event is added, only do it for the groups of equal priority and
2692 * below.
2693 *
2694 * This can be called after a batch operation on task events, in which case
2695 * event_type is a bit mask of the types of events involved. For CPU events,
2696 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2697 */
2698 /*
2699 * XXX: ctx_resched() reschedule entire perf_event_context while adding new
2700 * event to the context or enabling existing event in the context. We can
2701 * probably optimize it by rescheduling only affected pmu_ctx.
2702 */
2703 static void ctx_resched(struct perf_cpu_context *cpuctx,
2704 struct perf_event_context *task_ctx,
2705 enum event_type_t event_type)
2706 {
2707 bool cpu_event = !!(event_type & EVENT_CPU);
2708
2709 /*
2710 * If pinned groups are involved, flexible groups also need to be
2711 * scheduled out.
2712 */
2713 if (event_type & EVENT_PINNED)
2714 event_type |= EVENT_FLEXIBLE;
2715
2716 event_type &= EVENT_ALL;
2717
2718 perf_ctx_disable(&cpuctx->ctx, false);
2719 if (task_ctx) {
2720 perf_ctx_disable(task_ctx, false);
2721 task_ctx_sched_out(task_ctx, event_type);
2722 }
2723
2724 /*
2725 * Decide which cpu ctx groups to schedule out based on the types
2726 * of events that caused rescheduling:
2727 * - EVENT_CPU: schedule out corresponding groups;
2728 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2729 * - otherwise, do nothing more.
2730 */
2731 if (cpu_event)
2732 ctx_sched_out(&cpuctx->ctx, event_type);
2733 else if (event_type & EVENT_PINNED)
2734 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
2735
2736 perf_event_sched_in(cpuctx, task_ctx);
2737
2738 perf_ctx_enable(&cpuctx->ctx, false);
2739 if (task_ctx)
2740 perf_ctx_enable(task_ctx, false);
2741 }
2742
2743 void perf_pmu_resched(struct pmu *pmu)
2744 {
2745 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2746 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2747
2748 perf_ctx_lock(cpuctx, task_ctx);
2749 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
2750 perf_ctx_unlock(cpuctx, task_ctx);
2751 }
2752
2753 /*
2754 * Cross CPU call to install and enable a performance event
2755 *
2756 * Very similar to remote_function() + event_function() but cannot assume that
2757 * things like ctx->is_active and cpuctx->task_ctx are set.
2758 */
2759 static int __perf_install_in_context(void *info)
2760 {
2761 struct perf_event *event = info;
2762 struct perf_event_context *ctx = event->ctx;
2763 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2764 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2765 bool reprogram = true;
2766 int ret = 0;
2767
2768 raw_spin_lock(&cpuctx->ctx.lock);
2769 if (ctx->task) {
2770 raw_spin_lock(&ctx->lock);
2771 task_ctx = ctx;
2772
2773 reprogram = (ctx->task == current);
2774
2775 /*
2776 * If the task is running, it must be running on this CPU,
2777 * otherwise we cannot reprogram things.
2778 *
2779 * If it's not running, we don't care; ctx->lock will
2780 * serialize against it becoming runnable.
2781 */
2782 if (task_curr(ctx->task) && !reprogram) {
2783 ret = -ESRCH;
2784 goto unlock;
2785 }
2786
2787 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2788 } else if (task_ctx) {
2789 raw_spin_lock(&task_ctx->lock);
2790 }
2791
2792 #ifdef CONFIG_CGROUP_PERF
2793 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
2794 /*
2795 * If the current cgroup doesn't match the event's
2796 * cgroup, we should not try to schedule it.
2797 */
2798 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2799 reprogram = cgroup_is_descendant(cgrp->css.cgroup,
2800 event->cgrp->css.cgroup);
2801 }
2802 #endif
2803
2804 if (reprogram) {
2805 ctx_sched_out(ctx, EVENT_TIME);
2806 add_event_to_ctx(event, ctx);
2807 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2808 } else {
2809 add_event_to_ctx(event, ctx);
2810 }
2811
2812 unlock:
2813 perf_ctx_unlock(cpuctx, task_ctx);
2814
2815 return ret;
2816 }
2817
2818 static bool exclusive_event_installable(struct perf_event *event,
2819 struct perf_event_context *ctx);
2820
2821 /*
2822 * Attach a performance event to a context.
2823 *
2824 * Very similar to event_function_call, see comment there.
2825 */
2826 static void
2827 perf_install_in_context(struct perf_event_context *ctx,
2828 struct perf_event *event,
2829 int cpu)
2830 {
2831 struct task_struct *task = READ_ONCE(ctx->task);
2832
2833 lockdep_assert_held(&ctx->mutex);
2834
2835 WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
2836
2837 if (event->cpu != -1)
2838 WARN_ON_ONCE(event->cpu != cpu);
2839
2840 /*
2841 * Ensures that if we can observe event->ctx, both the event and ctx
2842 * will be 'complete'. See perf_iterate_sb_cpu().
2843 */
2844 smp_store_release(&event->ctx, ctx);
2845
2846 /*
2847 * perf_event_attr::disabled events will not run and can be initialized
2848 * without IPI. Except when this is the first event for the context, in
2849 * that case we need the magic of the IPI to set ctx->is_active.
2850 *
2851 * The IOC_ENABLE that is sure to follow the creation of a disabled
2852 * event will issue the IPI and reprogram the hardware.
2853 */
2854 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
2855 ctx->nr_events && !is_cgroup_event(event)) {
2856 raw_spin_lock_irq(&ctx->lock);
2857 if (ctx->task == TASK_TOMBSTONE) {
2858 raw_spin_unlock_irq(&ctx->lock);
2859 return;
2860 }
2861 add_event_to_ctx(event, ctx);
2862 raw_spin_unlock_irq(&ctx->lock);
2863 return;
2864 }
2865
2866 if (!task) {
2867 cpu_function_call(cpu, __perf_install_in_context, event);
2868 return;
2869 }
2870
2871 /*
2872 * Should not happen, we validate the ctx is still alive before calling.
2873 */
2874 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2875 return;
2876
2877 /*
2878 * Installing events is tricky because we cannot rely on ctx->is_active
2879 * to be set in case this is the nr_events 0 -> 1 transition.
2880 *
2881 * Instead we use task_curr(), which tells us if the task is running.
2882 * However, since we use task_curr() outside of rq::lock, we can race
2883 * against the actual state. This means the result can be wrong.
2884 *
2885 * If we get a false positive, we retry, this is harmless.
2886 *
2887 * If we get a false negative, things are complicated. If we are after
2888 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2889 * value must be correct. If we're before, it doesn't matter since
2890 * perf_event_context_sched_in() will program the counter.
2891 *
2892 * However, this hinges on the remote context switch having observed
2893 * our task->perf_event_ctxp[] store, such that it will in fact take
2894 * ctx::lock in perf_event_context_sched_in().
2895 *
2896 * We do this by task_function_call(); if the IPI fails to hit the task,
2897 * we know any future context switch of the task must see the
2898 * perf_event_ctxp[] store.
2899 */
2900
2901 /*
2902 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2903 * task_cpu() load, such that if the IPI then does not find the task
2904 * running, a future context switch of that task must observe the
2905 * store.
2906 */
2907 smp_mb();
2908 again:
2909 if (!task_function_call(task, __perf_install_in_context, event))
2910 return;
2911
2912 raw_spin_lock_irq(&ctx->lock);
2913 task = ctx->task;
2914 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2915 /*
2916 * Cannot happen because we already checked above (which also
2917 * cannot happen), and we hold ctx->mutex, which serializes us
2918 * against perf_event_exit_task_context().
2919 */
2920 raw_spin_unlock_irq(&ctx->lock);
2921 return;
2922 }
2923 /*
2924 * If the task is not running, ctx->lock will avoid it becoming so,
2925 * thus we can safely install the event.
2926 */
2927 if (task_curr(task)) {
2928 raw_spin_unlock_irq(&ctx->lock);
2929 goto again;
2930 }
2931 add_event_to_ctx(event, ctx);
2932 raw_spin_unlock_irq(&ctx->lock);
2933 }
2934
2935 /*
2936 * Cross CPU call to enable a performance event
2937 */
2938 static void __perf_event_enable(struct perf_event *event,
2939 struct perf_cpu_context *cpuctx,
2940 struct perf_event_context *ctx,
2941 void *info)
2942 {
2943 struct perf_event *leader = event->group_leader;
2944 struct perf_event_context *task_ctx;
2945
2946 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2947 event->state <= PERF_EVENT_STATE_ERROR)
2948 return;
2949
2950 if (ctx->is_active)
2951 ctx_sched_out(ctx, EVENT_TIME);
2952
2953 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2954 perf_cgroup_event_enable(event, ctx);
2955
2956 if (!ctx->is_active)
2957 return;
2958
2959 if (!event_filter_match(event)) {
2960 ctx_sched_in(ctx, EVENT_TIME);
2961 return;
2962 }
2963
2964 /*
2965 * If the event is in a group and isn't the group leader,
2966 * then don't put it on unless the group is on.
2967 */
2968 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2969 ctx_sched_in(ctx, EVENT_TIME);
2970 return;
2971 }
2972
2973 task_ctx = cpuctx->task_ctx;
2974 if (ctx->task)
2975 WARN_ON_ONCE(task_ctx != ctx);
2976
2977 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2978 }
2979
2980 /*
2981 * Enable an event.
2982 *
2983 * If event->ctx is a cloned context, callers must make sure that
2984 * every task struct that event->ctx->task could possibly point to
2985 * remains valid. This condition is satisfied when called through
2986 * perf_event_for_each_child or perf_event_for_each as described
2987 * for perf_event_disable.
2988 */
2989 static void _perf_event_enable(struct perf_event *event)
2990 {
2991 struct perf_event_context *ctx = event->ctx;
2992
2993 raw_spin_lock_irq(&ctx->lock);
2994 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2995 event->state < PERF_EVENT_STATE_ERROR) {
2996 out:
2997 raw_spin_unlock_irq(&ctx->lock);
2998 return;
2999 }
3000
3001 /*
3002 * If the event is in error state, clear that first.
3003 *
3004 * That way, if we see the event in error state below, we know that it
3005 * has gone back into error state, as distinct from the task having
3006 * been scheduled away before the cross-call arrived.
3007 */
3008 if (event->state == PERF_EVENT_STATE_ERROR) {
3009 /*
3010 * Detached SIBLING events cannot leave ERROR state.
3011 */
3012 if (event->event_caps & PERF_EV_CAP_SIBLING &&
3013 event->group_leader == event)
3014 goto out;
3015
3016 event->state = PERF_EVENT_STATE_OFF;
3017 }
3018 raw_spin_unlock_irq(&ctx->lock);
3019
3020 event_function_call(event, __perf_event_enable, NULL);
3021 }
3022
3023 /*
3024 * See perf_event_disable();
3025 */
3026 void perf_event_enable(struct perf_event *event)
3027 {
3028 struct perf_event_context *ctx;
3029
3030 ctx = perf_event_ctx_lock(event);
3031 _perf_event_enable(event);
3032 perf_event_ctx_unlock(event, ctx);
3033 }
3034 EXPORT_SYMBOL_GPL(perf_event_enable);
3035
3036 struct stop_event_data {
3037 struct perf_event *event;
3038 unsigned int restart;
3039 };
3040
3041 static int __perf_event_stop(void *info)
3042 {
3043 struct stop_event_data *sd = info;
3044 struct perf_event *event = sd->event;
3045
3046 /* if it's already INACTIVE, do nothing */
3047 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3048 return 0;
3049
3050 /* matches smp_wmb() in event_sched_in() */
3051 smp_rmb();
3052
3053 /*
3054 * There is a window with interrupts enabled before we get here,
3055 * so we need to check again lest we try to stop another CPU's event.
3056 */
3057 if (READ_ONCE(event->oncpu) != smp_processor_id())
3058 return -EAGAIN;
3059
3060 event->pmu->stop(event, PERF_EF_UPDATE);
3061
3062 /*
3063 * May race with the actual stop (through perf_pmu_output_stop()),
3064 * but it is only used for events with AUX ring buffer, and such
3065 * events will refuse to restart because of rb::aux_mmap_count==0,
3066 * see comments in perf_aux_output_begin().
3067 *
3068 * Since this is happening on an event-local CPU, no trace is lost
3069 * while restarting.
3070 */
3071 if (sd->restart)
3072 event->pmu->start(event, 0);
3073
3074 return 0;
3075 }
3076
3077 static int perf_event_stop(struct perf_event *event, int restart)
3078 {
3079 struct stop_event_data sd = {
3080 .event = event,
3081 .restart = restart,
3082 };
3083 int ret = 0;
3084
3085 do {
3086 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3087 return 0;
3088
3089 /* matches smp_wmb() in event_sched_in() */
3090 smp_rmb();
3091
3092 /*
3093 * We only want to restart ACTIVE events, so if the event goes
3094 * inactive here (event->oncpu==-1), there's nothing more to do;
3095 * fall through with ret==-ENXIO.
3096 */
3097 ret = cpu_function_call(READ_ONCE(event->oncpu),
3098 __perf_event_stop, &sd);
3099 } while (ret == -EAGAIN);
3100
3101 return ret;
3102 }
3103
3104 /*
3105 * In order to contain the amount of racy and tricky code in the address
3106 * filter configuration management, it is a two-part process:
3107 *
3108 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
3109 * we update the addresses of corresponding vmas in
3110 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3111 * (p2) when an event is scheduled in (pmu::add), it calls
3112 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
3113 * if the generation has changed since the previous call.
3114 *
3115 * If (p1) happens while the event is active, we restart it to force (p2).
3116 *
3117 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
3118 * pre-existing mappings, called once when new filters arrive via SET_FILTER
3119 * ioctl;
3120 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
3121 * registered mapping, called for every new mmap(), with mm::mmap_lock down
3122 * for reading;
3123 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
3124 * of exec.
3125 */
3126 void perf_event_addr_filters_sync(struct perf_event *event)
3127 {
3128 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
3129
3130 if (!has_addr_filter(event))
3131 return;
3132
3133 raw_spin_lock(&ifh->lock);
3134 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
3135 event->pmu->addr_filters_sync(event);
3136 event->hw.addr_filters_gen = event->addr_filters_gen;
3137 }
3138 raw_spin_unlock(&ifh->lock);
3139 }
3140 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
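
/*
 * Userspace sketch (editor's example): address filters arrive through the
 * PERF_EVENT_IOC_SET_FILTER ioctl as text, e.g. limiting instruction
 * tracing to one range of a DSO. The path and range below are made up.
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
 *	      "filter 0x1000/0x200@/usr/lib/libfoo.so");
 *
 * Parsing such a string updates event::addr_filter_ranges and bumps
 * event::addr_filters_gen (step (p1) above); the PMU picks the change up
 * via perf_event_addr_filters_sync() the next time the event is scheduled
 * in (step (p2)).
 */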
3141
3142 static int _perf_event_refresh(struct perf_event *event, int refresh)
3143 {
3144 /*
3145 * not supported on inherited events
3146 */
3147 if (event->attr.inherit || !is_sampling_event(event))
3148 return -EINVAL;
3149
3150 atomic_add(refresh, &event->event_limit);
3151 _perf_event_enable(event);
3152
3153 return 0;
3154 }
3155
3156 /*
3157 * See perf_event_disable()
3158 */
3159 int perf_event_refresh(struct perf_event *event, int refresh)
3160 {
3161 struct perf_event_context *ctx;
3162 int ret;
3163
3164 ctx = perf_event_ctx_lock(event);
3165 ret = _perf_event_refresh(event, refresh);
3166 perf_event_ctx_unlock(event, ctx);
3167
3168 return ret;
3169 }
3170 EXPORT_SYMBOL_GPL(perf_event_refresh);
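
/*
 * Userspace sketch (editor's example): IOC_REFRESH arms the event for a
 * bounded number of overflows. Once the limit is reached the event is
 * disabled again and the owner is notified, so a profiler can process one
 * burst of samples at a time.
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	// allow one more overflow
 */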
3171
3172 static int perf_event_modify_breakpoint(struct perf_event *bp,
3173 struct perf_event_attr *attr)
3174 {
3175 int err;
3176
3177 _perf_event_disable(bp);
3178
3179 err = modify_user_hw_breakpoint_check(bp, attr, true);
3180
3181 if (!bp->attr.disabled)
3182 _perf_event_enable(bp);
3183
3184 return err;
3185 }
3186
3187 /*
3188 * Copy event-type-independent attributes that may be modified.
3189 */
3190 static void perf_event_modify_copy_attr(struct perf_event_attr *to,
3191 const struct perf_event_attr *from)
3192 {
3193 to->sig_data = from->sig_data;
3194 }
3195
3196 static int perf_event_modify_attr(struct perf_event *event,
3197 struct perf_event_attr *attr)
3198 {
3199 int (*func)(struct perf_event *, struct perf_event_attr *);
3200 struct perf_event *child;
3201 int err;
3202
3203 if (event->attr.type != attr->type)
3204 return -EINVAL;
3205
3206 switch (event->attr.type) {
3207 case PERF_TYPE_BREAKPOINT:
3208 func = perf_event_modify_breakpoint;
3209 break;
3210 default:
3211 /* Place holder for future additions. */
3212 return -EOPNOTSUPP;
3213 }
3214
3215 WARN_ON_ONCE(event->ctx->parent_ctx);
3216
3217 mutex_lock(&event->child_mutex);
3218 /*
3219 * Event-type-independent attributes must be copied before event-type
3220 * modification, which will validate that final attributes match the
3221 * source attributes after all relevant attributes have been copied.
3222 */
3223 perf_event_modify_copy_attr(&event->attr, attr);
3224 err = func(event, attr);
3225 if (err)
3226 goto out;
3227 list_for_each_entry(child, &event->child_list, child_list) {
3228 perf_event_modify_copy_attr(&child->attr, attr);
3229 err = func(child, attr);
3230 if (err)
3231 goto out;
3232 }
3233 out:
3234 mutex_unlock(&event->child_mutex);
3235 return err;
3236 }
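
/*
 * Userspace sketch (editor's example): currently only breakpoint events can
 * be modified in place, via PERF_EVENT_IOC_MODIFY_ATTRIBUTES. The watched
 * variable below is hypothetical.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_BREAKPOINT,
 *		.size		= sizeof(attr),
 *		.bp_type	= HW_BREAKPOINT_W,
 *		.bp_addr	= (__u64)(unsigned long)&watched_var,
 *		.bp_len		= HW_BREAKPOINT_LEN_8,
 *	};
 *
 *	ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
 *
 * perf_event_modify_attr() applies the change to the event and to all of
 * its inherited children under child_mutex.
 */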
3237
3238 static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
3239 enum event_type_t event_type)
3240 {
3241 struct perf_event_context *ctx = pmu_ctx->ctx;
3242 struct perf_event *event, *tmp;
3243 struct pmu *pmu = pmu_ctx->pmu;
3244
3245 if (ctx->task && !ctx->is_active) {
3246 struct perf_cpu_pmu_context *cpc;
3247
3248 cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3249 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3250 cpc->task_epc = NULL;
3251 }
3252
3253 if (!event_type)
3254 return;
3255
3256 perf_pmu_disable(pmu);
3257 if (event_type & EVENT_PINNED) {
3258 list_for_each_entry_safe(event, tmp,
3259 &pmu_ctx->pinned_active,
3260 active_list)
3261 group_sched_out(event, ctx);
3262 }
3263
3264 if (event_type & EVENT_FLEXIBLE) {
3265 list_for_each_entry_safe(event, tmp,
3266 &pmu_ctx->flexible_active,
3267 active_list)
3268 group_sched_out(event, ctx);
3269 /*
3270 * Since we cleared EVENT_FLEXIBLE, also clear
3271 * rotate_necessary, it will be reset by
3272 * ctx_flexible_sched_in() when needed.
3273 */
3274 pmu_ctx->rotate_necessary = 0;
3275 }
3276 perf_pmu_enable(pmu);
3277 }
3278
3279 static void
3280 ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
3281 {
3282 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3283 struct perf_event_pmu_context *pmu_ctx;
3284 int is_active = ctx->is_active;
3285 bool cgroup = event_type & EVENT_CGROUP;
3286
3287 event_type &= ~EVENT_CGROUP;
3288
3289 lockdep_assert_held(&ctx->lock);
3290
3291 if (likely(!ctx->nr_events)) {
3292 /*
3293 * See __perf_remove_from_context().
3294 */
3295 WARN_ON_ONCE(ctx->is_active);
3296 if (ctx->task)
3297 WARN_ON_ONCE(cpuctx->task_ctx);
3298 return;
3299 }
3300
3301 /*
3302 * Always update time if it was set; not only when it changes.
3303 * Otherwise we can 'forget' to update time for any but the last
3304 * context we sched out. For example:
3305 *
3306 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
3307 * ctx_sched_out(.event_type = EVENT_PINNED)
3308 *
3309 * would only update time for the pinned events.
3310 */
3311 if (is_active & EVENT_TIME) {
3312 /* update (and stop) ctx time */
3313 update_context_time(ctx);
3314 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
3315 /*
3316 * CPU-release for the below ->is_active store,
3317 * see __load_acquire() in perf_event_time_now()
3318 */
3319 barrier();
3320 }
3321
3322 ctx->is_active &= ~event_type;
3323 if (!(ctx->is_active & EVENT_ALL))
3324 ctx->is_active = 0;
3325
3326 if (ctx->task) {
3327 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3328 if (!ctx->is_active)
3329 cpuctx->task_ctx = NULL;
3330 }
3331
3332 is_active ^= ctx->is_active; /* changed bits */
3333
3334 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3335 if (cgroup && !pmu_ctx->nr_cgroups)
3336 continue;
3337 __pmu_ctx_sched_out(pmu_ctx, is_active);
3338 }
3339 }
3340
3341 /*
3342 * Test whether two contexts are equivalent, i.e. whether they have both been
3343 * cloned from the same version of the same context.
3344 *
3345 * Equivalence is measured using a generation number in the context that is
3346 * incremented on each modification to it; see unclone_ctx(), list_add_event()
3347 * and list_del_event().
3348 */
3349 static int context_equiv(struct perf_event_context *ctx1,
3350 struct perf_event_context *ctx2)
3351 {
3352 lockdep_assert_held(&ctx1->lock);
3353 lockdep_assert_held(&ctx2->lock);
3354
3355 /* Pinning disables the swap optimization */
3356 if (ctx1->pin_count || ctx2->pin_count)
3357 return 0;
3358
3359 /* If ctx1 is the parent of ctx2 */
3360 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
3361 return 1;
3362
3363 /* If ctx2 is the parent of ctx1 */
3364 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
3365 return 1;
3366
3367 /*
3368 * If ctx1 and ctx2 have the same parent; we flatten the parent
3369 * hierarchy, see perf_event_init_context().
3370 */
3371 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
3372 ctx1->parent_gen == ctx2->parent_gen)
3373 return 1;
3374
3375 /* Unmatched */
3376 return 0;
3377 }
3378
3379 static void __perf_event_sync_stat(struct perf_event *event,
3380 struct perf_event *next_event)
3381 {
3382 u64 value;
3383
3384 if (!event->attr.inherit_stat)
3385 return;
3386
3387 /*
3388 * Update the event value, we cannot use perf_event_read()
3389 * because we're in the middle of a context switch and have IRQs
3390 * disabled, which upsets smp_call_function_single(), however
3391 * we know the event must be on the current CPU, therefore we
3392 * don't need to use it.
3393 */
3394 if (event->state == PERF_EVENT_STATE_ACTIVE)
3395 event->pmu->read(event);
3396
3397 perf_event_update_time(event);
3398
3399 /*
3400 * In order to keep per-task stats reliable we need to flip the event
3401 * values when we flip the contexts.
3402 */
3403 value = local64_read(&next_event->count);
3404 value = local64_xchg(&event->count, value);
3405 local64_set(&next_event->count, value);
3406
3407 swap(event->total_time_enabled, next_event->total_time_enabled);
3408 swap(event->total_time_running, next_event->total_time_running);
3409
3410 /*
3411 * Since we swizzled the values, update the user visible data too.
3412 */
3413 perf_event_update_userpage(event);
3414 perf_event_update_userpage(next_event);
3415 }
3416
3417 static void perf_event_sync_stat(struct perf_event_context *ctx,
3418 struct perf_event_context *next_ctx)
3419 {
3420 struct perf_event *event, *next_event;
3421
3422 if (!ctx->nr_stat)
3423 return;
3424
3425 update_context_time(ctx);
3426
3427 event = list_first_entry(&ctx->event_list,
3428 struct perf_event, event_entry);
3429
3430 next_event = list_first_entry(&next_ctx->event_list,
3431 struct perf_event, event_entry);
3432
3433 while (&event->event_entry != &ctx->event_list &&
3434 &next_event->event_entry != &next_ctx->event_list) {
3435
3436 __perf_event_sync_stat(event, next_event);
3437
3438 event = list_next_entry(event, event_entry);
3439 next_event = list_next_entry(next_event, event_entry);
3440 }
3441 }
3442
3443 #define double_list_for_each_entry(pos1, pos2, head1, head2, member) \
3444 for (pos1 = list_first_entry(head1, typeof(*pos1), member), \
3445 pos2 = list_first_entry(head2, typeof(*pos2), member); \
3446 !list_entry_is_head(pos1, head1, member) && \
3447 !list_entry_is_head(pos2, head2, member); \
3448 pos1 = list_next_entry(pos1, member), \
3449 pos2 = list_next_entry(pos2, member))
3450
3451 static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
3452 struct perf_event_context *next_ctx)
3453 {
3454 struct perf_event_pmu_context *prev_epc, *next_epc;
3455
3456 if (!prev_ctx->nr_task_data)
3457 return;
3458
3459 double_list_for_each_entry(prev_epc, next_epc,
3460 &prev_ctx->pmu_ctx_list, &next_ctx->pmu_ctx_list,
3461 pmu_ctx_entry) {
3462
3463 if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu))
3464 continue;
3465
3466 /*
3467 * PMU specific parts of task perf context can require
3468 * additional synchronization. As an example of such
3469 * synchronization see implementation details of Intel
3470 * LBR call stack data profiling.
3471 */
3472 if (prev_epc->pmu->swap_task_ctx)
3473 prev_epc->pmu->swap_task_ctx(prev_epc, next_epc);
3474 else
3475 swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
3476 }
3477 }
3478
3479 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
3480 {
3481 struct perf_event_pmu_context *pmu_ctx;
3482 struct perf_cpu_pmu_context *cpc;
3483
3484 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3485 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3486
3487 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
3488 pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
3489 }
3490 }
3491
3492 static void
3493 perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
3494 {
3495 struct perf_event_context *ctx = task->perf_event_ctxp;
3496 struct perf_event_context *next_ctx;
3497 struct perf_event_context *parent, *next_parent;
3498 int do_switch = 1;
3499
3500 if (likely(!ctx))
3501 return;
3502
3503 rcu_read_lock();
3504 next_ctx = rcu_dereference(next->perf_event_ctxp);
3505 if (!next_ctx)
3506 goto unlock;
3507
3508 parent = rcu_dereference(ctx->parent_ctx);
3509 next_parent = rcu_dereference(next_ctx->parent_ctx);
3510
3511 /* If neither context has a parent context, they cannot be clones. */
3512 if (!parent && !next_parent)
3513 goto unlock;
3514
3515 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
3516 /*
3517 * Looks like the two contexts are clones, so we might be
3518 * able to optimize the context switch. We lock both
3519 * contexts and check that they are clones under the
3520 * lock (including re-checking that neither has been
3521 * uncloned in the meantime). It doesn't matter which
3522 * order we take the locks because no other cpu could
3523 * be trying to lock both of these tasks.
3524 */
3525 raw_spin_lock(&ctx->lock);
3526 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
3527 if (context_equiv(ctx, next_ctx)) {
3528
3529 perf_ctx_disable(ctx, false);
3530
3531 /* PMIs are disabled; ctx->nr_pending is stable. */
3532 if (local_read(&ctx->nr_pending) ||
3533 local_read(&next_ctx->nr_pending)) {
3534 /*
3535 * Must not swap out ctx when there's pending
3536 * events that rely on the ctx->task relation.
3537 */
3538 raw_spin_unlock(&next_ctx->lock);
3539 rcu_read_unlock();
3540 goto inside_switch;
3541 }
3542
3543 WRITE_ONCE(ctx->task, next);
3544 WRITE_ONCE(next_ctx->task, task);
3545
3546 perf_ctx_sched_task_cb(ctx, false);
3547 perf_event_swap_task_ctx_data(ctx, next_ctx);
3548
3549 perf_ctx_enable(ctx, false);
3550
3551 /*
3552 * RCU_INIT_POINTER here is safe because we've not
3553 * modified the ctx and the above modifications of
3554 * ctx->task and ctx->task_ctx_data are immaterial
3555 * since those values are always verified under
3556 * ctx->lock which we're now holding.
3557 */
3558 RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
3559 RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
3560
3561 do_switch = 0;
3562
3563 perf_event_sync_stat(ctx, next_ctx);
3564 }
3565 raw_spin_unlock(&next_ctx->lock);
3566 raw_spin_unlock(&ctx->lock);
3567 }
3568 unlock:
3569 rcu_read_unlock();
3570
3571 if (do_switch) {
3572 raw_spin_lock(&ctx->lock);
3573 perf_ctx_disable(ctx, false);
3574
3575 inside_switch:
3576 perf_ctx_sched_task_cb(ctx, false);
3577 task_ctx_sched_out(ctx, EVENT_ALL);
3578
3579 perf_ctx_enable(ctx, false);
3580 raw_spin_unlock(&ctx->lock);
3581 }
3582 }
3583
3584 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3585 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
3586
3587 void perf_sched_cb_dec(struct pmu *pmu)
3588 {
3589 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3590
3591 this_cpu_dec(perf_sched_cb_usages);
3592 barrier();
3593
3594 if (!--cpc->sched_cb_usage)
3595 list_del(&cpc->sched_cb_entry);
3596 }
3597
3598
3599 void perf_sched_cb_inc(struct pmu *pmu)
3600 {
3601 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3602
3603 if (!cpc->sched_cb_usage++)
3604 list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3605
3606 barrier();
3607 this_cpu_inc(perf_sched_cb_usages);
3608 }
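/*
 * Usage sketch (illustrative, not taken from this file): a PMU driver whose
 * pmu::sched_task callback must run on context switches would typically call
 *
 *	perf_sched_cb_inc(event->pmu);	// e.g. from its ->add() path
 *	...
 *	perf_sched_cb_dec(event->pmu);	// e.g. from its ->del() path
 *
 * Only perf_sched_cb_inc()/perf_sched_cb_dec() are real here; when exactly a
 * driver calls them is driver specific.
 */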
3609
3610 /*
3611 * This function provides the context switch callback to the lower code
3612 * layer. It is invoked ONLY when the context switch callback is enabled.
3613 *
3614 	 * This callback is relevant even to per-cpu events; for example multi-event
3615 	 * PEBS requires this to provide PID/TID information. This requires that we flush
3616 * all queued PEBS records before we context switch to a new task.
3617 */
3618 static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
3619 {
3620 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3621 struct pmu *pmu;
3622
3623 pmu = cpc->epc.pmu;
3624
3625 /* software PMUs will not have sched_task */
3626 if (WARN_ON_ONCE(!pmu->sched_task))
3627 return;
3628
3629 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3630 perf_pmu_disable(pmu);
3631
3632 pmu->sched_task(cpc->task_epc, sched_in);
3633
3634 perf_pmu_enable(pmu);
3635 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3636 }
3637
3638 static void perf_pmu_sched_task(struct task_struct *prev,
3639 struct task_struct *next,
3640 bool sched_in)
3641 {
3642 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3643 struct perf_cpu_pmu_context *cpc;
3644
3645 /* cpuctx->task_ctx will be handled in perf_event_context_sched_in/out */
3646 if (prev == next || cpuctx->task_ctx)
3647 return;
3648
3649 list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
3650 __perf_pmu_sched_task(cpc, sched_in);
3651 }
3652
3653 static void perf_event_switch(struct task_struct *task,
3654 struct task_struct *next_prev, bool sched_in);
3655
3656 /*
3657 * Called from scheduler to remove the events of the current task,
3658 * with interrupts disabled.
3659 *
3660 * We stop each event and update the event value in event->count.
3661 *
3662 * This does not protect us against NMI, but disable()
3663 * sets the disabled bit in the control field of event _before_
3664 * accessing the event control register. If a NMI hits, then it will
3665 * not restart the event.
3666 */
3667 void __perf_event_task_sched_out(struct task_struct *task,
3668 struct task_struct *next)
3669 {
3670 if (__this_cpu_read(perf_sched_cb_usages))
3671 perf_pmu_sched_task(task, next, false);
3672
3673 if (atomic_read(&nr_switch_events))
3674 perf_event_switch(task, next, false);
3675
3676 perf_event_context_sched_out(task, next);
3677
3678 /*
3679 * if cgroup events exist on this CPU, then we need
3680 * to check if we have to switch out PMU state.
3681 	 * cgroup events are system-wide mode only
3682 */
3683 perf_cgroup_switch(next);
3684 }
3685
3686 static bool perf_less_group_idx(const void *l, const void *r)
3687 {
3688 const struct perf_event *le = *(const struct perf_event **)l;
3689 const struct perf_event *re = *(const struct perf_event **)r;
3690
3691 return le->group_index < re->group_index;
3692 }
3693
3694 static void swap_ptr(void *l, void *r)
3695 {
3696 void **lp = l, **rp = r;
3697
3698 swap(*lp, *rp);
3699 }
3700
3701 static const struct min_heap_callbacks perf_min_heap = {
3702 .elem_size = sizeof(struct perf_event *),
3703 .less = perf_less_group_idx,
3704 .swp = swap_ptr,
3705 };
3706
3707 static void __heap_add(struct min_heap *heap, struct perf_event *event)
3708 {
3709 struct perf_event **itrs = heap->data;
3710
3711 if (event) {
3712 itrs[heap->nr] = event;
3713 heap->nr++;
3714 }
3715 }
3716
3717 static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
3718 {
3719 struct perf_cpu_pmu_context *cpc;
3720
3721 if (!pmu_ctx->ctx->task)
3722 return;
3723
3724 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3725 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3726 cpc->task_epc = pmu_ctx;
3727 }
3728
3729 static noinline int visit_groups_merge(struct perf_event_context *ctx,
3730 struct perf_event_groups *groups, int cpu,
3731 struct pmu *pmu,
3732 int (*func)(struct perf_event *, void *),
3733 void *data)
3734 {
3735 #ifdef CONFIG_CGROUP_PERF
3736 struct cgroup_subsys_state *css = NULL;
3737 #endif
3738 struct perf_cpu_context *cpuctx = NULL;
3739 /* Space for per CPU and/or any CPU event iterators. */
3740 struct perf_event *itrs[2];
3741 struct min_heap event_heap;
3742 struct perf_event **evt;
3743 int ret;
3744
3745 if (pmu->filter && pmu->filter(pmu, cpu))
3746 return 0;
3747
3748 if (!ctx->task) {
3749 cpuctx = this_cpu_ptr(&perf_cpu_context);
3750 event_heap = (struct min_heap){
3751 .data = cpuctx->heap,
3752 .nr = 0,
3753 .size = cpuctx->heap_size,
3754 };
3755
3756 lockdep_assert_held(&cpuctx->ctx.lock);
3757
3758 #ifdef CONFIG_CGROUP_PERF
3759 if (cpuctx->cgrp)
3760 css = &cpuctx->cgrp->css;
3761 #endif
3762 } else {
3763 event_heap = (struct min_heap){
3764 .data = itrs,
3765 .nr = 0,
3766 .size = ARRAY_SIZE(itrs),
3767 };
3768 /* Events not within a CPU context may be on any CPU. */
3769 __heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL));
3770 }
3771 evt = event_heap.data;
3772
3773 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
3774
3775 #ifdef CONFIG_CGROUP_PERF
3776 for (; css; css = css->parent)
3777 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
3778 #endif
3779
3780 if (event_heap.nr) {
3781 __link_epc((*evt)->pmu_ctx);
3782 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
3783 }
3784
3785 min_heapify_all(&event_heap, &perf_min_heap);
3786
3787 while (event_heap.nr) {
3788 ret = func(*evt, data);
3789 if (ret)
3790 return ret;
3791
3792 *evt = perf_event_groups_next(*evt, pmu);
3793 if (*evt)
3794 min_heapify(&event_heap, 0, &perf_min_heap);
3795 else
3796 min_heap_pop(&event_heap, &perf_min_heap);
3797 }
3798
3799 return 0;
3800 }
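/*
 * Sketch of the merge above (illustration only): for a task context the heap
 * is seeded with at most two iterators, the first {cpu = -1} group and the
 * first {cpu = this_cpu} group for @pmu; for a CPU context it is seeded with
 * the first {cpu = this_cpu} group plus one iterator per cgroup ancestor.
 * Each step visits the iterator with the smallest group_index and advances
 * it, so groups are visited in ascending group_index order across all the
 * sub-trees.
 */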
3801
3802 /*
3803 * Because the userpage is strictly per-event (there is no concept of context,
3804 * so there cannot be a context indirection), every userpage must be updated
3805 * when context time starts :-(
3806 *
3807 * IOW, we must not miss EVENT_TIME edges.
3808 */
3809 static inline bool event_update_userpage(struct perf_event *event)
3810 {
3811 if (likely(!atomic_read(&event->mmap_count)))
3812 return false;
3813
3814 perf_event_update_time(event);
3815 perf_event_update_userpage(event);
3816
3817 return true;
3818 }
3819
3820 static inline void group_update_userpage(struct perf_event *group_event)
3821 {
3822 struct perf_event *event;
3823
3824 if (!event_update_userpage(group_event))
3825 return;
3826
3827 for_each_sibling_event(event, group_event)
3828 event_update_userpage(event);
3829 }
3830
3831 static int merge_sched_in(struct perf_event *event, void *data)
3832 {
3833 struct perf_event_context *ctx = event->ctx;
3834 int *can_add_hw = data;
3835
3836 if (event->state <= PERF_EVENT_STATE_OFF)
3837 return 0;
3838
3839 if (!event_filter_match(event))
3840 return 0;
3841
3842 if (group_can_go_on(event, *can_add_hw)) {
3843 if (!group_sched_in(event, ctx))
3844 list_add_tail(&event->active_list, get_event_list(event));
3845 }
3846
3847 if (event->state == PERF_EVENT_STATE_INACTIVE) {
3848 *can_add_hw = 0;
3849 if (event->attr.pinned) {
3850 perf_cgroup_event_disable(event, ctx);
3851 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
3852 } else {
3853 struct perf_cpu_pmu_context *cpc;
3854
3855 event->pmu_ctx->rotate_necessary = 1;
3856 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
3857 perf_mux_hrtimer_restart(cpc);
3858 group_update_userpage(event);
3859 }
3860 }
3861
3862 return 0;
3863 }
3864
3865 static void pmu_groups_sched_in(struct perf_event_context *ctx,
3866 struct perf_event_groups *groups,
3867 struct pmu *pmu)
3868 {
3869 int can_add_hw = 1;
3870 visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
3871 merge_sched_in, &can_add_hw);
3872 }
3873
3874 static void ctx_groups_sched_in(struct perf_event_context *ctx,
3875 struct perf_event_groups *groups,
3876 bool cgroup)
3877 {
3878 struct perf_event_pmu_context *pmu_ctx;
3879
3880 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3881 if (cgroup && !pmu_ctx->nr_cgroups)
3882 continue;
3883 pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
3884 }
3885 }
3886
3887 static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
3888 struct pmu *pmu)
3889 {
3890 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
3891 }
3892
3893 static void
3894 ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
3895 {
3896 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3897 int is_active = ctx->is_active;
3898 bool cgroup = event_type & EVENT_CGROUP;
3899
3900 event_type &= ~EVENT_CGROUP;
3901
3902 lockdep_assert_held(&ctx->lock);
3903
3904 if (likely(!ctx->nr_events))
3905 return;
3906
3907 if (!(is_active & EVENT_TIME)) {
3908 /* start ctx time */
3909 __update_context_time(ctx, false);
3910 perf_cgroup_set_timestamp(cpuctx);
3911 /*
3912 * CPU-release for the below ->is_active store,
3913 * see __load_acquire() in perf_event_time_now()
3914 */
3915 barrier();
3916 }
3917
3918 ctx->is_active |= (event_type | EVENT_TIME);
3919 if (ctx->task) {
3920 if (!is_active)
3921 cpuctx->task_ctx = ctx;
3922 else
3923 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3924 }
3925
3926 is_active ^= ctx->is_active; /* changed bits */
3927
3928 /*
3929 * First go through the list and put on any pinned groups
3930 * in order to give them the best chance of going on.
3931 */
3932 if (is_active & EVENT_PINNED)
3933 ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
3934
3935 /* Then walk through the lower prio flexible groups */
3936 if (is_active & EVENT_FLEXIBLE)
3937 ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
3938 }
3939
3940 static void perf_event_context_sched_in(struct task_struct *task)
3941 {
3942 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3943 struct perf_event_context *ctx;
3944
3945 rcu_read_lock();
3946 ctx = rcu_dereference(task->perf_event_ctxp);
3947 if (!ctx)
3948 goto rcu_unlock;
3949
3950 if (cpuctx->task_ctx == ctx) {
3951 perf_ctx_lock(cpuctx, ctx);
3952 perf_ctx_disable(ctx, false);
3953
3954 perf_ctx_sched_task_cb(ctx, true);
3955
3956 perf_ctx_enable(ctx, false);
3957 perf_ctx_unlock(cpuctx, ctx);
3958 goto rcu_unlock;
3959 }
3960
3961 perf_ctx_lock(cpuctx, ctx);
3962 /*
3963 * We must check ctx->nr_events while holding ctx->lock, such
3964 * that we serialize against perf_install_in_context().
3965 */
3966 if (!ctx->nr_events)
3967 goto unlock;
3968
3969 perf_ctx_disable(ctx, false);
3970 /*
3971 * We want to keep the following priority order:
3972 * cpu pinned (that don't need to move), task pinned,
3973 * cpu flexible, task flexible.
3974 *
3975 * However, if task's ctx is not carrying any pinned
3976 * events, no need to flip the cpuctx's events around.
3977 */
3978 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
3979 perf_ctx_disable(&cpuctx->ctx, false);
3980 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
3981 }
3982
3983 perf_event_sched_in(cpuctx, ctx);
3984
3985 perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
3986
3987 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
3988 perf_ctx_enable(&cpuctx->ctx, false);
3989
3990 perf_ctx_enable(ctx, false);
3991
3992 unlock:
3993 perf_ctx_unlock(cpuctx, ctx);
3994 rcu_unlock:
3995 rcu_read_unlock();
3996 }
3997
3998 /*
3999 * Called from scheduler to add the events of the current task
4000 * with interrupts disabled.
4001 *
4002 * We restore the event value and then enable it.
4003 *
4004 * This does not protect us against NMI, but enable()
4005 * sets the enabled bit in the control field of event _before_
4006 * accessing the event control register. If a NMI hits, then it will
4007 * keep the event running.
4008 */
4009 void __perf_event_task_sched_in(struct task_struct *prev,
4010 struct task_struct *task)
4011 {
4012 perf_event_context_sched_in(task);
4013
4014 if (atomic_read(&nr_switch_events))
4015 perf_event_switch(task, prev, true);
4016
4017 if (__this_cpu_read(perf_sched_cb_usages))
4018 perf_pmu_sched_task(prev, task, true);
4019 }
4020
4021 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
4022 {
4023 u64 frequency = event->attr.sample_freq;
4024 u64 sec = NSEC_PER_SEC;
4025 u64 divisor, dividend;
4026
4027 int count_fls, nsec_fls, frequency_fls, sec_fls;
4028
4029 count_fls = fls64(count);
4030 nsec_fls = fls64(nsec);
4031 frequency_fls = fls64(frequency);
4032 sec_fls = 30;
4033
4034 /*
4035 * We got @count in @nsec, with a target of sample_freq HZ
4036 * the target period becomes:
4037 *
4038 * @count * 10^9
4039 * period = -------------------
4040 * @nsec * sample_freq
4041 *
4042 */
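	/*
	 * Worked example with made-up numbers: @count = 1,000,000 events
	 * observed over @nsec = 10,000,000 ns at sample_freq = 1000 Hz gives
	 *
	 *	period = 1e6 * 1e9 / (1e7 * 1e3) = 100,000
	 *
	 * i.e. roughly one sample every 100,000 events.
	 */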
4043
4044 /*
4045 * Reduce accuracy by one bit such that @a and @b converge
4046 * to a similar magnitude.
4047 */
4048 #define REDUCE_FLS(a, b) \
4049 do { \
4050 if (a##_fls > b##_fls) { \
4051 a >>= 1; \
4052 a##_fls--; \
4053 } else { \
4054 b >>= 1; \
4055 b##_fls--; \
4056 } \
4057 } while (0)
4058
4059 /*
4060 * Reduce accuracy until either term fits in a u64, then proceed with
4061 * the other, so that finally we can do a u64/u64 division.
4062 */
4063 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
4064 REDUCE_FLS(nsec, frequency);
4065 REDUCE_FLS(sec, count);
4066 }
4067
4068 if (count_fls + sec_fls > 64) {
4069 divisor = nsec * frequency;
4070
4071 while (count_fls + sec_fls > 64) {
4072 REDUCE_FLS(count, sec);
4073 divisor >>= 1;
4074 }
4075
4076 dividend = count * sec;
4077 } else {
4078 dividend = count * sec;
4079
4080 while (nsec_fls + frequency_fls > 64) {
4081 REDUCE_FLS(nsec, frequency);
4082 dividend >>= 1;
4083 }
4084
4085 divisor = nsec * frequency;
4086 }
4087
4088 if (!divisor)
4089 return dividend;
4090
4091 return div64_u64(dividend, divisor);
4092 }
4093
4094 static DEFINE_PER_CPU(int, perf_throttled_count);
4095 static DEFINE_PER_CPU(u64, perf_throttled_seq);
4096
4097 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
4098 {
4099 struct hw_perf_event *hwc = &event->hw;
4100 s64 period, sample_period;
4101 s64 delta;
4102
4103 period = perf_calculate_period(event, nsec, count);
4104
4105 delta = (s64)(period - hwc->sample_period);
4106 delta = (delta + 7) / 8; /* low pass filter */
4107
4108 sample_period = hwc->sample_period + delta;
4109
4110 if (!sample_period)
4111 sample_period = 1;
4112
4113 hwc->sample_period = sample_period;
4114
4115 if (local64_read(&hwc->period_left) > 8*sample_period) {
4116 if (disable)
4117 event->pmu->stop(event, PERF_EF_UPDATE);
4118
4119 local64_set(&hwc->period_left, 0);
4120
4121 if (disable)
4122 event->pmu->start(event, PERF_EF_RELOAD);
4123 }
4124 }
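/*
 * Example of the low pass filter above (illustrative numbers): if the
 * computed period jumps from 100,000 to 180,000, delta is 80,000 and only
 * delta/8 = 10,000 is applied, so hwc->sample_period moves to 110,000.
 * Repeated ticks then converge on the new target instead of oscillating.
 */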
4125
4126 /*
4127 * combine freq adjustment with unthrottling to avoid two passes over the
4128  * events. At the same time, make sure that having freq events does not change
4129 * the rate of unthrottling as that would introduce bias.
4130 */
4131 static void
4132 perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
4133 {
4134 struct perf_event *event;
4135 struct hw_perf_event *hwc;
4136 u64 now, period = TICK_NSEC;
4137 s64 delta;
4138
4139 /*
4140 * only need to iterate over all events iff:
4141 	 * - the context has events in frequency mode (needs freq adjust)
4142 * - there are events to unthrottle on this cpu
4143 */
4144 if (!(ctx->nr_freq || unthrottle))
4145 return;
4146
4147 raw_spin_lock(&ctx->lock);
4148
4149 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4150 if (event->state != PERF_EVENT_STATE_ACTIVE)
4151 continue;
4152
4153 // XXX use visit thingy to avoid the -1,cpu match
4154 if (!event_filter_match(event))
4155 continue;
4156
4157 perf_pmu_disable(event->pmu);
4158
4159 hwc = &event->hw;
4160
4161 if (hwc->interrupts == MAX_INTERRUPTS) {
4162 hwc->interrupts = 0;
4163 perf_log_throttle(event, 1);
4164 event->pmu->start(event, 0);
4165 }
4166
4167 if (!event->attr.freq || !event->attr.sample_freq)
4168 goto next;
4169
4170 /*
4171 * stop the event and update event->count
4172 */
4173 event->pmu->stop(event, PERF_EF_UPDATE);
4174
4175 now = local64_read(&event->count);
4176 delta = now - hwc->freq_count_stamp;
4177 hwc->freq_count_stamp = now;
4178
4179 /*
4180 	 * restart the event:
4181 	 * reload only if the value has changed;
4182 	 * we have already stopped the event, so tell that
4183 	 * to perf_adjust_period() to avoid stopping it
4184 	 * twice.
4185 */
4186 if (delta > 0)
4187 perf_adjust_period(event, period, delta, false);
4188
4189 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
4190 next:
4191 perf_pmu_enable(event->pmu);
4192 }
4193
4194 raw_spin_unlock(&ctx->lock);
4195 }
4196
4197 /*
4198 * Move @event to the tail of @ctx's eligible events.
4199 */
4200 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
4201 {
4202 /*
4203 	 * Rotate the first entry of the non-pinned groups to the tail. Rotation
4204 	 * might be disabled by the inheritance code.
4205 */
4206 if (ctx->rotate_disable)
4207 return;
4208
4209 perf_event_groups_delete(&ctx->flexible_groups, event);
4210 perf_event_groups_insert(&ctx->flexible_groups, event);
4211 }
4212
4213 /* pick an event from the flexible_groups to rotate */
4214 static inline struct perf_event *
4215 ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
4216 {
4217 struct perf_event *event;
4218 struct rb_node *node;
4219 struct rb_root *tree;
4220 struct __group_key key = {
4221 .pmu = pmu_ctx->pmu,
4222 };
4223
4224 /* pick the first active flexible event */
4225 event = list_first_entry_or_null(&pmu_ctx->flexible_active,
4226 struct perf_event, active_list);
4227 if (event)
4228 goto out;
4229
4230 /* if no active flexible event, pick the first event */
4231 tree = &pmu_ctx->ctx->flexible_groups.tree;
4232
4233 if (!pmu_ctx->ctx->task) {
4234 key.cpu = smp_processor_id();
4235
4236 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4237 if (node)
4238 event = __node_2_pe(node);
4239 goto out;
4240 }
4241
4242 key.cpu = -1;
4243 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4244 if (node) {
4245 event = __node_2_pe(node);
4246 goto out;
4247 }
4248
4249 key.cpu = smp_processor_id();
4250 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4251 if (node)
4252 event = __node_2_pe(node);
4253
4254 out:
4255 /*
4256 	 * Unconditionally clear rotate_necessary; if merge_sched_in()
4257 	 * finds there are unschedulable events, it will set it again.
4258 */
4259 pmu_ctx->rotate_necessary = 0;
4260
4261 return event;
4262 }
4263
4264 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
4265 {
4266 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4267 struct perf_event_pmu_context *cpu_epc, *task_epc = NULL;
4268 struct perf_event *cpu_event = NULL, *task_event = NULL;
4269 int cpu_rotate, task_rotate;
4270 struct pmu *pmu;
4271
4272 /*
4273 * Since we run this from IRQ context, nobody can install new
4274 * events, thus the event count values are stable.
4275 */
4276
4277 cpu_epc = &cpc->epc;
4278 pmu = cpu_epc->pmu;
4279 task_epc = cpc->task_epc;
4280
4281 cpu_rotate = cpu_epc->rotate_necessary;
4282 task_rotate = task_epc ? task_epc->rotate_necessary : 0;
4283
4284 if (!(cpu_rotate || task_rotate))
4285 return false;
4286
4287 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
4288 perf_pmu_disable(pmu);
4289
4290 if (task_rotate)
4291 task_event = ctx_event_to_rotate(task_epc);
4292 if (cpu_rotate)
4293 cpu_event = ctx_event_to_rotate(cpu_epc);
4294
4295 /*
4296 	 * As per the order given at ctx_resched(), first 'pop' task flexible
4297 	 * and then, if needed, CPU flexible.
4298 */
4299 if (task_event || (task_epc && cpu_event)) {
4300 update_context_time(task_epc->ctx);
4301 __pmu_ctx_sched_out(task_epc, EVENT_FLEXIBLE);
4302 }
4303
4304 if (cpu_event) {
4305 update_context_time(&cpuctx->ctx);
4306 __pmu_ctx_sched_out(cpu_epc, EVENT_FLEXIBLE);
4307 rotate_ctx(&cpuctx->ctx, cpu_event);
4308 __pmu_ctx_sched_in(&cpuctx->ctx, pmu);
4309 }
4310
4311 if (task_event)
4312 rotate_ctx(task_epc->ctx, task_event);
4313
4314 if (task_event || (task_epc && cpu_event))
4315 __pmu_ctx_sched_in(task_epc->ctx, pmu);
4316
4317 perf_pmu_enable(pmu);
4318 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
4319
4320 return true;
4321 }
4322
4323 void perf_event_task_tick(void)
4324 {
4325 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4326 struct perf_event_context *ctx;
4327 int throttled;
4328
4329 lockdep_assert_irqs_disabled();
4330
4331 __this_cpu_inc(perf_throttled_seq);
4332 throttled = __this_cpu_xchg(perf_throttled_count, 0);
4333 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
4334
4335 perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
4336
4337 rcu_read_lock();
4338 ctx = rcu_dereference(current->perf_event_ctxp);
4339 if (ctx)
4340 perf_adjust_freq_unthr_context(ctx, !!throttled);
4341 rcu_read_unlock();
4342 }
4343
4344 static int event_enable_on_exec(struct perf_event *event,
4345 struct perf_event_context *ctx)
4346 {
4347 if (!event->attr.enable_on_exec)
4348 return 0;
4349
4350 event->attr.enable_on_exec = 0;
4351 if (event->state >= PERF_EVENT_STATE_INACTIVE)
4352 return 0;
4353
4354 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
4355
4356 return 1;
4357 }
4358
4359 /*
4360 * Enable all of a task's events that have been marked enable-on-exec.
4361 * This expects task == current.
4362 */
4363 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
4364 {
4365 struct perf_event_context *clone_ctx = NULL;
4366 enum event_type_t event_type = 0;
4367 struct perf_cpu_context *cpuctx;
4368 struct perf_event *event;
4369 unsigned long flags;
4370 int enabled = 0;
4371
4372 local_irq_save(flags);
4373 if (WARN_ON_ONCE(current->perf_event_ctxp != ctx))
4374 goto out;
4375
4376 if (!ctx->nr_events)
4377 goto out;
4378
4379 cpuctx = this_cpu_ptr(&perf_cpu_context);
4380 perf_ctx_lock(cpuctx, ctx);
4381 ctx_sched_out(ctx, EVENT_TIME);
4382
4383 list_for_each_entry(event, &ctx->event_list, event_entry) {
4384 enabled |= event_enable_on_exec(event, ctx);
4385 event_type |= get_event_type(event);
4386 }
4387
4388 /*
4389 * Unclone and reschedule this context if we enabled any event.
4390 */
4391 if (enabled) {
4392 clone_ctx = unclone_ctx(ctx);
4393 ctx_resched(cpuctx, ctx, event_type);
4394 } else {
4395 ctx_sched_in(ctx, EVENT_TIME);
4396 }
4397 perf_ctx_unlock(cpuctx, ctx);
4398
4399 out:
4400 local_irq_restore(flags);
4401
4402 if (clone_ctx)
4403 put_ctx(clone_ctx);
4404 }
4405
4406 static void perf_remove_from_owner(struct perf_event *event);
4407 static void perf_event_exit_event(struct perf_event *event,
4408 struct perf_event_context *ctx);
4409
4410 /*
4411 * Removes all events from the current task that have been marked
4412 * remove-on-exec, and feeds their values back to parent events.
4413 */
4414 static void perf_event_remove_on_exec(struct perf_event_context *ctx)
4415 {
4416 struct perf_event_context *clone_ctx = NULL;
4417 struct perf_event *event, *next;
4418 unsigned long flags;
4419 bool modified = false;
4420
4421 mutex_lock(&ctx->mutex);
4422
4423 if (WARN_ON_ONCE(ctx->task != current))
4424 goto unlock;
4425
4426 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
4427 if (!event->attr.remove_on_exec)
4428 continue;
4429
4430 if (!is_kernel_event(event))
4431 perf_remove_from_owner(event);
4432
4433 modified = true;
4434
4435 perf_event_exit_event(event, ctx);
4436 }
4437
4438 raw_spin_lock_irqsave(&ctx->lock, flags);
4439 if (modified)
4440 clone_ctx = unclone_ctx(ctx);
4441 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4442
4443 unlock:
4444 mutex_unlock(&ctx->mutex);
4445
4446 if (clone_ctx)
4447 put_ctx(clone_ctx);
4448 }
4449
4450 struct perf_read_data {
4451 struct perf_event *event;
4452 bool group;
4453 int ret;
4454 };
4455
4456 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
4457 {
4458 u16 local_pkg, event_pkg;
4459
4460 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
4461 int local_cpu = smp_processor_id();
4462
4463 event_pkg = topology_physical_package_id(event_cpu);
4464 local_pkg = topology_physical_package_id(local_cpu);
4465
4466 if (event_pkg == local_pkg)
4467 return local_cpu;
4468 }
4469
4470 return event_cpu;
4471 }
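/*
 * E.g. (illustrative): a PERF_EV_CAP_READ_ACTIVE_PKG event bound to CPU 8 can
 * be read from CPU 2 when both CPUs sit in the same physical package, so the
 * read runs locally instead of sending an IPI to CPU 8.
 */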
4472
4473 /*
4474 * Cross CPU call to read the hardware event
4475 */
4476 static void __perf_event_read(void *info)
4477 {
4478 struct perf_read_data *data = info;
4479 struct perf_event *sub, *event = data->event;
4480 struct perf_event_context *ctx = event->ctx;
4481 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4482 struct pmu *pmu = event->pmu;
4483
4484 /*
4485 * If this is a task context, we need to check whether it is
4486 	 * the current task context of this cpu. If not, it has been
4487 * scheduled out before the smp call arrived. In that case
4488 * event->count would have been updated to a recent sample
4489 * when the event was scheduled out.
4490 */
4491 if (ctx->task && cpuctx->task_ctx != ctx)
4492 return;
4493
4494 raw_spin_lock(&ctx->lock);
4495 if (ctx->is_active & EVENT_TIME) {
4496 update_context_time(ctx);
4497 update_cgrp_time_from_event(event);
4498 }
4499
4500 perf_event_update_time(event);
4501 if (data->group)
4502 perf_event_update_sibling_time(event);
4503
4504 if (event->state != PERF_EVENT_STATE_ACTIVE)
4505 goto unlock;
4506
4507 if (!data->group) {
4508 pmu->read(event);
4509 data->ret = 0;
4510 goto unlock;
4511 }
4512
4513 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
4514
4515 pmu->read(event);
4516
4517 for_each_sibling_event(sub, event) {
4518 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
4519 /*
4520 * Use sibling's PMU rather than @event's since
4521 * sibling could be on different (eg: software) PMU.
4522 */
4523 sub->pmu->read(sub);
4524 }
4525 }
4526
4527 data->ret = pmu->commit_txn(pmu);
4528
4529 unlock:
4530 raw_spin_unlock(&ctx->lock);
4531 }
4532
4533 static inline u64 perf_event_count(struct perf_event *event)
4534 {
4535 return local64_read(&event->count) + atomic64_read(&event->child_count);
4536 }
4537
4538 static void calc_timer_values(struct perf_event *event,
4539 u64 *now,
4540 u64 *enabled,
4541 u64 *running)
4542 {
4543 u64 ctx_time;
4544
4545 *now = perf_clock();
4546 ctx_time = perf_event_time_now(event, *now);
4547 __perf_update_times(event, ctx_time, enabled, running);
4548 }
4549
4550 /*
4551 * NMI-safe method to read a local event, that is an event that
4552 * is:
4553 * - either for the current task, or for this CPU
4554  * - does not have inherit set, because inherited task events
4555  *   will not be local and we cannot read them atomically
4556 * - must not have a pmu::count method
4557 */
4558 int perf_event_read_local(struct perf_event *event, u64 *value,
4559 u64 *enabled, u64 *running)
4560 {
4561 unsigned long flags;
4562 int ret = 0;
4563
4564 /*
4565 * Disabling interrupts avoids all counter scheduling (context
4566 * switches, timer based rotation and IPIs).
4567 */
4568 local_irq_save(flags);
4569
4570 /*
4571 * It must not be an event with inherit set, we cannot read
4572 * all child counters from atomic context.
4573 */
4574 if (event->attr.inherit) {
4575 ret = -EOPNOTSUPP;
4576 goto out;
4577 }
4578
4579 /* If this is a per-task event, it must be for current */
4580 if ((event->attach_state & PERF_ATTACH_TASK) &&
4581 event->hw.target != current) {
4582 ret = -EINVAL;
4583 goto out;
4584 }
4585
4586 /* If this is a per-CPU event, it must be for this CPU */
4587 if (!(event->attach_state & PERF_ATTACH_TASK) &&
4588 event->cpu != smp_processor_id()) {
4589 ret = -EINVAL;
4590 goto out;
4591 }
4592
4593 /* If this is a pinned event it must be running on this CPU */
4594 if (event->attr.pinned && event->oncpu != smp_processor_id()) {
4595 ret = -EBUSY;
4596 goto out;
4597 }
4598
4599 /*
4600 	 * If the event is currently on this CPU, it's either a per-task event,
4601 	 * or local to this CPU. Furthermore, it means it's ACTIVE (otherwise
4602 * oncpu == -1).
4603 */
4604 if (event->oncpu == smp_processor_id())
4605 event->pmu->read(event);
4606
4607 *value = local64_read(&event->count);
4608 if (enabled || running) {
4609 u64 __enabled, __running, __now;
4610
4611 calc_timer_values(event, &__now, &__enabled, &__running);
4612 if (enabled)
4613 *enabled = __enabled;
4614 if (running)
4615 *running = __running;
4616 }
4617 out:
4618 local_irq_restore(flags);
4619
4620 return ret;
4621 }
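/*
 * Minimal caller sketch (illustrative only):
 *
 *	u64 value;
 *
 *	if (!perf_event_read_local(event, &value, NULL, NULL))
 *		...use value...
 *
 * @enabled and @running may be NULL when the caller does not need the times;
 * this is the lock-free path intended for callers (e.g. BPF helpers) that
 * cannot sleep or take ctx->mutex.
 */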
4622
4623 static int perf_event_read(struct perf_event *event, bool group)
4624 {
4625 enum perf_event_state state = READ_ONCE(event->state);
4626 int event_cpu, ret = 0;
4627
4628 /*
4629 * If event is enabled and currently active on a CPU, update the
4630 * value in the event structure:
4631 */
4632 again:
4633 if (state == PERF_EVENT_STATE_ACTIVE) {
4634 struct perf_read_data data;
4635
4636 /*
4637 * Orders the ->state and ->oncpu loads such that if we see
4638 * ACTIVE we must also see the right ->oncpu.
4639 *
4640 * Matches the smp_wmb() from event_sched_in().
4641 */
4642 smp_rmb();
4643
4644 event_cpu = READ_ONCE(event->oncpu);
4645 if ((unsigned)event_cpu >= nr_cpu_ids)
4646 return 0;
4647
4648 data = (struct perf_read_data){
4649 .event = event,
4650 .group = group,
4651 .ret = 0,
4652 };
4653
4654 preempt_disable();
4655 event_cpu = __perf_event_read_cpu(event, event_cpu);
4656
4657 /*
4658 * Purposely ignore the smp_call_function_single() return
4659 * value.
4660 *
4661 * If event_cpu isn't a valid CPU it means the event got
4662 * scheduled out and that will have updated the event count.
4663 *
4664 * Therefore, either way, we'll have an up-to-date event count
4665 * after this.
4666 */
4667 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
4668 preempt_enable();
4669 ret = data.ret;
4670
4671 } else if (state == PERF_EVENT_STATE_INACTIVE) {
4672 struct perf_event_context *ctx = event->ctx;
4673 unsigned long flags;
4674
4675 raw_spin_lock_irqsave(&ctx->lock, flags);
4676 state = event->state;
4677 if (state != PERF_EVENT_STATE_INACTIVE) {
4678 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4679 goto again;
4680 }
4681
4682 /*
4683 * May read while context is not active (e.g., thread is
4684 	 * (e.g., thread is blocked); in that case
4685 */
4686 if (ctx->is_active & EVENT_TIME) {
4687 update_context_time(ctx);
4688 update_cgrp_time_from_event(event);
4689 }
4690
4691 perf_event_update_time(event);
4692 if (group)
4693 perf_event_update_sibling_time(event);
4694 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4695 }
4696
4697 return ret;
4698 }
4699
4700 /*
4701 * Initialize the perf_event context in a task_struct:
4702 */
4703 static void __perf_event_init_context(struct perf_event_context *ctx)
4704 {
4705 raw_spin_lock_init(&ctx->lock);
4706 mutex_init(&ctx->mutex);
4707 INIT_LIST_HEAD(&ctx->pmu_ctx_list);
4708 perf_event_groups_init(&ctx->pinned_groups);
4709 perf_event_groups_init(&ctx->flexible_groups);
4710 INIT_LIST_HEAD(&ctx->event_list);
4711 refcount_set(&ctx->refcount, 1);
4712 }
4713
4714 static void
4715 __perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
4716 {
4717 epc->pmu = pmu;
4718 INIT_LIST_HEAD(&epc->pmu_ctx_entry);
4719 INIT_LIST_HEAD(&epc->pinned_active);
4720 INIT_LIST_HEAD(&epc->flexible_active);
4721 atomic_set(&epc->refcount, 1);
4722 }
4723
4724 static struct perf_event_context *
4725 alloc_perf_context(struct task_struct *task)
4726 {
4727 struct perf_event_context *ctx;
4728
4729 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4730 if (!ctx)
4731 return NULL;
4732
4733 __perf_event_init_context(ctx);
4734 if (task)
4735 ctx->task = get_task_struct(task);
4736
4737 return ctx;
4738 }
4739
4740 static struct task_struct *
4741 find_lively_task_by_vpid(pid_t vpid)
4742 {
4743 struct task_struct *task;
4744
4745 rcu_read_lock();
4746 if (!vpid)
4747 task = current;
4748 else
4749 task = find_task_by_vpid(vpid);
4750 if (task)
4751 get_task_struct(task);
4752 rcu_read_unlock();
4753
4754 if (!task)
4755 return ERR_PTR(-ESRCH);
4756
4757 return task;
4758 }
4759
4760 /*
4761 * Returns a matching context with refcount and pincount.
4762 */
4763 static struct perf_event_context *
4764 find_get_context(struct task_struct *task, struct perf_event *event)
4765 {
4766 struct perf_event_context *ctx, *clone_ctx = NULL;
4767 struct perf_cpu_context *cpuctx;
4768 unsigned long flags;
4769 int err;
4770
4771 if (!task) {
4772 /* Must be root to operate on a CPU event: */
4773 err = perf_allow_cpu(&event->attr);
4774 if (err)
4775 return ERR_PTR(err);
4776
4777 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
4778 ctx = &cpuctx->ctx;
4779 get_ctx(ctx);
4780 raw_spin_lock_irqsave(&ctx->lock, flags);
4781 ++ctx->pin_count;
4782 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4783
4784 return ctx;
4785 }
4786
4787 err = -EINVAL;
4788 retry:
4789 ctx = perf_lock_task_context(task, &flags);
4790 if (ctx) {
4791 clone_ctx = unclone_ctx(ctx);
4792 ++ctx->pin_count;
4793
4794 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4795
4796 if (clone_ctx)
4797 put_ctx(clone_ctx);
4798 } else {
4799 ctx = alloc_perf_context(task);
4800 err = -ENOMEM;
4801 if (!ctx)
4802 goto errout;
4803
4804 err = 0;
4805 mutex_lock(&task->perf_event_mutex);
4806 /*
4807 		 * If it has already passed perf_event_exit_task(),
4808 		 * we must see PF_EXITING; it takes this mutex too.
4809 */
4810 if (task->flags & PF_EXITING)
4811 err = -ESRCH;
4812 else if (task->perf_event_ctxp)
4813 err = -EAGAIN;
4814 else {
4815 get_ctx(ctx);
4816 ++ctx->pin_count;
4817 rcu_assign_pointer(task->perf_event_ctxp, ctx);
4818 }
4819 mutex_unlock(&task->perf_event_mutex);
4820
4821 if (unlikely(err)) {
4822 put_ctx(ctx);
4823
4824 if (err == -EAGAIN)
4825 goto retry;
4826 goto errout;
4827 }
4828 }
4829
4830 return ctx;
4831
4832 errout:
4833 return ERR_PTR(err);
4834 }
4835
4836 static struct perf_event_pmu_context *
4837 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
4838 struct perf_event *event)
4839 {
4840 struct perf_event_pmu_context *new = NULL, *epc;
4841 void *task_ctx_data = NULL;
4842
4843 if (!ctx->task) {
4844 /*
4845 * perf_pmu_migrate_context() / __perf_pmu_install_event()
4846 * relies on the fact that find_get_pmu_context() cannot fail
4847 * for CPU contexts.
4848 */
4849 struct perf_cpu_pmu_context *cpc;
4850
4851 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
4852 epc = &cpc->epc;
4853 raw_spin_lock_irq(&ctx->lock);
4854 if (!epc->ctx) {
4855 atomic_set(&epc->refcount, 1);
4856 epc->embedded = 1;
4857 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
4858 epc->ctx = ctx;
4859 } else {
4860 WARN_ON_ONCE(epc->ctx != ctx);
4861 atomic_inc(&epc->refcount);
4862 }
4863 raw_spin_unlock_irq(&ctx->lock);
4864 return epc;
4865 }
4866
4867 new = kzalloc(sizeof(*epc), GFP_KERNEL);
4868 if (!new)
4869 return ERR_PTR(-ENOMEM);
4870
4871 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
4872 task_ctx_data = alloc_task_ctx_data(pmu);
4873 if (!task_ctx_data) {
4874 kfree(new);
4875 return ERR_PTR(-ENOMEM);
4876 }
4877 }
4878
4879 __perf_init_event_pmu_context(new, pmu);
4880
4881 /*
4882 * XXX
4883 *
4884 * lockdep_assert_held(&ctx->mutex);
4885 *
4886 * can't because perf_event_init_task() doesn't actually hold the
4887 * child_ctx->mutex.
4888 */
4889
4890 raw_spin_lock_irq(&ctx->lock);
4891 list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) {
4892 if (epc->pmu == pmu) {
4893 WARN_ON_ONCE(epc->ctx != ctx);
4894 atomic_inc(&epc->refcount);
4895 goto found_epc;
4896 }
4897 }
4898
4899 epc = new;
4900 new = NULL;
4901
4902 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
4903 epc->ctx = ctx;
4904
4905 found_epc:
4906 if (task_ctx_data && !epc->task_ctx_data) {
4907 epc->task_ctx_data = task_ctx_data;
4908 task_ctx_data = NULL;
4909 ctx->nr_task_data++;
4910 }
4911 raw_spin_unlock_irq(&ctx->lock);
4912
4913 free_task_ctx_data(pmu, task_ctx_data);
4914 kfree(new);
4915
4916 return epc;
4917 }
4918
4919 static void get_pmu_ctx(struct perf_event_pmu_context *epc)
4920 {
4921 WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
4922 }
4923
4924 static void free_epc_rcu(struct rcu_head *head)
4925 {
4926 struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
4927
4928 kfree(epc->task_ctx_data);
4929 kfree(epc);
4930 }
4931
4932 static void put_pmu_ctx(struct perf_event_pmu_context *epc)
4933 {
4934 struct perf_event_context *ctx = epc->ctx;
4935 unsigned long flags;
4936
4937 /*
4938 * XXX
4939 *
4940 * lockdep_assert_held(&ctx->mutex);
4941 *
4942 * can't because of the call-site in _free_event()/put_event()
4943 * which isn't always called under ctx->mutex.
4944 */
4945 if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
4946 return;
4947
4948 WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
4949
4950 list_del_init(&epc->pmu_ctx_entry);
4951 epc->ctx = NULL;
4952
4953 WARN_ON_ONCE(!list_empty(&epc->pinned_active));
4954 WARN_ON_ONCE(!list_empty(&epc->flexible_active));
4955
4956 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4957
4958 if (epc->embedded)
4959 return;
4960
4961 call_rcu(&epc->rcu_head, free_epc_rcu);
4962 }
4963
4964 static void perf_event_free_filter(struct perf_event *event);
4965
4966 static void free_event_rcu(struct rcu_head *head)
4967 {
4968 struct perf_event *event = container_of(head, typeof(*event), rcu_head);
4969
4970 if (event->ns)
4971 put_pid_ns(event->ns);
4972 perf_event_free_filter(event);
4973 kmem_cache_free(perf_event_cache, event);
4974 }
4975
4976 static void ring_buffer_attach(struct perf_event *event,
4977 struct perf_buffer *rb);
4978
4979 static void detach_sb_event(struct perf_event *event)
4980 {
4981 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
4982
4983 raw_spin_lock(&pel->lock);
4984 list_del_rcu(&event->sb_list);
4985 raw_spin_unlock(&pel->lock);
4986 }
4987
4988 static bool is_sb_event(struct perf_event *event)
4989 {
4990 struct perf_event_attr *attr = &event->attr;
4991
4992 if (event->parent)
4993 return false;
4994
4995 if (event->attach_state & PERF_ATTACH_TASK)
4996 return false;
4997
4998 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
4999 attr->comm || attr->comm_exec ||
5000 attr->task || attr->ksymbol ||
5001 attr->context_switch || attr->text_poke ||
5002 attr->bpf_event)
5003 return true;
5004 return false;
5005 }
5006
5007 static void unaccount_pmu_sb_event(struct perf_event *event)
5008 {
5009 if (is_sb_event(event))
5010 detach_sb_event(event);
5011 }
5012
5013 #ifdef CONFIG_NO_HZ_FULL
5014 static DEFINE_SPINLOCK(nr_freq_lock);
5015 #endif
5016
5017 static void unaccount_freq_event_nohz(void)
5018 {
5019 #ifdef CONFIG_NO_HZ_FULL
5020 spin_lock(&nr_freq_lock);
5021 if (atomic_dec_and_test(&nr_freq_events))
5022 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
5023 spin_unlock(&nr_freq_lock);
5024 #endif
5025 }
5026
5027 static void unaccount_freq_event(void)
5028 {
5029 if (tick_nohz_full_enabled())
5030 unaccount_freq_event_nohz();
5031 else
5032 atomic_dec(&nr_freq_events);
5033 }
5034
5035 static void unaccount_event(struct perf_event *event)
5036 {
5037 bool dec = false;
5038
5039 if (event->parent)
5040 return;
5041
5042 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
5043 dec = true;
5044 if (event->attr.mmap || event->attr.mmap_data)
5045 atomic_dec(&nr_mmap_events);
5046 if (event->attr.build_id)
5047 atomic_dec(&nr_build_id_events);
5048 if (event->attr.comm)
5049 atomic_dec(&nr_comm_events);
5050 if (event->attr.namespaces)
5051 atomic_dec(&nr_namespaces_events);
5052 if (event->attr.cgroup)
5053 atomic_dec(&nr_cgroup_events);
5054 if (event->attr.task)
5055 atomic_dec(&nr_task_events);
5056 if (event->attr.freq)
5057 unaccount_freq_event();
5058 if (event->attr.context_switch) {
5059 dec = true;
5060 atomic_dec(&nr_switch_events);
5061 }
5062 if (is_cgroup_event(event))
5063 dec = true;
5064 if (has_branch_stack(event))
5065 dec = true;
5066 if (event->attr.ksymbol)
5067 atomic_dec(&nr_ksymbol_events);
5068 if (event->attr.bpf_event)
5069 atomic_dec(&nr_bpf_events);
5070 if (event->attr.text_poke)
5071 atomic_dec(&nr_text_poke_events);
5072
5073 if (dec) {
5074 if (!atomic_add_unless(&perf_sched_count, -1, 1))
5075 schedule_delayed_work(&perf_sched_work, HZ);
5076 }
5077
5078 unaccount_pmu_sb_event(event);
5079 }
5080
5081 static void perf_sched_delayed(struct work_struct *work)
5082 {
5083 mutex_lock(&perf_sched_mutex);
5084 if (atomic_dec_and_test(&perf_sched_count))
5085 static_branch_disable(&perf_sched_events);
5086 mutex_unlock(&perf_sched_mutex);
5087 }
5088
5089 /*
5090 * The following implement mutual exclusion of events on "exclusive" pmus
5091 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5092 * at a time, so we disallow creating events that might conflict, namely:
5093 *
5094 * 1) cpu-wide events in the presence of per-task events,
5095 * 2) per-task events in the presence of cpu-wide events,
5096 * 3) two matching events on the same perf_event_context.
5097 *
5098 * The former two cases are handled in the allocation path (perf_event_alloc(),
5099 * _free_event()), the latter -- before the first perf_install_in_context().
5100 */
5101 static int exclusive_event_init(struct perf_event *event)
5102 {
5103 struct pmu *pmu = event->pmu;
5104
5105 if (!is_exclusive_pmu(pmu))
5106 return 0;
5107
5108 /*
5109 * Prevent co-existence of per-task and cpu-wide events on the
5110 * same exclusive pmu.
5111 *
5112 * Negative pmu::exclusive_cnt means there are cpu-wide
5113 * events on this "exclusive" pmu, positive means there are
5114 * per-task events.
5115 *
5116 * Since this is called in perf_event_alloc() path, event::ctx
5117 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
5118 * to mean "per-task event", because unlike other attach states it
5119 * never gets cleared.
5120 */
5121 if (event->attach_state & PERF_ATTACH_TASK) {
5122 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
5123 return -EBUSY;
5124 } else {
5125 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
5126 return -EBUSY;
5127 }
5128
5129 return 0;
5130 }
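/*
 * Illustration of the counting convention above: two cpu-wide events on an
 * exclusive PMU leave pmu->exclusive_cnt at -2; a per-task event created next
 * fails atomic_inc_unless_negative() and gets -EBUSY, and vice versa.
 */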
5131
5132 static void exclusive_event_destroy(struct perf_event *event)
5133 {
5134 struct pmu *pmu = event->pmu;
5135
5136 if (!is_exclusive_pmu(pmu))
5137 return;
5138
5139 /* see comment in exclusive_event_init() */
5140 if (event->attach_state & PERF_ATTACH_TASK)
5141 atomic_dec(&pmu->exclusive_cnt);
5142 else
5143 atomic_inc(&pmu->exclusive_cnt);
5144 }
5145
5146 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
5147 {
5148 if ((e1->pmu == e2->pmu) &&
5149 (e1->cpu == e2->cpu ||
5150 e1->cpu == -1 ||
5151 e2->cpu == -1))
5152 return true;
5153 return false;
5154 }
5155
5156 static bool exclusive_event_installable(struct perf_event *event,
5157 struct perf_event_context *ctx)
5158 {
5159 struct perf_event *iter_event;
5160 struct pmu *pmu = event->pmu;
5161
5162 lockdep_assert_held(&ctx->mutex);
5163
5164 if (!is_exclusive_pmu(pmu))
5165 return true;
5166
5167 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
5168 if (exclusive_event_match(iter_event, event))
5169 return false;
5170 }
5171
5172 return true;
5173 }
5174
5175 static void perf_addr_filters_splice(struct perf_event *event,
5176 struct list_head *head);
5177
5178 static void _free_event(struct perf_event *event)
5179 {
5180 irq_work_sync(&event->pending_irq);
5181
5182 unaccount_event(event);
5183
5184 security_perf_event_free(event);
5185
5186 if (event->rb) {
5187 /*
5188 * Can happen when we close an event with re-directed output.
5189 *
5190 * Since we have a 0 refcount, perf_mmap_close() will skip
5191 * over us; possibly making our ring_buffer_put() the last.
5192 */
5193 mutex_lock(&event->mmap_mutex);
5194 ring_buffer_attach(event, NULL);
5195 mutex_unlock(&event->mmap_mutex);
5196 }
5197
5198 if (is_cgroup_event(event))
5199 perf_detach_cgroup(event);
5200
5201 if (!event->parent) {
5202 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
5203 put_callchain_buffers();
5204 }
5205
5206 perf_event_free_bpf_prog(event);
5207 perf_addr_filters_splice(event, NULL);
5208 kfree(event->addr_filter_ranges);
5209
5210 if (event->destroy)
5211 event->destroy(event);
5212
5213 /*
5214 * Must be after ->destroy(), due to uprobe_perf_close() using
5215 * hw.target.
5216 */
5217 if (event->hw.target)
5218 put_task_struct(event->hw.target);
5219
5220 if (event->pmu_ctx)
5221 put_pmu_ctx(event->pmu_ctx);
5222
5223 /*
5224 * perf_event_free_task() relies on put_ctx() being 'last', in particular
5225 * all task references must be cleaned up.
5226 */
5227 if (event->ctx)
5228 put_ctx(event->ctx);
5229
5230 exclusive_event_destroy(event);
5231 module_put(event->pmu->module);
5232
5233 call_rcu(&event->rcu_head, free_event_rcu);
5234 }
5235
5236 /*
5237 * Used to free events which have a known refcount of 1, such as in error paths
5238  * where the event isn't exposed yet, and for inherited events.
5239 */
5240 static void free_event(struct perf_event *event)
5241 {
5242 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
5243 "unexpected event refcount: %ld; ptr=%p\n",
5244 atomic_long_read(&event->refcount), event)) {
5245 /* leak to avoid use-after-free */
5246 return;
5247 }
5248
5249 _free_event(event);
5250 }
5251
5252 /*
5253 * Remove user event from the owner task.
5254 */
5255 static void perf_remove_from_owner(struct perf_event *event)
5256 {
5257 struct task_struct *owner;
5258
5259 rcu_read_lock();
5260 /*
5261 * Matches the smp_store_release() in perf_event_exit_task(). If we
5262 * observe !owner it means the list deletion is complete and we can
5263 * indeed free this event, otherwise we need to serialize on
5264 * owner->perf_event_mutex.
5265 */
5266 owner = READ_ONCE(event->owner);
5267 if (owner) {
5268 /*
5269 * Since delayed_put_task_struct() also drops the last
5270 * task reference we can safely take a new reference
5271 * while holding the rcu_read_lock().
5272 */
5273 get_task_struct(owner);
5274 }
5275 rcu_read_unlock();
5276
5277 if (owner) {
5278 /*
5279 * If we're here through perf_event_exit_task() we're already
5280 * holding ctx->mutex which would be an inversion wrt. the
5281 * normal lock order.
5282 *
5283 		 * However, we can safely take this lock because it's the child
5284 * ctx->mutex.
5285 */
5286 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
5287
5288 /*
5289 		 * We have to re-check the event->owner field; if it is cleared
5290 		 * we raced with perf_event_exit_task(). Acquiring the mutex
5291 * ensured they're done, and we can proceed with freeing the
5292 * event.
5293 */
5294 if (event->owner) {
5295 list_del_init(&event->owner_entry);
5296 smp_store_release(&event->owner, NULL);
5297 }
5298 mutex_unlock(&owner->perf_event_mutex);
5299 put_task_struct(owner);
5300 }
5301 }
5302
5303 static void put_event(struct perf_event *event)
5304 {
5305 if (!atomic_long_dec_and_test(&event->refcount))
5306 return;
5307
5308 _free_event(event);
5309 }
5310
5311 /*
5312 * Kill an event dead; while event:refcount will preserve the event
5313 * object, it will not preserve its functionality. Once the last 'user'
5314 * gives up the object, we'll destroy the thing.
5315 */
5316 int perf_event_release_kernel(struct perf_event *event)
5317 {
5318 struct perf_event_context *ctx = event->ctx;
5319 struct perf_event *child, *tmp;
5320 LIST_HEAD(free_list);
5321
5322 /*
5323 * If we got here through err_alloc: free_event(event); we will not
5324 * have attached to a context yet.
5325 */
5326 if (!ctx) {
5327 WARN_ON_ONCE(event->attach_state &
5328 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
5329 goto no_ctx;
5330 }
5331
5332 if (!is_kernel_event(event))
5333 perf_remove_from_owner(event);
5334
5335 ctx = perf_event_ctx_lock(event);
5336 WARN_ON_ONCE(ctx->parent_ctx);
5337
5338 /*
5339 * Mark this event as STATE_DEAD, there is no external reference to it
5340 * anymore.
5341 *
5342 * Anybody acquiring event->child_mutex after the below loop _must_
5343 * also see this, most importantly inherit_event() which will avoid
5344 * placing more children on the list.
5345 *
5346 * Thus this guarantees that we will in fact observe and kill _ALL_
5347 * child events.
5348 */
5349 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
5350
5351 perf_event_ctx_unlock(event, ctx);
5352
5353 again:
5354 mutex_lock(&event->child_mutex);
5355 list_for_each_entry(child, &event->child_list, child_list) {
5356 void *var = NULL;
5357
5358 /*
5359 * Cannot change, child events are not migrated, see the
5360 * comment with perf_event_ctx_lock_nested().
5361 */
5362 ctx = READ_ONCE(child->ctx);
5363 /*
5364 * Since child_mutex nests inside ctx::mutex, we must jump
5365 * through hoops. We start by grabbing a reference on the ctx.
5366 *
5367 * Since the event cannot get freed while we hold the
5368 * child_mutex, the context must also exist and have a !0
5369 * reference count.
5370 */
5371 get_ctx(ctx);
5372
5373 /*
5374 * Now that we have a ctx ref, we can drop child_mutex, and
5375 * acquire ctx::mutex without fear of it going away. Then we
5376 * can re-acquire child_mutex.
5377 */
5378 mutex_unlock(&event->child_mutex);
5379 mutex_lock(&ctx->mutex);
5380 mutex_lock(&event->child_mutex);
5381
5382 /*
5383 * Now that we hold ctx::mutex and child_mutex, revalidate our
5384 		 * state; if child is still the first entry, it didn't get freed
5385 * and we can continue doing so.
5386 */
5387 tmp = list_first_entry_or_null(&event->child_list,
5388 struct perf_event, child_list);
5389 if (tmp == child) {
5390 perf_remove_from_context(child, DETACH_GROUP);
5391 list_move(&child->child_list, &free_list);
5392 /*
5393 * This matches the refcount bump in inherit_event();
5394 * this can't be the last reference.
5395 */
5396 put_event(event);
5397 } else {
5398 var = &ctx->refcount;
5399 }
5400
5401 mutex_unlock(&event->child_mutex);
5402 mutex_unlock(&ctx->mutex);
5403 put_ctx(ctx);
5404
5405 if (var) {
5406 /*
5407 * If perf_event_free_task() has deleted all events from the
5408 * ctx while the child_mutex got released above, make sure to
5409 * notify about the preceding put_ctx().
5410 */
5411 smp_mb(); /* pairs with wait_var_event() */
5412 wake_up_var(var);
5413 }
5414 goto again;
5415 }
5416 mutex_unlock(&event->child_mutex);
5417
5418 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
5419 void *var = &child->ctx->refcount;
5420
5421 list_del(&child->child_list);
5422 free_event(child);
5423
5424 /*
5425 * Wake any perf_event_free_task() waiting for this event to be
5426 * freed.
5427 */
5428 smp_mb(); /* pairs with wait_var_event() */
5429 wake_up_var(var);
5430 }
5431
5432 no_ctx:
5433 put_event(event); /* Must be the 'last' reference */
5434 return 0;
5435 }
5436 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
5437
5438 /*
5439 * Called when the last reference to the file is gone.
5440 */
5441 static int perf_release(struct inode *inode, struct file *file)
5442 {
5443 perf_event_release_kernel(file->private_data);
5444 return 0;
5445 }
5446
5447 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5448 {
5449 struct perf_event *child;
5450 u64 total = 0;
5451
5452 *enabled = 0;
5453 *running = 0;
5454
5455 mutex_lock(&event->child_mutex);
5456
5457 (void)perf_event_read(event, false);
5458 total += perf_event_count(event);
5459
5460 *enabled += event->total_time_enabled +
5461 atomic64_read(&event->child_total_time_enabled);
5462 *running += event->total_time_running +
5463 atomic64_read(&event->child_total_time_running);
5464
5465 list_for_each_entry(child, &event->child_list, child_list) {
5466 (void)perf_event_read(child, false);
5467 total += perf_event_count(child);
5468 *enabled += child->total_time_enabled;
5469 *running += child->total_time_running;
5470 }
5471 mutex_unlock(&event->child_mutex);
5472
5473 return total;
5474 }
5475
5476 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5477 {
5478 struct perf_event_context *ctx;
5479 u64 count;
5480
5481 ctx = perf_event_ctx_lock(event);
5482 count = __perf_event_read_value(event, enabled, running);
5483 perf_event_ctx_unlock(event, ctx);
5484
5485 return count;
5486 }
5487 EXPORT_SYMBOL_GPL(perf_event_read_value);
5488
5489 static int __perf_read_group_add(struct perf_event *leader,
5490 u64 read_format, u64 *values)
5491 {
5492 struct perf_event_context *ctx = leader->ctx;
5493 struct perf_event *sub, *parent;
5494 unsigned long flags;
5495 int n = 1; /* skip @nr */
5496 int ret;
5497
5498 ret = perf_event_read(leader, true);
5499 if (ret)
5500 return ret;
5501
5502 raw_spin_lock_irqsave(&ctx->lock, flags);
5503 /*
5504 * Verify the grouping between the parent and child (inherited)
5505 	 * events is still intact.
5506 *
5507 * Specifically:
5508 * - leader->ctx->lock pins leader->sibling_list
5509 * - parent->child_mutex pins parent->child_list
5510 * - parent->ctx->mutex pins parent->sibling_list
5511 *
5512 * Because parent->ctx != leader->ctx (and child_list nests inside
5513 * ctx->mutex), group destruction is not atomic between children, also
5514 * see perf_event_release_kernel(). Additionally, parent can grow the
5515 * group.
5516 *
5517 * Therefore it is possible to have parent and child groups in a
5518 * different configuration and summing over such a beast makes no sense
5519 * whatsoever.
5520 *
5521 * Reject this.
5522 */
5523 parent = leader->parent;
5524 if (parent &&
5525 (parent->group_generation != leader->group_generation ||
5526 parent->nr_siblings != leader->nr_siblings)) {
5527 ret = -ECHILD;
5528 goto unlock;
5529 }
5530
5531 /*
5532 * Since we co-schedule groups, {enabled,running} times of siblings
5533 * will be identical to those of the leader, so we only publish one
5534 * set.
5535 */
5536 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5537 values[n++] += leader->total_time_enabled +
5538 atomic64_read(&leader->child_total_time_enabled);
5539 }
5540
5541 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5542 values[n++] += leader->total_time_running +
5543 atomic64_read(&leader->child_total_time_running);
5544 }
5545
5546 /*
5547 * Write {count,id} tuples for every sibling.
5548 */
5549 values[n++] += perf_event_count(leader);
5550 if (read_format & PERF_FORMAT_ID)
5551 values[n++] = primary_event_id(leader);
5552 if (read_format & PERF_FORMAT_LOST)
5553 values[n++] = atomic64_read(&leader->lost_samples);
5554
5555 for_each_sibling_event(sub, leader) {
5556 values[n++] += perf_event_count(sub);
5557 if (read_format & PERF_FORMAT_ID)
5558 values[n++] = primary_event_id(sub);
5559 if (read_format & PERF_FORMAT_LOST)
5560 values[n++] = atomic64_read(&sub->lost_samples);
5561 }
5562
5563 unlock:
5564 raw_spin_unlock_irqrestore(&ctx->lock, flags);
5565 return ret;
5566 }
5567
5568 static int perf_read_group(struct perf_event *event,
5569 u64 read_format, char __user *buf)
5570 {
5571 struct perf_event *leader = event->group_leader, *child;
5572 struct perf_event_context *ctx = leader->ctx;
5573 int ret;
5574 u64 *values;
5575
5576 lockdep_assert_held(&ctx->mutex);
5577
5578 values = kzalloc(event->read_size, GFP_KERNEL);
5579 if (!values)
5580 return -ENOMEM;
5581
5582 values[0] = 1 + leader->nr_siblings;
5583
5584 mutex_lock(&leader->child_mutex);
5585
5586 ret = __perf_read_group_add(leader, read_format, values);
5587 if (ret)
5588 goto unlock;
5589
5590 list_for_each_entry(child, &leader->child_list, child_list) {
5591 ret = __perf_read_group_add(child, read_format, values);
5592 if (ret)
5593 goto unlock;
5594 }
5595
5596 mutex_unlock(&leader->child_mutex);
5597
5598 ret = event->read_size;
5599 if (copy_to_user(buf, values, event->read_size))
5600 ret = -EFAULT;
5601 goto out;
5602
5603 unlock:
5604 mutex_unlock(&leader->child_mutex);
5605 out:
5606 kfree(values);
5607 return ret;
5608 }
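/*
 * For reference, the buffer that perf_read_group() fills for a read() with
 * PERF_FORMAT_GROUP set has the layout below (see the read_format
 * description in include/uapi/linux/perf_event.h); values are accumulated
 * across the leader, its siblings and all inherited children:
 *
 *	struct {
 *		u64 nr;			// 1 + nr_siblings
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;
 *			u64 id;		// if PERF_FORMAT_ID
 *			u64 lost;	// if PERF_FORMAT_LOST
 *		} cnt[nr];
 *	};
 */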
5609
5610 static int perf_read_one(struct perf_event *event,
5611 u64 read_format, char __user *buf)
5612 {
5613 u64 enabled, running;
5614 u64 values[5];
5615 int n = 0;
5616
5617 values[n++] = __perf_event_read_value(event, &enabled, &running);
5618 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5619 values[n++] = enabled;
5620 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5621 values[n++] = running;
5622 if (read_format & PERF_FORMAT_ID)
5623 values[n++] = primary_event_id(event);
5624 if (read_format & PERF_FORMAT_LOST)
5625 values[n++] = atomic64_read(&event->lost_samples);
5626
5627 if (copy_to_user(buf, values, n * sizeof(u64)))
5628 return -EFAULT;
5629
5630 return n * sizeof(u64);
5631 }
5632
5633 static bool is_event_hup(struct perf_event *event)
5634 {
5635 bool no_children;
5636
5637 if (event->state > PERF_EVENT_STATE_EXIT)
5638 return false;
5639
5640 mutex_lock(&event->child_mutex);
5641 no_children = list_empty(&event->child_list);
5642 mutex_unlock(&event->child_mutex);
5643 return no_children;
5644 }
5645
5646 /*
5647 * Read the performance event - simple non-blocking version for now
5648 */
5649 static ssize_t
5650 __perf_read(struct perf_event *event, char __user *buf, size_t count)
5651 {
5652 u64 read_format = event->attr.read_format;
5653 int ret;
5654
5655 /*
5656 * Return end-of-file for a read on an event that is in
5657 * error state (i.e. because it was pinned but it couldn't be
5658 * scheduled on to the CPU at some point).
5659 */
5660 if (event->state == PERF_EVENT_STATE_ERROR)
5661 return 0;
5662
5663 if (count < event->read_size)
5664 return -ENOSPC;
5665
5666 WARN_ON_ONCE(event->ctx->parent_ctx);
5667 if (read_format & PERF_FORMAT_GROUP)
5668 ret = perf_read_group(event, read_format, buf);
5669 else
5670 ret = perf_read_one(event, read_format, buf);
5671
5672 return ret;
5673 }
5674
5675 static ssize_t
5676 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
5677 {
5678 struct perf_event *event = file->private_data;
5679 struct perf_event_context *ctx;
5680 int ret;
5681
5682 ret = security_perf_event_read(event);
5683 if (ret)
5684 return ret;
5685
5686 ctx = perf_event_ctx_lock(event);
5687 ret = __perf_read(event, buf, count);
5688 perf_event_ctx_unlock(event, ctx);
5689
5690 return ret;
5691 }
5692
5693 static __poll_t perf_poll(struct file *file, poll_table *wait)
5694 {
5695 struct perf_event *event = file->private_data;
5696 struct perf_buffer *rb;
5697 __poll_t events = EPOLLHUP;
5698
5699 poll_wait(file, &event->waitq, wait);
5700
5701 if (is_event_hup(event))
5702 return events;
5703
5704 /*
5705 * Pin the event->rb by taking event->mmap_mutex; otherwise
5706 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
5707 */
5708 mutex_lock(&event->mmap_mutex);
5709 rb = event->rb;
5710 if (rb)
5711 events = atomic_xchg(&rb->poll, 0);
5712 mutex_unlock(&event->mmap_mutex);
5713 return events;
5714 }
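/*
 * A userspace sketch of how this is typically driven (illustrative only):
 * after mmap()ing the ring buffer, poll() blocks until the buffer signals a
 * wakeup (e.g. attr.wakeup_events was reached) or the event hangs up because
 * all tasks it was attached to have exited:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			;	// no more data will arrive
 *		if (pfd.revents & POLLIN)
 *			;	// consume records from the mmap()ed buffer
 *	}
 */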
5715
5716 static void _perf_event_reset(struct perf_event *event)
5717 {
5718 (void)perf_event_read(event, false);
5719 local64_set(&event->count, 0);
5720 perf_event_update_userpage(event);
5721 }
5722
5723 /* Assume it's not an event with inherit set. */
5724 u64 perf_event_pause(struct perf_event *event, bool reset)
5725 {
5726 struct perf_event_context *ctx;
5727 u64 count;
5728
5729 ctx = perf_event_ctx_lock(event);
5730 WARN_ON_ONCE(event->attr.inherit);
5731 _perf_event_disable(event);
5732 count = local64_read(&event->count);
5733 if (reset)
5734 local64_set(&event->count, 0);
5735 perf_event_ctx_unlock(event, ctx);
5736
5737 return count;
5738 }
5739 EXPORT_SYMBOL_GPL(perf_event_pause);
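/*
 * Illustrative in-kernel usage (not part of this file): a consumer that owns
 * a non-inherited event can atomically stop the counter, sample its value
 * and optionally zero it, then resume counting later:
 *
 *	u64 val = perf_event_pause(event, true);	// read and zero
 *	...
 *	perf_event_enable(event);			// resume counting
 *
 * As asserted above, "event" must not have attr.inherit set.
 */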
5740
5741 /*
5742 * Holding the top-level event's child_mutex means that any
5743 * descendant process that has inherited this event will block
5744 * in perf_event_exit_event() if it goes to exit, thus satisfying the
5745 * task existence requirements of perf_event_enable/disable.
5746 */
5747 static void perf_event_for_each_child(struct perf_event *event,
5748 void (*func)(struct perf_event *))
5749 {
5750 struct perf_event *child;
5751
5752 WARN_ON_ONCE(event->ctx->parent_ctx);
5753
5754 mutex_lock(&event->child_mutex);
5755 func(event);
5756 list_for_each_entry(child, &event->child_list, child_list)
5757 func(child);
5758 mutex_unlock(&event->child_mutex);
5759 }
5760
5761 static void perf_event_for_each(struct perf_event *event,
5762 void (*func)(struct perf_event *))
5763 {
5764 struct perf_event_context *ctx = event->ctx;
5765 struct perf_event *sibling;
5766
5767 lockdep_assert_held(&ctx->mutex);
5768
5769 event = event->group_leader;
5770
5771 perf_event_for_each_child(event, func);
5772 for_each_sibling_event(sibling, event)
5773 perf_event_for_each_child(sibling, func);
5774 }
5775
5776 static void __perf_event_period(struct perf_event *event,
5777 struct perf_cpu_context *cpuctx,
5778 struct perf_event_context *ctx,
5779 void *info)
5780 {
5781 u64 value = *((u64 *)info);
5782 bool active;
5783
5784 if (event->attr.freq) {
5785 event->attr.sample_freq = value;
5786 } else {
5787 event->attr.sample_period = value;
5788 event->hw.sample_period = value;
5789 }
5790
5791 active = (event->state == PERF_EVENT_STATE_ACTIVE);
5792 if (active) {
5793 perf_pmu_disable(event->pmu);
5794 /*
5795 * We could be throttled; unthrottle now to avoid the tick
5796 * trying to unthrottle while we already re-started the event.
5797 */
5798 if (event->hw.interrupts == MAX_INTERRUPTS) {
5799 event->hw.interrupts = 0;
5800 perf_log_throttle(event, 1);
5801 }
5802 event->pmu->stop(event, PERF_EF_UPDATE);
5803 }
5804
5805 local64_set(&event->hw.period_left, 0);
5806
5807 if (active) {
5808 event->pmu->start(event, PERF_EF_RELOAD);
5809 perf_pmu_enable(event->pmu);
5810 }
5811 }
5812
5813 static int perf_event_check_period(struct perf_event *event, u64 value)
5814 {
5815 return event->pmu->check_period(event, value);
5816 }
5817
5818 static int _perf_event_period(struct perf_event *event, u64 value)
5819 {
5820 if (!is_sampling_event(event))
5821 return -EINVAL;
5822
5823 if (!value)
5824 return -EINVAL;
5825
5826 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
5827 return -EINVAL;
5828
5829 if (perf_event_check_period(event, value))
5830 return -EINVAL;
5831
5832 if (!event->attr.freq && (value & (1ULL << 63)))
5833 return -EINVAL;
5834
5835 event_function_call(event, __perf_event_period, &value);
5836
5837 return 0;
5838 }
5839
5840 int perf_event_period(struct perf_event *event, u64 value)
5841 {
5842 struct perf_event_context *ctx;
5843 int ret;
5844
5845 ctx = perf_event_ctx_lock(event);
5846 ret = _perf_event_period(event, value);
5847 perf_event_ctx_unlock(event, ctx);
5848
5849 return ret;
5850 }
5851 EXPORT_SYMBOL_GPL(perf_event_period);
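/*
 * From userspace the same operation is reachable via the ioctl() interface
 * handled further below; a hedged example (error handling omitted):
 *
 *	u64 new_period = 100000;
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period);
 *
 * The value is rejected (-EINVAL) for non-sampling events, a zero period,
 * or a frequency above sysctl kernel.perf_event_max_sample_rate.
 */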
5852
5853 static const struct file_operations perf_fops;
5854
5855 static inline int perf_fget_light(int fd, struct fd *p)
5856 {
5857 struct fd f = fdget(fd);
5858 if (!f.file)
5859 return -EBADF;
5860
5861 if (f.file->f_op != &perf_fops) {
5862 fdput(f);
5863 return -EBADF;
5864 }
5865 *p = f;
5866 return 0;
5867 }
5868
5869 static int perf_event_set_output(struct perf_event *event,
5870 struct perf_event *output_event);
5871 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5872 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5873 struct perf_event_attr *attr);
5874
5875 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
5876 {
5877 void (*func)(struct perf_event *);
5878 u32 flags = arg;
5879
5880 switch (cmd) {
5881 case PERF_EVENT_IOC_ENABLE:
5882 func = _perf_event_enable;
5883 break;
5884 case PERF_EVENT_IOC_DISABLE:
5885 func = _perf_event_disable;
5886 break;
5887 case PERF_EVENT_IOC_RESET:
5888 func = _perf_event_reset;
5889 break;
5890
5891 case PERF_EVENT_IOC_REFRESH:
5892 return _perf_event_refresh(event, arg);
5893
5894 case PERF_EVENT_IOC_PERIOD:
5895 {
5896 u64 value;
5897
5898 if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
5899 return -EFAULT;
5900
5901 return _perf_event_period(event, value);
5902 }
5903 case PERF_EVENT_IOC_ID:
5904 {
5905 u64 id = primary_event_id(event);
5906
5907 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
5908 return -EFAULT;
5909 return 0;
5910 }
5911
5912 case PERF_EVENT_IOC_SET_OUTPUT:
5913 {
5914 int ret;
5915 if (arg != -1) {
5916 struct perf_event *output_event;
5917 struct fd output;
5918 ret = perf_fget_light(arg, &output);
5919 if (ret)
5920 return ret;
5921 output_event = output.file->private_data;
5922 ret = perf_event_set_output(event, output_event);
5923 fdput(output);
5924 } else {
5925 ret = perf_event_set_output(event, NULL);
5926 }
5927 return ret;
5928 }
5929
5930 case PERF_EVENT_IOC_SET_FILTER:
5931 return perf_event_set_filter(event, (void __user *)arg);
5932
5933 case PERF_EVENT_IOC_SET_BPF:
5934 {
5935 struct bpf_prog *prog;
5936 int err;
5937
5938 prog = bpf_prog_get(arg);
5939 if (IS_ERR(prog))
5940 return PTR_ERR(prog);
5941
5942 err = perf_event_set_bpf_prog(event, prog, 0);
5943 if (err) {
5944 bpf_prog_put(prog);
5945 return err;
5946 }
5947
5948 return 0;
5949 }
5950
5951 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
5952 struct perf_buffer *rb;
5953
5954 rcu_read_lock();
5955 rb = rcu_dereference(event->rb);
5956 if (!rb || !rb->nr_pages) {
5957 rcu_read_unlock();
5958 return -EINVAL;
5959 }
5960 rb_toggle_paused(rb, !!arg);
5961 rcu_read_unlock();
5962 return 0;
5963 }
5964
5965 case PERF_EVENT_IOC_QUERY_BPF:
5966 return perf_event_query_prog_array(event, (void __user *)arg);
5967
5968 case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
5969 struct perf_event_attr new_attr;
5970 int err = perf_copy_attr((struct perf_event_attr __user *)arg,
5971 &new_attr);
5972
5973 if (err)
5974 return err;
5975
5976 return perf_event_modify_attr(event, &new_attr);
5977 }
5978 default:
5979 return -ENOTTY;
5980 }
5981
5982 if (flags & PERF_IOC_FLAG_GROUP)
5983 perf_event_for_each(event, func);
5984 else
5985 perf_event_for_each_child(event, func);
5986
5987 return 0;
5988 }
5989
5990 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5991 {
5992 struct perf_event *event = file->private_data;
5993 struct perf_event_context *ctx;
5994 long ret;
5995
5996 /* Treat ioctl like writes as it is likely a mutating operation. */
5997 ret = security_perf_event_write(event);
5998 if (ret)
5999 return ret;
6000
6001 ctx = perf_event_ctx_lock(event);
6002 ret = _perf_ioctl(event, cmd, arg);
6003 perf_event_ctx_unlock(event, ctx);
6004
6005 return ret;
6006 }
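/*
 * Example of how the flag-based commands above compose (illustrative only):
 * enabling a whole group through any member, versus acting on one event:
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);	// whole group
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);			// this event only
 *
 * "this event only" still includes its inherited child events, by way of
 * perf_event_for_each_child().
 */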
6007
6008 #ifdef CONFIG_COMPAT
6009 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
6010 unsigned long arg)
6011 {
6012 switch (_IOC_NR(cmd)) {
6013 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
6014 case _IOC_NR(PERF_EVENT_IOC_ID):
6015 case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
6016 case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
6017 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
6018 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
6019 cmd &= ~IOCSIZE_MASK;
6020 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
6021 }
6022 break;
6023 }
6024 return perf_ioctl(file, cmd, arg);
6025 }
6026 #else
6027 # define perf_compat_ioctl NULL
6028 #endif
6029
6030 int perf_event_task_enable(void)
6031 {
6032 struct perf_event_context *ctx;
6033 struct perf_event *event;
6034
6035 mutex_lock(&current->perf_event_mutex);
6036 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6037 ctx = perf_event_ctx_lock(event);
6038 perf_event_for_each_child(event, _perf_event_enable);
6039 perf_event_ctx_unlock(event, ctx);
6040 }
6041 mutex_unlock(&current->perf_event_mutex);
6042
6043 return 0;
6044 }
6045
6046 int perf_event_task_disable(void)
6047 {
6048 struct perf_event_context *ctx;
6049 struct perf_event *event;
6050
6051 mutex_lock(&current->perf_event_mutex);
6052 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6053 ctx = perf_event_ctx_lock(event);
6054 perf_event_for_each_child(event, _perf_event_disable);
6055 perf_event_ctx_unlock(event, ctx);
6056 }
6057 mutex_unlock(&current->perf_event_mutex);
6058
6059 return 0;
6060 }
6061
6062 static int perf_event_index(struct perf_event *event)
6063 {
6064 if (event->hw.state & PERF_HES_STOPPED)
6065 return 0;
6066
6067 if (event->state != PERF_EVENT_STATE_ACTIVE)
6068 return 0;
6069
6070 return event->pmu->event_idx(event);
6071 }
6072
6073 static void perf_event_init_userpage(struct perf_event *event)
6074 {
6075 struct perf_event_mmap_page *userpg;
6076 struct perf_buffer *rb;
6077
6078 rcu_read_lock();
6079 rb = rcu_dereference(event->rb);
6080 if (!rb)
6081 goto unlock;
6082
6083 userpg = rb->user_page;
6084
6085 /* Allow new userspace to detect that bit 0 is deprecated */
6086 userpg->cap_bit0_is_deprecated = 1;
6087 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
6088 userpg->data_offset = PAGE_SIZE;
6089 userpg->data_size = perf_data_size(rb);
6090
6091 unlock:
6092 rcu_read_unlock();
6093 }
6094
6095 void __weak arch_perf_update_userpage(
6096 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
6097 {
6098 }
6099
6100 /*
6101 * Callers need to ensure there can be no nesting of this function, otherwise
6102 * the seqlock logic goes bad. We cannot serialize this because the arch
6103 * code calls this from NMI context.
6104 */
6105 void perf_event_update_userpage(struct perf_event *event)
6106 {
6107 struct perf_event_mmap_page *userpg;
6108 struct perf_buffer *rb;
6109 u64 enabled, running, now;
6110
6111 rcu_read_lock();
6112 rb = rcu_dereference(event->rb);
6113 if (!rb)
6114 goto unlock;
6115
6116 /*
6117 * compute total_time_enabled, total_time_running
6118 * based on snapshot values taken when the event
6119 * was last scheduled in.
6120 *
6121 * we cannot simply call update_context_time()
6122 * because of locking issues, as we can be called in
6123 * NMI context
6124 */
6125 calc_timer_values(event, &now, &enabled, &running);
6126
6127 userpg = rb->user_page;
6128 /*
6129 * Disable preemption to guarantee consistent time stamps are stored to
6130 * the user page.
6131 */
6132 preempt_disable();
6133 ++userpg->lock;
6134 barrier();
6135 userpg->index = perf_event_index(event);
6136 userpg->offset = perf_event_count(event);
6137 if (userpg->index)
6138 userpg->offset -= local64_read(&event->hw.prev_count);
6139
6140 userpg->time_enabled = enabled +
6141 atomic64_read(&event->child_total_time_enabled);
6142
6143 userpg->time_running = running +
6144 atomic64_read(&event->child_total_time_running);
6145
6146 arch_perf_update_userpage(event, userpg, now);
6147
6148 barrier();
6149 ++userpg->lock;
6150 preempt_enable();
6151 unlock:
6152 rcu_read_unlock();
6153 }
6154 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
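/*
 * The ->lock field written above acts as a sequence count that userspace is
 * expected to pair with; a minimal reader sketch, following the pattern
 * documented for struct perf_event_mmap_page in
 * include/uapi/linux/perf_event.h, where "pc" is the mmap()ed control page:
 *
 *	u32 seq;
 *	u64 offset, enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 */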
6155
6156 static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
6157 {
6158 struct perf_event *event = vmf->vma->vm_file->private_data;
6159 struct perf_buffer *rb;
6160 vm_fault_t ret = VM_FAULT_SIGBUS;
6161
6162 if (vmf->flags & FAULT_FLAG_MKWRITE) {
6163 if (vmf->pgoff == 0)
6164 ret = 0;
6165 return ret;
6166 }
6167
6168 rcu_read_lock();
6169 rb = rcu_dereference(event->rb);
6170 if (!rb)
6171 goto unlock;
6172
6173 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
6174 goto unlock;
6175
6176 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
6177 if (!vmf->page)
6178 goto unlock;
6179
6180 get_page(vmf->page);
6181 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
6182 vmf->page->index = vmf->pgoff;
6183
6184 ret = 0;
6185 unlock:
6186 rcu_read_unlock();
6187
6188 return ret;
6189 }
6190
6191 static void ring_buffer_attach(struct perf_event *event,
6192 struct perf_buffer *rb)
6193 {
6194 struct perf_buffer *old_rb = NULL;
6195 unsigned long flags;
6196
6197 WARN_ON_ONCE(event->parent);
6198
6199 if (event->rb) {
6200 /*
6201 * Should be impossible, we set this when removing
6202 * event->rb_entry and wait/clear when adding event->rb_entry.
6203 */
6204 WARN_ON_ONCE(event->rcu_pending);
6205
6206 old_rb = event->rb;
6207 spin_lock_irqsave(&old_rb->event_lock, flags);
6208 list_del_rcu(&event->rb_entry);
6209 spin_unlock_irqrestore(&old_rb->event_lock, flags);
6210
6211 event->rcu_batches = get_state_synchronize_rcu();
6212 event->rcu_pending = 1;
6213 }
6214
6215 if (rb) {
6216 if (event->rcu_pending) {
6217 cond_synchronize_rcu(event->rcu_batches);
6218 event->rcu_pending = 0;
6219 }
6220
6221 spin_lock_irqsave(&rb->event_lock, flags);
6222 list_add_rcu(&event->rb_entry, &rb->event_list);
6223 spin_unlock_irqrestore(&rb->event_lock, flags);
6224 }
6225
6226 /*
6227 * Avoid racing with perf_mmap_close(AUX): stop the event
6228 * before swizzling the event::rb pointer; if it's getting
6229 * unmapped, its aux_mmap_count will be 0 and it won't
6230 * restart. See the comment in __perf_pmu_output_stop().
6231 *
6232 * Data will inevitably be lost when set_output is done in
6233 * mid-air, but then again, whoever does it like this is
6234 * not in for the data anyway.
6235 */
6236 if (has_aux(event))
6237 perf_event_stop(event, 0);
6238
6239 rcu_assign_pointer(event->rb, rb);
6240
6241 if (old_rb) {
6242 ring_buffer_put(old_rb);
6243 /*
6244 * Since we detached before setting the new rb, so that we
6245 * could attach the new rb, we could have missed a wakeup.
6246 * Provide it now.
6247 */
6248 wake_up_all(&event->waitq);
6249 }
6250 }
6251
6252 static void ring_buffer_wakeup(struct perf_event *event)
6253 {
6254 struct perf_buffer *rb;
6255
6256 if (event->parent)
6257 event = event->parent;
6258
6259 rcu_read_lock();
6260 rb = rcu_dereference(event->rb);
6261 if (rb) {
6262 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
6263 wake_up_all(&event->waitq);
6264 }
6265 rcu_read_unlock();
6266 }
6267
6268 struct perf_buffer *ring_buffer_get(struct perf_event *event)
6269 {
6270 struct perf_buffer *rb;
6271
6272 if (event->parent)
6273 event = event->parent;
6274
6275 rcu_read_lock();
6276 rb = rcu_dereference(event->rb);
6277 if (rb) {
6278 if (!refcount_inc_not_zero(&rb->refcount))
6279 rb = NULL;
6280 }
6281 rcu_read_unlock();
6282
6283 return rb;
6284 }
6285
6286 void ring_buffer_put(struct perf_buffer *rb)
6287 {
6288 if (!refcount_dec_and_test(&rb->refcount))
6289 return;
6290
6291 WARN_ON_ONCE(!list_empty(&rb->event_list));
6292
6293 call_rcu(&rb->rcu_head, rb_free_rcu);
6294 }
6295
6296 static void perf_mmap_open(struct vm_area_struct *vma)
6297 {
6298 struct perf_event *event = vma->vm_file->private_data;
6299
6300 atomic_inc(&event->mmap_count);
6301 atomic_inc(&event->rb->mmap_count);
6302
6303 if (vma->vm_pgoff)
6304 atomic_inc(&event->rb->aux_mmap_count);
6305
6306 if (event->pmu->event_mapped)
6307 event->pmu->event_mapped(event, vma->vm_mm);
6308 }
6309
6310 static void perf_pmu_output_stop(struct perf_event *event);
6311
6312 /*
6313 * A buffer can be mmap()ed multiple times; either directly through the same
6314 * event, or through other events by use of perf_event_set_output().
6315 *
6316 * In order to undo the VM accounting done by perf_mmap() we need to destroy
6317 * the buffer here, where we still have a VM context. This means we need
6318 * to detach all events redirecting to us.
6319 */
6320 static void perf_mmap_close(struct vm_area_struct *vma)
6321 {
6322 struct perf_event *event = vma->vm_file->private_data;
6323 struct perf_buffer *rb = ring_buffer_get(event);
6324 struct user_struct *mmap_user = rb->mmap_user;
6325 int mmap_locked = rb->mmap_locked;
6326 unsigned long size = perf_data_size(rb);
6327 bool detach_rest = false;
6328
6329 if (event->pmu->event_unmapped)
6330 event->pmu->event_unmapped(event, vma->vm_mm);
6331
6332 /*
6333 * rb->aux_mmap_count will always drop before rb->mmap_count and
6334 * event->mmap_count, so it is ok to use event->mmap_mutex to
6335 * serialize with perf_mmap here.
6336 */
6337 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
6338 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
6339 /*
6340 * Stop all AUX events that are writing to this buffer,
6341 * so that we can free its AUX pages and corresponding PMU
6342 * data. Note that after rb::aux_mmap_count dropped to zero,
6343 * they won't start any more (see perf_aux_output_begin()).
6344 */
6345 perf_pmu_output_stop(event);
6346
6347 /* now it's safe to free the pages */
6348 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
6349 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
6350
6351 /* this has to be the last one */
6352 rb_free_aux(rb);
6353 WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
6354
6355 mutex_unlock(&event->mmap_mutex);
6356 }
6357
6358 if (atomic_dec_and_test(&rb->mmap_count))
6359 detach_rest = true;
6360
6361 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
6362 goto out_put;
6363
6364 ring_buffer_attach(event, NULL);
6365 mutex_unlock(&event->mmap_mutex);
6366
6367 /* If there's still other mmap()s of this buffer, we're done. */
6368 if (!detach_rest)
6369 goto out_put;
6370
6371 /*
6372 * No other mmap()s, detach from all other events that might redirect
6373 * into the now unreachable buffer. Somewhat complicated by the
6374 * fact that rb::event_lock otherwise nests inside mmap_mutex.
6375 */
6376 again:
6377 rcu_read_lock();
6378 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
6379 if (!atomic_long_inc_not_zero(&event->refcount)) {
6380 /*
6381 * This event is en-route to free_event() which will
6382 * detach it and remove it from the list.
6383 */
6384 continue;
6385 }
6386 rcu_read_unlock();
6387
6388 mutex_lock(&event->mmap_mutex);
6389 /*
6390 * Check we didn't race with perf_event_set_output() which can
6391 * swizzle the rb from under us while we were waiting to
6392 * acquire mmap_mutex.
6393 *
6394 * If we find a different rb; ignore this event, a next
6395 * iteration will no longer find it on the list. We have to
6396 * still restart the iteration to make sure we're not now
6397 * iterating the wrong list.
6398 */
6399 if (event->rb == rb)
6400 ring_buffer_attach(event, NULL);
6401
6402 mutex_unlock(&event->mmap_mutex);
6403 put_event(event);
6404
6405 /*
6406 * Restart the iteration; either we're on the wrong list or
6407 * destroyed its integrity by doing a deletion.
6408 */
6409 goto again;
6410 }
6411 rcu_read_unlock();
6412
6413 /*
6414 * It could be there's still a few 0-ref events on the list; they'll
6415 * get cleaned up by free_event() -- they'll also still have their
6416 * ref on the rb and will free it whenever they are done with it.
6417 *
6418 * Aside from that, this buffer is 'fully' detached and unmapped,
6419 * undo the VM accounting.
6420 */
6421
6422 atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
6423 &mmap_user->locked_vm);
6424 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
6425 free_uid(mmap_user);
6426
6427 out_put:
6428 ring_buffer_put(rb); /* could be last */
6429 }
6430
6431 static const struct vm_operations_struct perf_mmap_vmops = {
6432 .open = perf_mmap_open,
6433 .close = perf_mmap_close, /* non mergeable */
6434 .fault = perf_mmap_fault,
6435 .page_mkwrite = perf_mmap_fault,
6436 };
6437
6438 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
6439 {
6440 struct perf_event *event = file->private_data;
6441 unsigned long user_locked, user_lock_limit;
6442 struct user_struct *user = current_user();
6443 struct perf_buffer *rb = NULL;
6444 unsigned long locked, lock_limit;
6445 unsigned long vma_size;
6446 unsigned long nr_pages;
6447 long user_extra = 0, extra = 0;
6448 int ret = 0, flags = 0;
6449
6450 /*
6451 * Don't allow mmap() of inherited per-task counters. This would
6452 * create a performance issue due to all children writing to the
6453 * same rb.
6454 */
6455 if (event->cpu == -1 && event->attr.inherit)
6456 return -EINVAL;
6457
6458 if (!(vma->vm_flags & VM_SHARED))
6459 return -EINVAL;
6460
6461 ret = security_perf_event_read(event);
6462 if (ret)
6463 return ret;
6464
6465 vma_size = vma->vm_end - vma->vm_start;
6466
6467 if (vma->vm_pgoff == 0) {
6468 nr_pages = (vma_size / PAGE_SIZE) - 1;
6469 } else {
6470 /*
6471 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
6472 * mapped, all subsequent mappings should have the same size
6473 * and offset. Must be above the normal perf buffer.
6474 */
6475 u64 aux_offset, aux_size;
6476
6477 if (!event->rb)
6478 return -EINVAL;
6479
6480 nr_pages = vma_size / PAGE_SIZE;
6481
6482 mutex_lock(&event->mmap_mutex);
6483 ret = -EINVAL;
6484
6485 rb = event->rb;
6486 if (!rb)
6487 goto aux_unlock;
6488
6489 aux_offset = READ_ONCE(rb->user_page->aux_offset);
6490 aux_size = READ_ONCE(rb->user_page->aux_size);
6491
6492 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
6493 goto aux_unlock;
6494
6495 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
6496 goto aux_unlock;
6497
6498 /* already mapped with a different offset */
6499 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
6500 goto aux_unlock;
6501
6502 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
6503 goto aux_unlock;
6504
6505 /* already mapped with a different size */
6506 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
6507 goto aux_unlock;
6508
6509 if (!is_power_of_2(nr_pages))
6510 goto aux_unlock;
6511
6512 if (!atomic_inc_not_zero(&rb->mmap_count))
6513 goto aux_unlock;
6514
6515 if (rb_has_aux(rb)) {
6516 atomic_inc(&rb->aux_mmap_count);
6517 ret = 0;
6518 goto unlock;
6519 }
6520
6521 atomic_set(&rb->aux_mmap_count, 1);
6522 user_extra = nr_pages;
6523
6524 goto accounting;
6525 }
6526
6527 /*
6528 * If we have rb pages ensure they're a power-of-two number, so we
6529 * can do bitmasks instead of modulo.
6530 */
6531 if (nr_pages != 0 && !is_power_of_2(nr_pages))
6532 return -EINVAL;
6533
6534 if (vma_size != PAGE_SIZE * (1 + nr_pages))
6535 return -EINVAL;
6536
6537 WARN_ON_ONCE(event->ctx->parent_ctx);
6538 again:
6539 mutex_lock(&event->mmap_mutex);
6540 if (event->rb) {
6541 if (data_page_nr(event->rb) != nr_pages) {
6542 ret = -EINVAL;
6543 goto unlock;
6544 }
6545
6546 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
6547 /*
6548 * Raced against perf_mmap_close(); remove the
6549 * event and try again.
6550 */
6551 ring_buffer_attach(event, NULL);
6552 mutex_unlock(&event->mmap_mutex);
6553 goto again;
6554 }
6555
6556 goto unlock;
6557 }
6558
6559 user_extra = nr_pages + 1;
6560
6561 accounting:
6562 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
6563
6564 /*
6565 * Increase the limit linearly with more CPUs:
6566 */
6567 user_lock_limit *= num_online_cpus();
6568
6569 user_locked = atomic_long_read(&user->locked_vm);
6570
6571 /*
6572 * sysctl_perf_event_mlock may have changed, so that
6573 * user->locked_vm > user_lock_limit
6574 */
6575 if (user_locked > user_lock_limit)
6576 user_locked = user_lock_limit;
6577 user_locked += user_extra;
6578
6579 if (user_locked > user_lock_limit) {
6580 /*
6581 * charge locked_vm until it hits user_lock_limit;
6582 * charge the rest from pinned_vm
6583 */
6584 extra = user_locked - user_lock_limit;
6585 user_extra -= extra;
6586 }
6587
6588 lock_limit = rlimit(RLIMIT_MEMLOCK);
6589 lock_limit >>= PAGE_SHIFT;
6590 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
6591
6592 if ((locked > lock_limit) && perf_is_paranoid() &&
6593 !capable(CAP_IPC_LOCK)) {
6594 ret = -EPERM;
6595 goto unlock;
6596 }
6597
6598 WARN_ON(!rb && event->rb);
6599
6600 if (vma->vm_flags & VM_WRITE)
6601 flags |= RING_BUFFER_WRITABLE;
6602
6603 if (!rb) {
6604 rb = rb_alloc(nr_pages,
6605 event->attr.watermark ? event->attr.wakeup_watermark : 0,
6606 event->cpu, flags);
6607
6608 if (!rb) {
6609 ret = -ENOMEM;
6610 goto unlock;
6611 }
6612
6613 atomic_set(&rb->mmap_count, 1);
6614 rb->mmap_user = get_current_user();
6615 rb->mmap_locked = extra;
6616
6617 ring_buffer_attach(event, rb);
6618
6619 perf_event_update_time(event);
6620 perf_event_init_userpage(event);
6621 perf_event_update_userpage(event);
6622 } else {
6623 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
6624 event->attr.aux_watermark, flags);
6625 if (!ret)
6626 rb->aux_mmap_locked = extra;
6627 }
6628
6629 unlock:
6630 if (!ret) {
6631 atomic_long_add(user_extra, &user->locked_vm);
6632 atomic64_add(extra, &vma->vm_mm->pinned_vm);
6633
6634 atomic_inc(&event->mmap_count);
6635 } else if (rb) {
6636 atomic_dec(&rb->mmap_count);
6637 }
6638 aux_unlock:
6639 mutex_unlock(&event->mmap_mutex);
6640
6641 /*
6642 * Since pinned accounting is per vm we cannot allow fork() to copy our
6643 * vma.
6644 */
6645 vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
6646 vma->vm_ops = &perf_mmap_vmops;
6647
6648 if (event->pmu->event_mapped)
6649 event->pmu->event_mapped(event, vma->vm_mm);
6650
6651 return ret;
6652 }
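/*
 * The sizing rules enforced above, from the userspace side (illustrative):
 * the data area must be 1 + 2^n pages, the first page being the control
 * page; an AUX area, if used, is mapped separately at aux_offset with its
 * own power-of-two page count:
 *
 *	size_t pgsz = sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, (1 + 8) * pgsz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *
 * Mapping an inherited per-task event, a non power-of-two data page count,
 * or a second mapping with a mismatched size all fail with -EINVAL.
 */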
6653
6654 static int perf_fasync(int fd, struct file *filp, int on)
6655 {
6656 struct inode *inode = file_inode(filp);
6657 struct perf_event *event = filp->private_data;
6658 int retval;
6659
6660 inode_lock(inode);
6661 retval = fasync_helper(fd, filp, on, &event->fasync);
6662 inode_unlock(inode);
6663
6664 if (retval < 0)
6665 return retval;
6666
6667 return 0;
6668 }
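/*
 * Userspace arms this via the usual fasync setup (illustrative only); the
 * resulting SIGIO is delivered from perf_event_wakeup() below, paced by
 * attr.wakeup_events or attr.wakeup_watermark:
 *
 *	fcntl(perf_fd, F_SETOWN, getpid());
 *	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
 */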
6669
6670 static const struct file_operations perf_fops = {
6671 .llseek = no_llseek,
6672 .release = perf_release,
6673 .read = perf_read,
6674 .poll = perf_poll,
6675 .unlocked_ioctl = perf_ioctl,
6676 .compat_ioctl = perf_compat_ioctl,
6677 .mmap = perf_mmap,
6678 .fasync = perf_fasync,
6679 };
6680
6681 /*
6682 * Perf event wakeup
6683 *
6684 * If there's data, ensure we set the poll() state and publish everything
6685 * to user-space before waking everybody up.
6686 */
6687
6688 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
6689 {
6690 /* only the parent has fasync state */
6691 if (event->parent)
6692 event = event->parent;
6693 return &event->fasync;
6694 }
6695
6696 void perf_event_wakeup(struct perf_event *event)
6697 {
6698 ring_buffer_wakeup(event);
6699
6700 if (event->pending_kill) {
6701 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
6702 event->pending_kill = 0;
6703 }
6704 }
6705
6706 static void perf_sigtrap(struct perf_event *event)
6707 {
6708 /*
6709 * We'd expect this to only occur if the irq_work is delayed and either
6710 * ctx->task or current has changed in the meantime. This can be the
6711 * case on architectures that do not implement arch_irq_work_raise().
6712 */
6713 if (WARN_ON_ONCE(event->ctx->task != current))
6714 return;
6715
6716 /*
6717 * Both perf_pending_task() and perf_pending_irq() can race with the
6718 * task exiting.
6719 */
6720 if (current->flags & PF_EXITING)
6721 return;
6722
6723 send_sig_perf((void __user *)event->pending_addr,
6724 event->orig_type, event->attr.sig_data);
6725 }
6726
6727 /*
6728 * Deliver the pending work in-event-context or follow the context.
6729 */
6730 static void __perf_pending_irq(struct perf_event *event)
6731 {
6732 int cpu = READ_ONCE(event->oncpu);
6733
6734 /*
6735 * If the event isn't running, we're done. event_sched_out() will have
6736 * taken care of things.
6737 */
6738 if (cpu < 0)
6739 return;
6740
6741 /*
6742 * Yay, we hit home and are in the context of the event.
6743 */
6744 if (cpu == smp_processor_id()) {
6745 if (event->pending_sigtrap) {
6746 event->pending_sigtrap = 0;
6747 perf_sigtrap(event);
6748 local_dec(&event->ctx->nr_pending);
6749 }
6750 if (event->pending_disable) {
6751 event->pending_disable = 0;
6752 perf_event_disable_local(event);
6753 }
6754 return;
6755 }
6756
6757 /*
6758 * CPU-A CPU-B
6759 *
6760 * perf_event_disable_inatomic()
6761 * @pending_disable = CPU-A;
6762 * irq_work_queue();
6763 *
6764 * sched-out
6765 * @pending_disable = -1;
6766 *
6767 * sched-in
6768 * perf_event_disable_inatomic()
6769 * @pending_disable = CPU-B;
6770 * irq_work_queue(); // FAILS
6771 *
6772 * irq_work_run()
6773 * perf_pending_irq()
6774 *
6775 * But the event runs on CPU-B and wants disabling there.
6776 */
6777 irq_work_queue_on(&event->pending_irq, cpu);
6778 }
6779
6780 static void perf_pending_irq(struct irq_work *entry)
6781 {
6782 struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
6783 int rctx;
6784
6785 /*
6786 * If we 'fail' here, that's OK, it means recursion is already disabled
6787 * and we won't recurse 'further'.
6788 */
6789 rctx = perf_swevent_get_recursion_context();
6790
6791 /*
6792 * The wakeup isn't bound to the context of the event -- it can happen
6793 * irrespective of where the event is.
6794 */
6795 if (event->pending_wakeup) {
6796 event->pending_wakeup = 0;
6797 perf_event_wakeup(event);
6798 }
6799
6800 __perf_pending_irq(event);
6801
6802 if (rctx >= 0)
6803 perf_swevent_put_recursion_context(rctx);
6804 }
6805
6806 static void perf_pending_task(struct callback_head *head)
6807 {
6808 struct perf_event *event = container_of(head, struct perf_event, pending_task);
6809 int rctx;
6810
6811 /*
6812 * If we 'fail' here, that's OK, it means recursion is already disabled
6813 * and we won't recurse 'further'.
6814 */
6815 preempt_disable_notrace();
6816 rctx = perf_swevent_get_recursion_context();
6817
6818 if (event->pending_work) {
6819 event->pending_work = 0;
6820 perf_sigtrap(event);
6821 local_dec(&event->ctx->nr_pending);
6822 }
6823
6824 if (rctx >= 0)
6825 perf_swevent_put_recursion_context(rctx);
6826 preempt_enable_notrace();
6827
6828 put_event(event);
6829 }
6830
6831 #ifdef CONFIG_GUEST_PERF_EVENTS
6832 struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
6833
6834 DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
6835 DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
6836 DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
6837
6838 void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
6839 {
6840 if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
6841 return;
6842
6843 rcu_assign_pointer(perf_guest_cbs, cbs);
6844 static_call_update(__perf_guest_state, cbs->state);
6845 static_call_update(__perf_guest_get_ip, cbs->get_ip);
6846
6847 /* Implementing ->handle_intel_pt_intr is optional. */
6848 if (cbs->handle_intel_pt_intr)
6849 static_call_update(__perf_guest_handle_intel_pt_intr,
6850 cbs->handle_intel_pt_intr);
6851 }
6852 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
6853
6854 void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
6855 {
6856 if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
6857 return;
6858
6859 rcu_assign_pointer(perf_guest_cbs, NULL);
6860 static_call_update(__perf_guest_state, (void *)&__static_call_return0);
6861 static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0);
6862 static_call_update(__perf_guest_handle_intel_pt_intr,
6863 (void *)&__static_call_return0);
6864 synchronize_rcu();
6865 }
6866 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
6867 #endif
6868
6869 static void
6870 perf_output_sample_regs(struct perf_output_handle *handle,
6871 struct pt_regs *regs, u64 mask)
6872 {
6873 int bit;
6874 DECLARE_BITMAP(_mask, 64);
6875
6876 bitmap_from_u64(_mask, mask);
6877 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
6878 u64 val;
6879
6880 val = perf_reg_value(regs, bit);
6881 perf_output_put(handle, val);
6882 }
6883 }
6884
6885 static void perf_sample_regs_user(struct perf_regs *regs_user,
6886 struct pt_regs *regs)
6887 {
6888 if (user_mode(regs)) {
6889 regs_user->abi = perf_reg_abi(current);
6890 regs_user->regs = regs;
6891 } else if (!(current->flags & PF_KTHREAD)) {
6892 perf_get_regs_user(regs_user, regs);
6893 } else {
6894 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
6895 regs_user->regs = NULL;
6896 }
6897 }
6898
6899 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
6900 struct pt_regs *regs)
6901 {
6902 regs_intr->regs = regs;
6903 regs_intr->abi = perf_reg_abi(current);
6904 }
6905
6906
6907 /*
6908 * Get remaining task size from user stack pointer.
6909 *
6910 * It'd be better to look up the stack VMA and limit this more
6911 * precisely, but there's no way to do that safely under interrupt,
6912 * so we use TASK_SIZE as the limit.
6913 */
6914 static u64 perf_ustack_task_size(struct pt_regs *regs)
6915 {
6916 unsigned long addr = perf_user_stack_pointer(regs);
6917
6918 if (!addr || addr >= TASK_SIZE)
6919 return 0;
6920
6921 return TASK_SIZE - addr;
6922 }
6923
6924 static u16
6925 perf_sample_ustack_size(u16 stack_size, u16 header_size,
6926 struct pt_regs *regs)
6927 {
6928 u64 task_size;
6929
6930 /* No regs, no stack pointer, no dump. */
6931 if (!regs)
6932 return 0;
6933
6934 /*
6935 * Check if we fit in with the requested stack size into the:
6936 * - TASK_SIZE
6937 * If we don't, we limit the size to the TASK_SIZE.
6938 *
6939 * - remaining sample size
6940 * If we don't, we customize the stack size to
6941 * fit into the remaining sample size.
6942 */
6943
6944 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
6945 stack_size = min(stack_size, (u16) task_size);
6946
6947 /* Current header size plus static size and dynamic size. */
6948 header_size += 2 * sizeof(u64);
6949
6950 /* Do we fit in with the current stack dump size? */
6951 if ((u16) (header_size + stack_size) < header_size) {
6952 /*
6953 * If we overflow the maximum size for the sample,
6954 * we customize the stack dump size to fit in.
6955 */
6956 stack_size = USHRT_MAX - header_size - sizeof(u64);
6957 stack_size = round_up(stack_size, sizeof(u64));
6958 }
6959
6960 return stack_size;
6961 }
6962
6963 static void
6964 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
6965 struct pt_regs *regs)
6966 {
6967 /* Case of a kernel thread, nothing to dump */
6968 if (!regs) {
6969 u64 size = 0;
6970 perf_output_put(handle, size);
6971 } else {
6972 unsigned long sp;
6973 unsigned int rem;
6974 u64 dyn_size;
6975
6976 /*
6977 * We dump:
6978 * static size
6979 * - the size requested by user or the best one we can fit
6980 * in to the sample max size
6981 * data
6982 * - user stack dump data
6983 * dynamic size
6984 * - the actual dumped size
6985 */
6986
6987 /* Static size. */
6988 perf_output_put(handle, dump_size);
6989
6990 /* Data. */
6991 sp = perf_user_stack_pointer(regs);
6992 rem = __output_copy_user(handle, (void *) sp, dump_size);
6993 dyn_size = dump_size - rem;
6994
6995 perf_output_skip(handle, rem);
6996
6997 /* Dynamic size. */
6998 perf_output_put(handle, dyn_size);
6999 }
7000 }
7001
7002 static unsigned long perf_prepare_sample_aux(struct perf_event *event,
7003 struct perf_sample_data *data,
7004 size_t size)
7005 {
7006 struct perf_event *sampler = event->aux_event;
7007 struct perf_buffer *rb;
7008
7009 data->aux_size = 0;
7010
7011 if (!sampler)
7012 goto out;
7013
7014 if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
7015 goto out;
7016
7017 if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
7018 goto out;
7019
7020 rb = ring_buffer_get(sampler);
7021 if (!rb)
7022 goto out;
7023
7024 /*
7025 * If this is an NMI hit inside sampling code, don't take
7026 * the sample. See also perf_aux_sample_output().
7027 */
7028 if (READ_ONCE(rb->aux_in_sampling)) {
7029 data->aux_size = 0;
7030 } else {
7031 size = min_t(size_t, size, perf_aux_size(rb));
7032 data->aux_size = ALIGN(size, sizeof(u64));
7033 }
7034 ring_buffer_put(rb);
7035
7036 out:
7037 return data->aux_size;
7038 }
7039
7040 static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
7041 struct perf_event *event,
7042 struct perf_output_handle *handle,
7043 unsigned long size)
7044 {
7045 unsigned long flags;
7046 long ret;
7047
7048 /*
7049 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler
7050 * paths. If we start calling them in NMI context, they may race with
7051 * the IRQ ones, that is, for example, re-starting an event that's just
7052 * been stopped, which is why we're using a separate callback that
7053 * doesn't change the event state.
7054 *
7055 * IRQs need to be disabled to prevent IPIs from racing with us.
7056 */
7057 local_irq_save(flags);
7058 /*
7059 * Guard against NMI hits inside the critical section;
7060 * see also perf_prepare_sample_aux().
7061 */
7062 WRITE_ONCE(rb->aux_in_sampling, 1);
7063 barrier();
7064
7065 ret = event->pmu->snapshot_aux(event, handle, size);
7066
7067 barrier();
7068 WRITE_ONCE(rb->aux_in_sampling, 0);
7069 local_irq_restore(flags);
7070
7071 return ret;
7072 }
7073
7074 static void perf_aux_sample_output(struct perf_event *event,
7075 struct perf_output_handle *handle,
7076 struct perf_sample_data *data)
7077 {
7078 struct perf_event *sampler = event->aux_event;
7079 struct perf_buffer *rb;
7080 unsigned long pad;
7081 long size;
7082
7083 if (WARN_ON_ONCE(!sampler || !data->aux_size))
7084 return;
7085
7086 rb = ring_buffer_get(sampler);
7087 if (!rb)
7088 return;
7089
7090 size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
7091
7092 /*
7093 * An error here means that perf_output_copy() failed (returned a
7094 * non-zero surplus that it didn't copy), which in its current
7095 * enlightened implementation is not possible. If that changes, we'd
7096 * like to know.
7097 */
7098 if (WARN_ON_ONCE(size < 0))
7099 goto out_put;
7100
7101 /*
7102 * The pad comes from ALIGN()ing data->aux_size up to u64 in
7103 * perf_prepare_sample_aux(), so should not be more than that.
7104 */
7105 pad = data->aux_size - size;
7106 if (WARN_ON_ONCE(pad >= sizeof(u64)))
7107 pad = 8;
7108
7109 if (pad) {
7110 u64 zero = 0;
7111 perf_output_copy(handle, &zero, pad);
7112 }
7113
7114 out_put:
7115 ring_buffer_put(rb);
7116 }
7117
7118 /*
7119 * A set of common sample data types saved even for non-sample records
7120 * when event->attr.sample_id_all is set.
7121 */
7122 #define PERF_SAMPLE_ID_ALL (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
7123 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
7124 PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
7125
7126 static void __perf_event_header__init_id(struct perf_sample_data *data,
7127 struct perf_event *event,
7128 u64 sample_type)
7129 {
7130 data->type = event->attr.sample_type;
7131 data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL;
7132
7133 if (sample_type & PERF_SAMPLE_TID) {
7134 /* namespace issues */
7135 data->tid_entry.pid = perf_event_pid(event, current);
7136 data->tid_entry.tid = perf_event_tid(event, current);
7137 }
7138
7139 if (sample_type & PERF_SAMPLE_TIME)
7140 data->time = perf_event_clock(event);
7141
7142 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
7143 data->id = primary_event_id(event);
7144
7145 if (sample_type & PERF_SAMPLE_STREAM_ID)
7146 data->stream_id = event->id;
7147
7148 if (sample_type & PERF_SAMPLE_CPU) {
7149 data->cpu_entry.cpu = raw_smp_processor_id();
7150 data->cpu_entry.reserved = 0;
7151 }
7152 }
7153
7154 void perf_event_header__init_id(struct perf_event_header *header,
7155 struct perf_sample_data *data,
7156 struct perf_event *event)
7157 {
7158 if (event->attr.sample_id_all) {
7159 header->size += event->id_header_size;
7160 __perf_event_header__init_id(data, event, event->attr.sample_type);
7161 }
7162 }
7163
7164 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
7165 struct perf_sample_data *data)
7166 {
7167 u64 sample_type = data->type;
7168
7169 if (sample_type & PERF_SAMPLE_TID)
7170 perf_output_put(handle, data->tid_entry);
7171
7172 if (sample_type & PERF_SAMPLE_TIME)
7173 perf_output_put(handle, data->time);
7174
7175 if (sample_type & PERF_SAMPLE_ID)
7176 perf_output_put(handle, data->id);
7177
7178 if (sample_type & PERF_SAMPLE_STREAM_ID)
7179 perf_output_put(handle, data->stream_id);
7180
7181 if (sample_type & PERF_SAMPLE_CPU)
7182 perf_output_put(handle, data->cpu_entry);
7183
7184 if (sample_type & PERF_SAMPLE_IDENTIFIER)
7185 perf_output_put(handle, data->id);
7186 }
7187
7188 void perf_event__output_id_sample(struct perf_event *event,
7189 struct perf_output_handle *handle,
7190 struct perf_sample_data *sample)
7191 {
7192 if (event->attr.sample_id_all)
7193 __perf_event__output_id_sample(handle, sample);
7194 }
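/*
 * When attr.sample_id_all is set, the fields emitted above form the
 * "sample_id" trailer appended to every non-sample record, in this order
 * (matching the layout documented in include/uapi/linux/perf_event.h):
 *
 *	struct sample_id {
 *		{ u32 pid, tid;  }	// if PERF_SAMPLE_TID
 *		{ u64 time;      }	// if PERF_SAMPLE_TIME
 *		{ u64 id;        }	// if PERF_SAMPLE_ID
 *		{ u64 stream_id; }	// if PERF_SAMPLE_STREAM_ID
 *		{ u32 cpu, res;  }	// if PERF_SAMPLE_CPU
 *		{ u64 id;        }	// if PERF_SAMPLE_IDENTIFIER
 *	};
 */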
7195
7196 static void perf_output_read_one(struct perf_output_handle *handle,
7197 struct perf_event *event,
7198 u64 enabled, u64 running)
7199 {
7200 u64 read_format = event->attr.read_format;
7201 u64 values[5];
7202 int n = 0;
7203
7204 values[n++] = perf_event_count(event);
7205 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
7206 values[n++] = enabled +
7207 atomic64_read(&event->child_total_time_enabled);
7208 }
7209 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
7210 values[n++] = running +
7211 atomic64_read(&event->child_total_time_running);
7212 }
7213 if (read_format & PERF_FORMAT_ID)
7214 values[n++] = primary_event_id(event);
7215 if (read_format & PERF_FORMAT_LOST)
7216 values[n++] = atomic64_read(&event->lost_samples);
7217
7218 __output_copy(handle, values, n * sizeof(u64));
7219 }
7220
7221 static void perf_output_read_group(struct perf_output_handle *handle,
7222 struct perf_event *event,
7223 u64 enabled, u64 running)
7224 {
7225 struct perf_event *leader = event->group_leader, *sub;
7226 u64 read_format = event->attr.read_format;
7227 unsigned long flags;
7228 u64 values[6];
7229 int n = 0;
7230
7231 /*
7232 * Disabling interrupts avoids all counter scheduling
7233 * (context switches, timer based rotation and IPIs).
7234 */
7235 local_irq_save(flags);
7236
7237 values[n++] = 1 + leader->nr_siblings;
7238
7239 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
7240 values[n++] = enabled;
7241
7242 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
7243 values[n++] = running;
7244
7245 if ((leader != event) &&
7246 (leader->state == PERF_EVENT_STATE_ACTIVE))
7247 leader->pmu->read(leader);
7248
7249 values[n++] = perf_event_count(leader);
7250 if (read_format & PERF_FORMAT_ID)
7251 values[n++] = primary_event_id(leader);
7252 if (read_format & PERF_FORMAT_LOST)
7253 values[n++] = atomic64_read(&leader->lost_samples);
7254
7255 __output_copy(handle, values, n * sizeof(u64));
7256
7257 for_each_sibling_event(sub, leader) {
7258 n = 0;
7259
7260 if ((sub != event) &&
7261 (sub->state == PERF_EVENT_STATE_ACTIVE))
7262 sub->pmu->read(sub);
7263
7264 values[n++] = perf_event_count(sub);
7265 if (read_format & PERF_FORMAT_ID)
7266 values[n++] = primary_event_id(sub);
7267 if (read_format & PERF_FORMAT_LOST)
7268 values[n++] = atomic64_read(&sub->lost_samples);
7269
7270 __output_copy(handle, values, n * sizeof(u64));
7271 }
7272
7273 local_irq_restore(flags);
7274 }
7275
7276 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
7277 PERF_FORMAT_TOTAL_TIME_RUNNING)
7278
7279 /*
7280 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
7281 *
7282 * The problem is that it's both hard and excessively expensive to iterate the
7283 * child list, not to mention that it's impossible to IPI the children running
7284 * on another CPU, from interrupt/NMI context.
7285 */
7286 static void perf_output_read(struct perf_output_handle *handle,
7287 struct perf_event *event)
7288 {
7289 u64 enabled = 0, running = 0, now;
7290 u64 read_format = event->attr.read_format;
7291
7292 /*
7293 * compute total_time_enabled, total_time_running
7294 * based on snapshot values taken when the event
7295 * was last scheduled in.
7296 *
7297 * we cannot simply call update_context_time()
7298 * because of locking issues, as we are called in
7299 * NMI context
7300 */
7301 if (read_format & PERF_FORMAT_TOTAL_TIMES)
7302 calc_timer_values(event, &now, &enabled, &running);
7303
7304 if (event->attr.read_format & PERF_FORMAT_GROUP)
7305 perf_output_read_group(handle, event, enabled, running);
7306 else
7307 perf_output_read_one(handle, event, enabled, running);
7308 }
7309
7310 void perf_output_sample(struct perf_output_handle *handle,
7311 struct perf_event_header *header,
7312 struct perf_sample_data *data,
7313 struct perf_event *event)
7314 {
7315 u64 sample_type = data->type;
7316
7317 perf_output_put(handle, *header);
7318
7319 if (sample_type & PERF_SAMPLE_IDENTIFIER)
7320 perf_output_put(handle, data->id);
7321
7322 if (sample_type & PERF_SAMPLE_IP)
7323 perf_output_put(handle, data->ip);
7324
7325 if (sample_type & PERF_SAMPLE_TID)
7326 perf_output_put(handle, data->tid_entry);
7327
7328 if (sample_type & PERF_SAMPLE_TIME)
7329 perf_output_put(handle, data->time);
7330
7331 if (sample_type & PERF_SAMPLE_ADDR)
7332 perf_output_put(handle, data->addr);
7333
7334 if (sample_type & PERF_SAMPLE_ID)
7335 perf_output_put(handle, data->id);
7336
7337 if (sample_type & PERF_SAMPLE_STREAM_ID)
7338 perf_output_put(handle, data->stream_id);
7339
7340 if (sample_type & PERF_SAMPLE_CPU)
7341 perf_output_put(handle, data->cpu_entry);
7342
7343 if (sample_type & PERF_SAMPLE_PERIOD)
7344 perf_output_put(handle, data->period);
7345
7346 if (sample_type & PERF_SAMPLE_READ)
7347 perf_output_read(handle, event);
7348
7349 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
7350 int size = 1;
7351
7352 size += data->callchain->nr;
7353 size *= sizeof(u64);
7354 __output_copy(handle, data->callchain, size);
7355 }
7356
7357 if (sample_type & PERF_SAMPLE_RAW) {
7358 struct perf_raw_record *raw = data->raw;
7359
7360 if (raw) {
7361 struct perf_raw_frag *frag = &raw->frag;
7362
7363 perf_output_put(handle, raw->size);
7364 do {
7365 if (frag->copy) {
7366 __output_custom(handle, frag->copy,
7367 frag->data, frag->size);
7368 } else {
7369 __output_copy(handle, frag->data,
7370 frag->size);
7371 }
7372 if (perf_raw_frag_last(frag))
7373 break;
7374 frag = frag->next;
7375 } while (1);
7376 if (frag->pad)
7377 __output_skip(handle, NULL, frag->pad);
7378 } else {
7379 struct {
7380 u32 size;
7381 u32 data;
7382 } raw = {
7383 .size = sizeof(u32),
7384 .data = 0,
7385 };
7386 perf_output_put(handle, raw);
7387 }
7388 }
7389
7390 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
7391 if (data->br_stack) {
7392 size_t size;
7393
7394 size = data->br_stack->nr
7395 * sizeof(struct perf_branch_entry);
7396
7397 perf_output_put(handle, data->br_stack->nr);
7398 if (branch_sample_hw_index(event))
7399 perf_output_put(handle, data->br_stack->hw_idx);
7400 perf_output_copy(handle, data->br_stack->entries, size);
7401 } else {
7402 /*
7403 * we always store at least the value of nr
7404 */
7405 u64 nr = 0;
7406 perf_output_put(handle, nr);
7407 }
7408 }
7409
7410 if (sample_type & PERF_SAMPLE_REGS_USER) {
7411 u64 abi = data->regs_user.abi;
7412
7413 /*
7414 * If there are no regs to dump, notice it through the
7415 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
7416 */
7417 perf_output_put(handle, abi);
7418
7419 if (abi) {
7420 u64 mask = event->attr.sample_regs_user;
7421 perf_output_sample_regs(handle,
7422 data->regs_user.regs,
7423 mask);
7424 }
7425 }
7426
7427 if (sample_type & PERF_SAMPLE_STACK_USER) {
7428 perf_output_sample_ustack(handle,
7429 data->stack_user_size,
7430 data->regs_user.regs);
7431 }
7432
7433 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
7434 perf_output_put(handle, data->weight.full);
7435
7436 if (sample_type & PERF_SAMPLE_DATA_SRC)
7437 perf_output_put(handle, data->data_src.val);
7438
7439 if (sample_type & PERF_SAMPLE_TRANSACTION)
7440 perf_output_put(handle, data->txn);
7441
7442 if (sample_type & PERF_SAMPLE_REGS_INTR) {
7443 u64 abi = data->regs_intr.abi;
7444 /*
7445 * If there are no regs to dump, notice it through the
7446 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
7447 */
7448 perf_output_put(handle, abi);
7449
7450 if (abi) {
7451 u64 mask = event->attr.sample_regs_intr;
7452
7453 perf_output_sample_regs(handle,
7454 data->regs_intr.regs,
7455 mask);
7456 }
7457 }
7458
7459 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
7460 perf_output_put(handle, data->phys_addr);
7461
7462 if (sample_type & PERF_SAMPLE_CGROUP)
7463 perf_output_put(handle, data->cgroup);
7464
7465 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
7466 perf_output_put(handle, data->data_page_size);
7467
7468 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
7469 perf_output_put(handle, data->code_page_size);
7470
7471 if (sample_type & PERF_SAMPLE_AUX) {
7472 perf_output_put(handle, data->aux_size);
7473
7474 if (data->aux_size)
7475 perf_aux_sample_output(event, handle, data);
7476 }
7477
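/*
 * When not using watermark-based wakeups, attr.wakeup_events counts
 * emitted samples and arms a wakeup once that many have been written;
 * e.g. wakeup_events == 10 wakes up the poller every 10th sample.
 */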
7478 if (!event->attr.watermark) {
7479 int wakeup_events = event->attr.wakeup_events;
7480
7481 if (wakeup_events) {
7482 struct perf_buffer *rb = handle->rb;
7483 int events = local_inc_return(&rb->events);
7484
7485 if (events >= wakeup_events) {
7486 local_sub(wakeup_events, &rb->events);
7487 local_inc(&rb->wakeup);
7488 }
7489 }
7490 }
7491 }
7492
7493 static u64 perf_virt_to_phys(u64 virt)
7494 {
7495 u64 phys_addr = 0;
7496
7497 if (!virt)
7498 return 0;
7499
7500 if (virt >= TASK_SIZE) {
7501 /* If it's vmalloc()d memory, leave phys_addr as 0 */
7502 if (virt_addr_valid((void *)(uintptr_t)virt) &&
7503 !(virt >= VMALLOC_START && virt < VMALLOC_END))
7504 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
7505 } else {
7506 /*
7507 * Walk the page tables for a user address.
7508 * Interrupts are disabled, which prevents any teardown
7509 * of the page tables.
7510 * Try the IRQ-safe get_user_page_fast_only() first.
7511 * If that fails, leave phys_addr as 0.
7512 */
7513 if (current->mm != NULL) {
7514 struct page *p;
7515
7516 pagefault_disable();
7517 if (get_user_page_fast_only(virt, 0, &p)) {
7518 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
7519 put_page(p);
7520 }
7521 pagefault_enable();
7522 }
7523 }
7524
7525 return phys_addr;
7526 }
7527
7528 /*
7529 * Return the pagetable size of a given virtual address.
7530 */
7531 static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
7532 {
7533 u64 size = 0;
7534
7535 #ifdef CONFIG_HAVE_FAST_GUP
7536 pgd_t *pgdp, pgd;
7537 p4d_t *p4dp, p4d;
7538 pud_t *pudp, pud;
7539 pmd_t *pmdp, pmd;
7540 pte_t *ptep, pte;
7541
7542 pgdp = pgd_offset(mm, addr);
7543 pgd = READ_ONCE(*pgdp);
7544 if (pgd_none(pgd))
7545 return 0;
7546
7547 if (pgd_leaf(pgd))
7548 return pgd_leaf_size(pgd);
7549
7550 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
7551 p4d = READ_ONCE(*p4dp);
7552 if (!p4d_present(p4d))
7553 return 0;
7554
7555 if (p4d_leaf(p4d))
7556 return p4d_leaf_size(p4d);
7557
7558 pudp = pud_offset_lockless(p4dp, p4d, addr);
7559 pud = READ_ONCE(*pudp);
7560 if (!pud_present(pud))
7561 return 0;
7562
7563 if (pud_leaf(pud))
7564 return pud_leaf_size(pud);
7565
7566 pmdp = pmd_offset_lockless(pudp, pud, addr);
7567 again:
7568 pmd = pmdp_get_lockless(pmdp);
7569 if (!pmd_present(pmd))
7570 return 0;
7571
7572 if (pmd_leaf(pmd))
7573 return pmd_leaf_size(pmd);
7574
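/*
 * pte_offset_map() can fail when the PMD is no longer a pointer to a
 * page table (e.g. it changed under us); re-read the PMD in that case.
 */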
7575 ptep = pte_offset_map(&pmd, addr);
7576 if (!ptep)
7577 goto again;
7578
7579 pte = ptep_get_lockless(ptep);
7580 if (pte_present(pte))
7581 size = pte_leaf_size(pte);
7582 pte_unmap(ptep);
7583 #endif /* CONFIG_HAVE_FAST_GUP */
7584
7585 return size;
7586 }
7587
7588 static u64 perf_get_page_size(unsigned long addr)
7589 {
7590 struct mm_struct *mm;
7591 unsigned long flags;
7592 u64 size;
7593
7594 if (!addr)
7595 return 0;
7596
7597 /*
7598 * Software page-table walkers must disable IRQs,
7599 * which prevents any teardown of the page tables.
7600 */
7601 local_irq_save(flags);
7602
7603 mm = current->mm;
7604 if (!mm) {
7605 /*
7606 * For kernel threads and the like, use init_mm so that
7607 * we can find kernel memory.
7608 */
7609 mm = &init_mm;
7610 }
7611
7612 size = perf_get_pgtable_size(mm, addr);
7613
7614 local_irq_restore(flags);
7615
7616 return size;
7617 }
7618
7619 static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
7620
7621 struct perf_callchain_entry *
7622 perf_callchain(struct perf_event *event, struct pt_regs *regs)
7623 {
7624 bool kernel = !event->attr.exclude_callchain_kernel;
7625 bool user = !event->attr.exclude_callchain_user;
7626 /* Disallow cross-task user callchains. */
7627 bool crosstask = event->ctx->task && event->ctx->task != current;
7628 const u32 max_stack = event->attr.sample_max_stack;
7629 struct perf_callchain_entry *callchain;
7630
7631 if (!kernel && !user)
7632 return &__empty_callchain;
7633
7634 callchain = get_perf_callchain(regs, 0, kernel, user,
7635 max_stack, crosstask, true);
7636 return callchain ?: &__empty_callchain;
7637 }
7638
7639 static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
7640 {
7641 return d * !!(flags & s);
7642 }
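/*
 * __cond_set(flags, s, d) evaluates to d when any bit of s is set in
 * flags and to 0 otherwise, without branching. For example,
 * __cond_set(sample_type, PERF_SAMPLE_STACK_USER, PERF_SAMPLE_REGS_USER)
 * below adds PERF_SAMPLE_REGS_USER only when a user stack dump was
 * requested.
 */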
7643
7644 void perf_prepare_sample(struct perf_sample_data *data,
7645 struct perf_event *event,
7646 struct pt_regs *regs)
7647 {
7648 u64 sample_type = event->attr.sample_type;
7649 u64 filtered_sample_type;
7650
7651 /*
7652 * Add the sample flags that depend on others, and clear the
7653 * sample flags that have already been filled in by the PMU driver.
7654 */
7655 filtered_sample_type = sample_type;
7656 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_CODE_PAGE_SIZE,
7657 PERF_SAMPLE_IP);
7658 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_DATA_PAGE_SIZE |
7659 PERF_SAMPLE_PHYS_ADDR, PERF_SAMPLE_ADDR);
7660 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
7661 PERF_SAMPLE_REGS_USER);
7662 filtered_sample_type &= ~data->sample_flags;
7663
7664 if (filtered_sample_type == 0) {
7665 /* Make sure it has the correct data->type for output */
7666 data->type = event->attr.sample_type;
7667 return;
7668 }
7669
7670 __perf_event_header__init_id(data, event, filtered_sample_type);
7671
7672 if (filtered_sample_type & PERF_SAMPLE_IP) {
7673 data->ip = perf_instruction_pointer(regs);
7674 data->sample_flags |= PERF_SAMPLE_IP;
7675 }
7676
7677 if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
7678 perf_sample_save_callchain(data, event, regs);
7679
7680 if (filtered_sample_type & PERF_SAMPLE_RAW) {
7681 data->raw = NULL;
7682 data->dyn_size += sizeof(u64);
7683 data->sample_flags |= PERF_SAMPLE_RAW;
7684 }
7685
7686 if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
7687 data->br_stack = NULL;
7688 data->dyn_size += sizeof(u64);
7689 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
7690 }
7691
7692 if (filtered_sample_type & PERF_SAMPLE_REGS_USER)
7693 perf_sample_regs_user(&data->regs_user, regs);
7694
7695 /*
7696 * We cannot use filtered_sample_type here, as REGS_USER can be set
7697 * by STACK_USER (via __cond_set() above) and we don't want to update
7698 * the dyn_size if it wasn't requested by the user.
7699 */
7700 if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) {
7701 /* regs dump ABI info */
7702 int size = sizeof(u64);
7703
7704 if (data->regs_user.regs) {
7705 u64 mask = event->attr.sample_regs_user;
7706 size += hweight64(mask) * sizeof(u64);
7707 }
7708
7709 data->dyn_size += size;
7710 data->sample_flags |= PERF_SAMPLE_REGS_USER;
7711 }
7712
7713 if (filtered_sample_type & PERF_SAMPLE_STACK_USER) {
7714 /*
7715 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
7716 * processed last, or an additional check must be added when a
7717 * new sample type is introduced, because we could eat
7718 * up the rest of the sample size.
7719 */
7720 u16 stack_size = event->attr.sample_stack_user;
7721 u16 header_size = perf_sample_data_size(data, event);
7722 u16 size = sizeof(u64);
7723
7724 stack_size = perf_sample_ustack_size(stack_size, header_size,
7725 data->regs_user.regs);
7726
7727 /*
7728 * If there is something to dump, add space for the dump
7729 * itself and for the field that tells the dynamic size,
7730 * which is how many have been actually dumped.
7731 */
7732 if (stack_size)
7733 size += sizeof(u64) + stack_size;
7734
7735 data->stack_user_size = stack_size;
7736 data->dyn_size += size;
7737 data->sample_flags |= PERF_SAMPLE_STACK_USER;
7738 }
7739
7740 if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
7741 data->weight.full = 0;
7742 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
7743 }
7744
7745 if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) {
7746 data->data_src.val = PERF_MEM_NA;
7747 data->sample_flags |= PERF_SAMPLE_DATA_SRC;
7748 }
7749
7750 if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) {
7751 data->txn = 0;
7752 data->sample_flags |= PERF_SAMPLE_TRANSACTION;
7753 }
7754
7755 if (filtered_sample_type & PERF_SAMPLE_ADDR) {
7756 data->addr = 0;
7757 data->sample_flags |= PERF_SAMPLE_ADDR;
7758 }
7759
7760 if (filtered_sample_type & PERF_SAMPLE_REGS_INTR) {
7761 /* regs dump ABI info */
7762 int size = sizeof(u64);
7763
7764 perf_sample_regs_intr(&data->regs_intr, regs);
7765
7766 if (data->regs_intr.regs) {
7767 u64 mask = event->attr.sample_regs_intr;
7768
7769 size += hweight64(mask) * sizeof(u64);
7770 }
7771
7772 data->dyn_size += size;
7773 data->sample_flags |= PERF_SAMPLE_REGS_INTR;
7774 }
7775
7776 if (filtered_sample_type & PERF_SAMPLE_PHYS_ADDR) {
7777 data->phys_addr = perf_virt_to_phys(data->addr);
7778 data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
7779 }
7780
7781 #ifdef CONFIG_CGROUP_PERF
7782 if (filtered_sample_type & PERF_SAMPLE_CGROUP) {
7783 struct cgroup *cgrp;
7784
7785 /* protected by RCU */
7786 cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
7787 data->cgroup = cgroup_id(cgrp);
7788 data->sample_flags |= PERF_SAMPLE_CGROUP;
7789 }
7790 #endif
7791
7792 /*
7793 * PERF_SAMPLE_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user
7794 * didn't ask for PERF_SAMPLE_ADDR, the kernel implicitly retrieves
7795 * data->addr, but the value is not dumped to userspace.
7796 */
7797 if (filtered_sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) {
7798 data->data_page_size = perf_get_page_size(data->addr);
7799 data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE;
7800 }
7801
7802 if (filtered_sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) {
7803 data->code_page_size = perf_get_page_size(data->ip);
7804 data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE;
7805 }
7806
7807 if (filtered_sample_type & PERF_SAMPLE_AUX) {
7808 u64 size;
7809 u16 header_size = perf_sample_data_size(data, event);
7810
7811 header_size += sizeof(u64); /* size */
7812
7813 /*
7814 * Given the 16bit nature of header::size, an AUX sample can
7815 * easily overflow it, what with all the preceding sample bits.
7816 * Make sure this doesn't happen by using up to U16_MAX bytes
7817 * per sample in total (rounded down to 8 byte boundary).
7818 */
7819 size = min_t(size_t, U16_MAX - header_size,
7820 event->attr.aux_sample_size);
7821 size = rounddown(size, 8);
7822 size = perf_prepare_sample_aux(event, data, size);
7823
7824 WARN_ON_ONCE(size + header_size > U16_MAX);
7825 data->dyn_size += size + sizeof(u64); /* size above */
7826 data->sample_flags |= PERF_SAMPLE_AUX;
7827 }
7828 }
7829
7830 void perf_prepare_header(struct perf_event_header *header,
7831 struct perf_sample_data *data,
7832 struct perf_event *event,
7833 struct pt_regs *regs)
7834 {
7835 header->type = PERF_RECORD_SAMPLE;
7836 header->size = perf_sample_data_size(data, event);
7837 header->misc = perf_misc_flags(regs);
7838
7839 /*
7840 * If you're adding more sample types here, you likely need to do
7841 * something about the overflowing header::size, like repurpose the
7842 * lowest 3 bits of size, which should always be zero at the moment.
7843 * This raises a more important question: do we really need 512k-sized
7844 * samples, and why? So good argumentation is in order for whatever you
7845 * do here next.
7846 */
7847 WARN_ON_ONCE(header->size & 7);
7848 }
7849
7850 static __always_inline int
7851 __perf_event_output(struct perf_event *event,
7852 struct perf_sample_data *data,
7853 struct pt_regs *regs,
7854 int (*output_begin)(struct perf_output_handle *,
7855 struct perf_sample_data *,
7856 struct perf_event *,
7857 unsigned int))
7858 {
7859 struct perf_output_handle handle;
7860 struct perf_event_header header;
7861 int err;
7862
7863 /* protect the callchain buffers */
7864 rcu_read_lock();
7865
7866 perf_prepare_sample(data, event, regs);
7867 perf_prepare_header(&header, data, event, regs);
7868
7869 err = output_begin(&handle, data, event, header.size);
7870 if (err)
7871 goto exit;
7872
7873 perf_output_sample(&handle, &header, data, event);
7874
7875 perf_output_end(&handle);
7876
7877 exit:
7878 rcu_read_unlock();
7879 return err;
7880 }
7881
7882 void
7883 perf_event_output_forward(struct perf_event *event,
7884 struct perf_sample_data *data,
7885 struct pt_regs *regs)
7886 {
7887 __perf_event_output(event, data, regs, perf_output_begin_forward);
7888 }
7889
7890 void
7891 perf_event_output_backward(struct perf_event *event,
7892 struct perf_sample_data *data,
7893 struct pt_regs *regs)
7894 {
7895 __perf_event_output(event, data, regs, perf_output_begin_backward);
7896 }
7897
7898 int
7899 perf_event_output(struct perf_event *event,
7900 struct perf_sample_data *data,
7901 struct pt_regs *regs)
7902 {
7903 return __perf_event_output(event, data, regs, perf_output_begin);
7904 }
7905
7906 /*
7907 * read event_id
7908 */
7909
7910 struct perf_read_event {
7911 struct perf_event_header header;
7912
7913 u32 pid;
7914 u32 tid;
7915 };
7916
7917 static void
7918 perf_event_read_event(struct perf_event *event,
7919 struct task_struct *task)
7920 {
7921 struct perf_output_handle handle;
7922 struct perf_sample_data sample;
7923 struct perf_read_event read_event = {
7924 .header = {
7925 .type = PERF_RECORD_READ,
7926 .misc = 0,
7927 .size = sizeof(read_event) + event->read_size,
7928 },
7929 .pid = perf_event_pid(event, task),
7930 .tid = perf_event_tid(event, task),
7931 };
7932 int ret;
7933
7934 perf_event_header__init_id(&read_event.header, &sample, event);
7935 ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
7936 if (ret)
7937 return;
7938
7939 perf_output_put(&handle, read_event);
7940 perf_output_read(&handle, event);
7941 perf_event__output_id_sample(event, &handle, &sample);
7942
7943 perf_output_end(&handle);
7944 }
7945
7946 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7947
7948 static void
7949 perf_iterate_ctx(struct perf_event_context *ctx,
7950 perf_iterate_f output,
7951 void *data, bool all)
7952 {
7953 struct perf_event *event;
7954
7955 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7956 if (!all) {
7957 if (event->state < PERF_EVENT_STATE_INACTIVE)
7958 continue;
7959 if (!event_filter_match(event))
7960 continue;
7961 }
7962
7963 output(event, data);
7964 }
7965 }
7966
7967 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
7968 {
7969 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
7970 struct perf_event *event;
7971
7972 list_for_each_entry_rcu(event, &pel->list, sb_list) {
7973 /*
7974 * Skip events that are not fully formed yet; ensure that
7975 * if we observe event->ctx, both event and ctx will be
7976 * complete enough. See perf_install_in_context().
7977 */
7978 if (!smp_load_acquire(&event->ctx))
7979 continue;
7980
7981 if (event->state < PERF_EVENT_STATE_INACTIVE)
7982 continue;
7983 if (!event_filter_match(event))
7984 continue;
7985 output(event, data);
7986 }
7987 }
7988
7989 /*
7990 * Iterate all events that need to receive side-band events.
7991 *
7992 * For new callers: ensure that account_pmu_sb_event() includes
7993 * your event, otherwise it might not get delivered.
7994 */
7995 static void
7996 perf_iterate_sb(perf_iterate_f output, void *data,
7997 struct perf_event_context *task_ctx)
7998 {
7999 struct perf_event_context *ctx;
8000
8001 rcu_read_lock();
8002 preempt_disable();
8003
8004 /*
8005 * If we have task_ctx != NULL we only notify the task context itself.
8006 * The task_ctx is set only for EXIT events before releasing task
8007 * context.
8008 */
8009 if (task_ctx) {
8010 perf_iterate_ctx(task_ctx, output, data, false);
8011 goto done;
8012 }
8013
8014 perf_iterate_sb_cpu(output, data);
8015
8016 ctx = rcu_dereference(current->perf_event_ctxp);
8017 if (ctx)
8018 perf_iterate_ctx(ctx, output, data, false);
8019 done:
8020 preempt_enable();
8021 rcu_read_unlock();
8022 }
8023
8024 /*
8025 * Clear all file-based filters at exec; they'll have to be
8026 * reinstated when/if these objects are mmapped again.
8027 */
8028 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
8029 {
8030 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8031 struct perf_addr_filter *filter;
8032 unsigned int restart = 0, count = 0;
8033 unsigned long flags;
8034
8035 if (!has_addr_filter(event))
8036 return;
8037
8038 raw_spin_lock_irqsave(&ifh->lock, flags);
8039 list_for_each_entry(filter, &ifh->list, entry) {
8040 if (filter->path.dentry) {
8041 event->addr_filter_ranges[count].start = 0;
8042 event->addr_filter_ranges[count].size = 0;
8043 restart++;
8044 }
8045
8046 count++;
8047 }
8048
8049 if (restart)
8050 event->addr_filters_gen++;
8051 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8052
8053 if (restart)
8054 perf_event_stop(event, 1);
8055 }
8056
8057 void perf_event_exec(void)
8058 {
8059 struct perf_event_context *ctx;
8060
8061 ctx = perf_pin_task_context(current);
8062 if (!ctx)
8063 return;
8064
8065 perf_event_enable_on_exec(ctx);
8066 perf_event_remove_on_exec(ctx);
8067 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
8068
8069 perf_unpin_context(ctx);
8070 put_ctx(ctx);
8071 }
8072
8073 struct remote_output {
8074 struct perf_buffer *rb;
8075 int err;
8076 };
8077
8078 static void __perf_event_output_stop(struct perf_event *event, void *data)
8079 {
8080 struct perf_event *parent = event->parent;
8081 struct remote_output *ro = data;
8082 struct perf_buffer *rb = ro->rb;
8083 struct stop_event_data sd = {
8084 .event = event,
8085 };
8086
8087 if (!has_aux(event))
8088 return;
8089
8090 if (!parent)
8091 parent = event;
8092
8093 /*
8094 * In case of inheritance, it will be the parent that links to the
8095 * ring-buffer, but it will be the child that's actually using it.
8096 *
8097 * We are using event::rb to determine if the event should be stopped;
8098 * however, this may race with ring_buffer_attach() (through set_output),
8099 * which will make us skip the event that actually needs to be stopped.
8100 * So ring_buffer_attach() has to stop an aux event before re-assigning
8101 * its rb pointer.
8102 */
8103 if (rcu_dereference(parent->rb) == rb)
8104 ro->err = __perf_event_stop(&sd);
8105 }
8106
8107 static int __perf_pmu_output_stop(void *info)
8108 {
8109 struct perf_event *event = info;
8110 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
8111 struct remote_output ro = {
8112 .rb = event->rb,
8113 };
8114
8115 rcu_read_lock();
8116 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
8117 if (cpuctx->task_ctx)
8118 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
8119 &ro, false);
8120 rcu_read_unlock();
8121
8122 return ro.err;
8123 }
8124
8125 static void perf_pmu_output_stop(struct perf_event *event)
8126 {
8127 struct perf_event *iter;
8128 int err, cpu;
8129
8130 restart:
8131 rcu_read_lock();
8132 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
8133 /*
8134 * For per-CPU events, we need to make sure that neither they
8135 * nor their children are running; for cpu==-1 events it's
8136 * sufficient to stop the event itself if it's active, since
8137 * it can't have children.
8138 */
8139 cpu = iter->cpu;
8140 if (cpu == -1)
8141 cpu = READ_ONCE(iter->oncpu);
8142
8143 if (cpu == -1)
8144 continue;
8145
8146 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
8147 if (err == -EAGAIN) {
8148 rcu_read_unlock();
8149 goto restart;
8150 }
8151 }
8152 rcu_read_unlock();
8153 }
8154
8155 /*
8156 * task tracking -- fork/exit
8157 *
8158 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
8159 */
8160
8161 struct perf_task_event {
8162 struct task_struct *task;
8163 struct perf_event_context *task_ctx;
8164
8165 struct {
8166 struct perf_event_header header;
8167
8168 u32 pid;
8169 u32 ppid;
8170 u32 tid;
8171 u32 ptid;
8172 u64 time;
8173 } event_id;
8174 };
8175
8176 static int perf_event_task_match(struct perf_event *event)
8177 {
8178 return event->attr.comm || event->attr.mmap ||
8179 event->attr.mmap2 || event->attr.mmap_data ||
8180 event->attr.task;
8181 }
8182
8183 static void perf_event_task_output(struct perf_event *event,
8184 void *data)
8185 {
8186 struct perf_task_event *task_event = data;
8187 struct perf_output_handle handle;
8188 struct perf_sample_data sample;
8189 struct task_struct *task = task_event->task;
8190 int ret, size = task_event->event_id.header.size;
8191
8192 if (!perf_event_task_match(event))
8193 return;
8194
8195 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
8196
8197 ret = perf_output_begin(&handle, &sample, event,
8198 task_event->event_id.header.size);
8199 if (ret)
8200 goto out;
8201
8202 task_event->event_id.pid = perf_event_pid(event, task);
8203 task_event->event_id.tid = perf_event_tid(event, task);
8204
8205 if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
8206 task_event->event_id.ppid = perf_event_pid(event,
8207 task->real_parent);
8208 task_event->event_id.ptid = perf_event_pid(event,
8209 task->real_parent);
8210 } else { /* PERF_RECORD_FORK */
8211 task_event->event_id.ppid = perf_event_pid(event, current);
8212 task_event->event_id.ptid = perf_event_tid(event, current);
8213 }
8214
8215 task_event->event_id.time = perf_event_clock(event);
8216
8217 perf_output_put(&handle, task_event->event_id);
8218
8219 perf_event__output_id_sample(event, &handle, &sample);
8220
8221 perf_output_end(&handle);
8222 out:
8223 task_event->event_id.header.size = size;
8224 }
8225
8226 static void perf_event_task(struct task_struct *task,
8227 struct perf_event_context *task_ctx,
8228 int new)
8229 {
8230 struct perf_task_event task_event;
8231
8232 if (!atomic_read(&nr_comm_events) &&
8233 !atomic_read(&nr_mmap_events) &&
8234 !atomic_read(&nr_task_events))
8235 return;
8236
8237 task_event = (struct perf_task_event){
8238 .task = task,
8239 .task_ctx = task_ctx,
8240 .event_id = {
8241 .header = {
8242 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
8243 .misc = 0,
8244 .size = sizeof(task_event.event_id),
8245 },
8246 /* .pid */
8247 /* .ppid */
8248 /* .tid */
8249 /* .ptid */
8250 /* .time */
8251 },
8252 };
8253
8254 perf_iterate_sb(perf_event_task_output,
8255 &task_event,
8256 task_ctx);
8257 }
8258
8259 void perf_event_fork(struct task_struct *task)
8260 {
8261 perf_event_task(task, NULL, 1);
8262 perf_event_namespaces(task);
8263 }
8264
8265 /*
8266 * comm tracking
8267 */
8268
8269 struct perf_comm_event {
8270 struct task_struct *task;
8271 char *comm;
8272 int comm_size;
8273
8274 struct {
8275 struct perf_event_header header;
8276
8277 u32 pid;
8278 u32 tid;
8279 } event_id;
8280 };
8281
8282 static int perf_event_comm_match(struct perf_event *event)
8283 {
8284 return event->attr.comm;
8285 }
8286
8287 static void perf_event_comm_output(struct perf_event *event,
8288 void *data)
8289 {
8290 struct perf_comm_event *comm_event = data;
8291 struct perf_output_handle handle;
8292 struct perf_sample_data sample;
8293 int size = comm_event->event_id.header.size;
8294 int ret;
8295
8296 if (!perf_event_comm_match(event))
8297 return;
8298
8299 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
8300 ret = perf_output_begin(&handle, &sample, event,
8301 comm_event->event_id.header.size);
8302
8303 if (ret)
8304 goto out;
8305
8306 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
8307 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
8308
8309 perf_output_put(&handle, comm_event->event_id);
8310 __output_copy(&handle, comm_event->comm,
8311 comm_event->comm_size);
8312
8313 perf_event__output_id_sample(event, &handle, &sample);
8314
8315 perf_output_end(&handle);
8316 out:
8317 comm_event->event_id.header.size = size;
8318 }
8319
8320 static void perf_event_comm_event(struct perf_comm_event *comm_event)
8321 {
8322 char comm[TASK_COMM_LEN];
8323 unsigned int size;
8324
8325 memset(comm, 0, sizeof(comm));
8326 strscpy(comm, comm_event->task->comm, sizeof(comm));
8327 size = ALIGN(strlen(comm)+1, sizeof(u64));
8328
8329 comm_event->comm = comm;
8330 comm_event->comm_size = size;
8331
8332 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8333
8334 perf_iterate_sb(perf_event_comm_output,
8335 comm_event,
8336 NULL);
8337 }
8338
8339 void perf_event_comm(struct task_struct *task, bool exec)
8340 {
8341 struct perf_comm_event comm_event;
8342
8343 if (!atomic_read(&nr_comm_events))
8344 return;
8345
8346 comm_event = (struct perf_comm_event){
8347 .task = task,
8348 /* .comm */
8349 /* .comm_size */
8350 .event_id = {
8351 .header = {
8352 .type = PERF_RECORD_COMM,
8353 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
8354 /* .size */
8355 },
8356 /* .pid */
8357 /* .tid */
8358 },
8359 };
8360
8361 perf_event_comm_event(&comm_event);
8362 }
8363
8364 /*
8365 * namespaces tracking
8366 */
8367
8368 struct perf_namespaces_event {
8369 struct task_struct *task;
8370
8371 struct {
8372 struct perf_event_header header;
8373
8374 u32 pid;
8375 u32 tid;
8376 u64 nr_namespaces;
8377 struct perf_ns_link_info link_info[NR_NAMESPACES];
8378 } event_id;
8379 };
8380
8381 static int perf_event_namespaces_match(struct perf_event *event)
8382 {
8383 return event->attr.namespaces;
8384 }
8385
8386 static void perf_event_namespaces_output(struct perf_event *event,
8387 void *data)
8388 {
8389 struct perf_namespaces_event *namespaces_event = data;
8390 struct perf_output_handle handle;
8391 struct perf_sample_data sample;
8392 u16 header_size = namespaces_event->event_id.header.size;
8393 int ret;
8394
8395 if (!perf_event_namespaces_match(event))
8396 return;
8397
8398 perf_event_header__init_id(&namespaces_event->event_id.header,
8399 &sample, event);
8400 ret = perf_output_begin(&handle, &sample, event,
8401 namespaces_event->event_id.header.size);
8402 if (ret)
8403 goto out;
8404
8405 namespaces_event->event_id.pid = perf_event_pid(event,
8406 namespaces_event->task);
8407 namespaces_event->event_id.tid = perf_event_tid(event,
8408 namespaces_event->task);
8409
8410 perf_output_put(&handle, namespaces_event->event_id);
8411
8412 perf_event__output_id_sample(event, &handle, &sample);
8413
8414 perf_output_end(&handle);
8415 out:
8416 namespaces_event->event_id.header.size = header_size;
8417 }
8418
8419 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
8420 struct task_struct *task,
8421 const struct proc_ns_operations *ns_ops)
8422 {
8423 struct path ns_path;
8424 struct inode *ns_inode;
8425 int error;
8426
8427 error = ns_get_path(&ns_path, task, ns_ops);
8428 if (!error) {
8429 ns_inode = ns_path.dentry->d_inode;
8430 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
8431 ns_link_info->ino = ns_inode->i_ino;
8432 path_put(&ns_path);
8433 }
8434 }
8435
8436 void perf_event_namespaces(struct task_struct *task)
8437 {
8438 struct perf_namespaces_event namespaces_event;
8439 struct perf_ns_link_info *ns_link_info;
8440
8441 if (!atomic_read(&nr_namespaces_events))
8442 return;
8443
8444 namespaces_event = (struct perf_namespaces_event){
8445 .task = task,
8446 .event_id = {
8447 .header = {
8448 .type = PERF_RECORD_NAMESPACES,
8449 .misc = 0,
8450 .size = sizeof(namespaces_event.event_id),
8451 },
8452 /* .pid */
8453 /* .tid */
8454 .nr_namespaces = NR_NAMESPACES,
8455 /* .link_info[NR_NAMESPACES] */
8456 },
8457 };
8458
8459 ns_link_info = namespaces_event.event_id.link_info;
8460
8461 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
8462 task, &mntns_operations);
8463
8464 #ifdef CONFIG_USER_NS
8465 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
8466 task, &userns_operations);
8467 #endif
8468 #ifdef CONFIG_NET_NS
8469 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
8470 task, &netns_operations);
8471 #endif
8472 #ifdef CONFIG_UTS_NS
8473 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
8474 task, &utsns_operations);
8475 #endif
8476 #ifdef CONFIG_IPC_NS
8477 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
8478 task, &ipcns_operations);
8479 #endif
8480 #ifdef CONFIG_PID_NS
8481 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
8482 task, &pidns_operations);
8483 #endif
8484 #ifdef CONFIG_CGROUPS
8485 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
8486 task, &cgroupns_operations);
8487 #endif
8488
8489 perf_iterate_sb(perf_event_namespaces_output,
8490 &namespaces_event,
8491 NULL);
8492 }
8493
8494 /*
8495 * cgroup tracking
8496 */
8497 #ifdef CONFIG_CGROUP_PERF
8498
8499 struct perf_cgroup_event {
8500 char *path;
8501 int path_size;
8502 struct {
8503 struct perf_event_header header;
8504 u64 id;
8505 char path[];
8506 } event_id;
8507 };
8508
8509 static int perf_event_cgroup_match(struct perf_event *event)
8510 {
8511 return event->attr.cgroup;
8512 }
8513
8514 static void perf_event_cgroup_output(struct perf_event *event, void *data)
8515 {
8516 struct perf_cgroup_event *cgroup_event = data;
8517 struct perf_output_handle handle;
8518 struct perf_sample_data sample;
8519 u16 header_size = cgroup_event->event_id.header.size;
8520 int ret;
8521
8522 if (!perf_event_cgroup_match(event))
8523 return;
8524
8525 perf_event_header__init_id(&cgroup_event->event_id.header,
8526 &sample, event);
8527 ret = perf_output_begin(&handle, &sample, event,
8528 cgroup_event->event_id.header.size);
8529 if (ret)
8530 goto out;
8531
8532 perf_output_put(&handle, cgroup_event->event_id);
8533 __output_copy(&handle, cgroup_event->path, cgroup_event->path_size);
8534
8535 perf_event__output_id_sample(event, &handle, &sample);
8536
8537 perf_output_end(&handle);
8538 out:
8539 cgroup_event->event_id.header.size = header_size;
8540 }
8541
8542 static void perf_event_cgroup(struct cgroup *cgrp)
8543 {
8544 struct perf_cgroup_event cgroup_event;
8545 char path_enomem[16] = "//enomem";
8546 char *pathname;
8547 size_t size;
8548
8549 if (!atomic_read(&nr_cgroup_events))
8550 return;
8551
8552 cgroup_event = (struct perf_cgroup_event){
8553 .event_id = {
8554 .header = {
8555 .type = PERF_RECORD_CGROUP,
8556 .misc = 0,
8557 .size = sizeof(cgroup_event.event_id),
8558 },
8559 .id = cgroup_id(cgrp),
8560 },
8561 };
8562
8563 pathname = kmalloc(PATH_MAX, GFP_KERNEL);
8564 if (pathname == NULL) {
8565 cgroup_event.path = path_enomem;
8566 } else {
8567 /* just to be sure to have enough space for alignment */
8568 cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64));
8569 cgroup_event.path = pathname;
8570 }
8571
8572 /*
8573 * Since our buffer works in 8 byte units we need to align our string
8574 * size to a multiple of 8. However, we must guarantee the tail end is
8575 * zero'd out to avoid leaking random bits to userspace.
8576 */
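/*
 * Worked example (an illustrative path, not taken from anywhere): a
 * cgroup path of "/system.slice" has strlen() + 1 == 14, so two extra
 * NUL bytes are appended and size becomes 16, keeping the record
 * 8-byte aligned.
 */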
8577 size = strlen(cgroup_event.path) + 1;
8578 while (!IS_ALIGNED(size, sizeof(u64)))
8579 cgroup_event.path[size++] = '\0';
8580
8581 cgroup_event.event_id.header.size += size;
8582 cgroup_event.path_size = size;
8583
8584 perf_iterate_sb(perf_event_cgroup_output,
8585 &cgroup_event,
8586 NULL);
8587
8588 kfree(pathname);
8589 }
8590
8591 #endif
8592
8593 /*
8594 * mmap tracking
8595 */
8596
8597 struct perf_mmap_event {
8598 struct vm_area_struct *vma;
8599
8600 const char *file_name;
8601 int file_size;
8602 int maj, min;
8603 u64 ino;
8604 u64 ino_generation;
8605 u32 prot, flags;
8606 u8 build_id[BUILD_ID_SIZE_MAX];
8607 u32 build_id_size;
8608
8609 struct {
8610 struct perf_event_header header;
8611
8612 u32 pid;
8613 u32 tid;
8614 u64 start;
8615 u64 len;
8616 u64 pgoff;
8617 } event_id;
8618 };
8619
8620 static int perf_event_mmap_match(struct perf_event *event,
8621 void *data)
8622 {
8623 struct perf_mmap_event *mmap_event = data;
8624 struct vm_area_struct *vma = mmap_event->vma;
8625 int executable = vma->vm_flags & VM_EXEC;
8626
8627 return (!executable && event->attr.mmap_data) ||
8628 (executable && (event->attr.mmap || event->attr.mmap2));
8629 }
8630
8631 static void perf_event_mmap_output(struct perf_event *event,
8632 void *data)
8633 {
8634 struct perf_mmap_event *mmap_event = data;
8635 struct perf_output_handle handle;
8636 struct perf_sample_data sample;
8637 int size = mmap_event->event_id.header.size;
8638 u32 type = mmap_event->event_id.header.type;
8639 bool use_build_id;
8640 int ret;
8641
8642 if (!perf_event_mmap_match(event, data))
8643 return;
8644
8645 if (event->attr.mmap2) {
8646 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
8647 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
8648 mmap_event->event_id.header.size += sizeof(mmap_event->min);
8649 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
8650 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
8651 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
8652 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
8653 }
8654
8655 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
8656 ret = perf_output_begin(&handle, &sample, event,
8657 mmap_event->event_id.header.size);
8658 if (ret)
8659 goto out;
8660
8661 mmap_event->event_id.pid = perf_event_pid(event, current);
8662 mmap_event->event_id.tid = perf_event_tid(event, current);
8663
8664 use_build_id = event->attr.build_id && mmap_event->build_id_size;
8665
8666 if (event->attr.mmap2 && use_build_id)
8667 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
8668
8669 perf_output_put(&handle, mmap_event->event_id);
8670
8671 if (event->attr.mmap2) {
8672 if (use_build_id) {
8673 u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 };
8674
8675 __output_copy(&handle, size, 4);
8676 __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX);
8677 } else {
8678 perf_output_put(&handle, mmap_event->maj);
8679 perf_output_put(&handle, mmap_event->min);
8680 perf_output_put(&handle, mmap_event->ino);
8681 perf_output_put(&handle, mmap_event->ino_generation);
8682 }
8683 perf_output_put(&handle, mmap_event->prot);
8684 perf_output_put(&handle, mmap_event->flags);
8685 }
8686
8687 __output_copy(&handle, mmap_event->file_name,
8688 mmap_event->file_size);
8689
8690 perf_event__output_id_sample(event, &handle, &sample);
8691
8692 perf_output_end(&handle);
8693 out:
8694 mmap_event->event_id.header.size = size;
8695 mmap_event->event_id.header.type = type;
8696 }
8697
8698 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
8699 {
8700 struct vm_area_struct *vma = mmap_event->vma;
8701 struct file *file = vma->vm_file;
8702 int maj = 0, min = 0;
8703 u64 ino = 0, gen = 0;
8704 u32 prot = 0, flags = 0;
8705 unsigned int size;
8706 char tmp[16];
8707 char *buf = NULL;
8708 char *name = NULL;
8709
8710 if (vma->vm_flags & VM_READ)
8711 prot |= PROT_READ;
8712 if (vma->vm_flags & VM_WRITE)
8713 prot |= PROT_WRITE;
8714 if (vma->vm_flags & VM_EXEC)
8715 prot |= PROT_EXEC;
8716
8717 if (vma->vm_flags & VM_MAYSHARE)
8718 flags = MAP_SHARED;
8719 else
8720 flags = MAP_PRIVATE;
8721
8722 if (vma->vm_flags & VM_LOCKED)
8723 flags |= MAP_LOCKED;
8724 if (is_vm_hugetlb_page(vma))
8725 flags |= MAP_HUGETLB;
8726
8727 if (file) {
8728 struct inode *inode;
8729 dev_t dev;
8730
8731 buf = kmalloc(PATH_MAX, GFP_KERNEL);
8732 if (!buf) {
8733 name = "//enomem";
8734 goto cpy_name;
8735 }
8736 /*
8737 * d_path() works from the end of the rb backwards, so we
8738 * need to add enough zero bytes after the string to handle
8739 * the 64bit alignment we do later.
8740 */
8741 name = file_path(file, buf, PATH_MAX - sizeof(u64));
8742 if (IS_ERR(name)) {
8743 name = "//toolong";
8744 goto cpy_name;
8745 }
8746 inode = file_inode(vma->vm_file);
8747 dev = inode->i_sb->s_dev;
8748 ino = inode->i_ino;
8749 gen = inode->i_generation;
8750 maj = MAJOR(dev);
8751 min = MINOR(dev);
8752
8753 goto got_name;
8754 } else {
8755 if (vma->vm_ops && vma->vm_ops->name)
8756 name = (char *) vma->vm_ops->name(vma);
8757 if (!name)
8758 name = (char *)arch_vma_name(vma);
8759 if (!name) {
8760 if (vma_is_initial_heap(vma))
8761 name = "[heap]";
8762 else if (vma_is_initial_stack(vma))
8763 name = "[stack]";
8764 else
8765 name = "//anon";
8766 }
8767 }
8768
8769 cpy_name:
8770 strscpy(tmp, name, sizeof(tmp));
8771 name = tmp;
8772 got_name:
8773 /*
8774 * Since our buffer works in 8 byte units we need to align our string
8775 * size to a multiple of 8. However, we must guarantee the tail end is
8776 * zero'd out to avoid leaking random bits to userspace.
8777 */
8778 size = strlen(name)+1;
8779 while (!IS_ALIGNED(size, sizeof(u64)))
8780 name[size++] = '\0';
8781
8782 mmap_event->file_name = name;
8783 mmap_event->file_size = size;
8784 mmap_event->maj = maj;
8785 mmap_event->min = min;
8786 mmap_event->ino = ino;
8787 mmap_event->ino_generation = gen;
8788 mmap_event->prot = prot;
8789 mmap_event->flags = flags;
8790
8791 if (!(vma->vm_flags & VM_EXEC))
8792 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
8793
8794 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
8795
8796 if (atomic_read(&nr_build_id_events))
8797 build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
8798
8799 perf_iterate_sb(perf_event_mmap_output,
8800 mmap_event,
8801 NULL);
8802
8803 kfree(buf);
8804 }
8805
8806 /*
8807 * Check whether inode and address range match filter criteria.
8808 */
8809 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
8810 struct file *file, unsigned long offset,
8811 unsigned long size)
8812 {
8813 /* d_inode(NULL) won't be equal to any mapped user-space file */
8814 if (!filter->path.dentry)
8815 return false;
8816
8817 if (d_inode(filter->path.dentry) != file_inode(file))
8818 return false;
8819
8820 if (filter->offset > offset + size)
8821 return false;
8822
8823 if (filter->offset + filter->size < offset)
8824 return false;
8825
8826 return true;
8827 }
8828
8829 static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
8830 struct vm_area_struct *vma,
8831 struct perf_addr_filter_range *fr)
8832 {
8833 unsigned long vma_size = vma->vm_end - vma->vm_start;
8834 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8835 struct file *file = vma->vm_file;
8836
8837 if (!perf_addr_filter_match(filter, file, off, vma_size))
8838 return false;
8839
8840 if (filter->offset < off) {
8841 fr->start = vma->vm_start;
8842 fr->size = min(vma_size, filter->size - (off - filter->offset));
8843 } else {
8844 fr->start = vma->vm_start + filter->offset - off;
8845 fr->size = min(vma->vm_end - fr->start, filter->size);
8846 }
8847
8848 return true;
8849 }
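/*
 * Example of the arithmetic above (hypothetical numbers): a filter
 * covering file offset 0x1000 with size 0x2000, applied to a VMA that
 * maps file offset 0 at vm_start 0x400000, yields fr->start == 0x401000
 * and fr->size == 0x2000 (clamped to the end of the VMA if the mapping
 * is shorter).
 */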
8850
8851 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
8852 {
8853 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8854 struct vm_area_struct *vma = data;
8855 struct perf_addr_filter *filter;
8856 unsigned int restart = 0, count = 0;
8857 unsigned long flags;
8858
8859 if (!has_addr_filter(event))
8860 return;
8861
8862 if (!vma->vm_file)
8863 return;
8864
8865 raw_spin_lock_irqsave(&ifh->lock, flags);
8866 list_for_each_entry(filter, &ifh->list, entry) {
8867 if (perf_addr_filter_vma_adjust(filter, vma,
8868 &event->addr_filter_ranges[count]))
8869 restart++;
8870
8871 count++;
8872 }
8873
8874 if (restart)
8875 event->addr_filters_gen++;
8876 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8877
8878 if (restart)
8879 perf_event_stop(event, 1);
8880 }
8881
8882 /*
8883 * Adjust all of the task's events' filters to the new vma
8884 */
8885 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
8886 {
8887 struct perf_event_context *ctx;
8888
8889 /*
8890 * Data tracing isn't supported yet and as such there is no need
8891 * to keep track of anything that isn't related to executable code:
8892 */
8893 if (!(vma->vm_flags & VM_EXEC))
8894 return;
8895
8896 rcu_read_lock();
8897 ctx = rcu_dereference(current->perf_event_ctxp);
8898 if (ctx)
8899 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
8900 rcu_read_unlock();
8901 }
8902
8903 void perf_event_mmap(struct vm_area_struct *vma)
8904 {
8905 struct perf_mmap_event mmap_event;
8906
8907 if (!atomic_read(&nr_mmap_events))
8908 return;
8909
8910 mmap_event = (struct perf_mmap_event){
8911 .vma = vma,
8912 /* .file_name */
8913 /* .file_size */
8914 .event_id = {
8915 .header = {
8916 .type = PERF_RECORD_MMAP,
8917 .misc = PERF_RECORD_MISC_USER,
8918 /* .size */
8919 },
8920 /* .pid */
8921 /* .tid */
8922 .start = vma->vm_start,
8923 .len = vma->vm_end - vma->vm_start,
8924 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
8925 },
8926 /* .maj (attr_mmap2 only) */
8927 /* .min (attr_mmap2 only) */
8928 /* .ino (attr_mmap2 only) */
8929 /* .ino_generation (attr_mmap2 only) */
8930 /* .prot (attr_mmap2 only) */
8931 /* .flags (attr_mmap2 only) */
8932 };
8933
8934 perf_addr_filters_adjust(vma);
8935 perf_event_mmap_event(&mmap_event);
8936 }
8937
8938 void perf_event_aux_event(struct perf_event *event, unsigned long head,
8939 unsigned long size, u64 flags)
8940 {
8941 struct perf_output_handle handle;
8942 struct perf_sample_data sample;
8943 struct perf_aux_event {
8944 struct perf_event_header header;
8945 u64 offset;
8946 u64 size;
8947 u64 flags;
8948 } rec = {
8949 .header = {
8950 .type = PERF_RECORD_AUX,
8951 .misc = 0,
8952 .size = sizeof(rec),
8953 },
8954 .offset = head,
8955 .size = size,
8956 .flags = flags,
8957 };
8958 int ret;
8959
8960 perf_event_header__init_id(&rec.header, &sample, event);
8961 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
8962
8963 if (ret)
8964 return;
8965
8966 perf_output_put(&handle, rec);
8967 perf_event__output_id_sample(event, &handle, &sample);
8968
8969 perf_output_end(&handle);
8970 }
8971
8972 /*
8973 * Lost/dropped samples logging
8974 */
8975 void perf_log_lost_samples(struct perf_event *event, u64 lost)
8976 {
8977 struct perf_output_handle handle;
8978 struct perf_sample_data sample;
8979 int ret;
8980
8981 struct {
8982 struct perf_event_header header;
8983 u64 lost;
8984 } lost_samples_event = {
8985 .header = {
8986 .type = PERF_RECORD_LOST_SAMPLES,
8987 .misc = 0,
8988 .size = sizeof(lost_samples_event),
8989 },
8990 .lost = lost,
8991 };
8992
8993 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
8994
8995 ret = perf_output_begin(&handle, &sample, event,
8996 lost_samples_event.header.size);
8997 if (ret)
8998 return;
8999
9000 perf_output_put(&handle, lost_samples_event);
9001 perf_event__output_id_sample(event, &handle, &sample);
9002 perf_output_end(&handle);
9003 }
9004
9005 /*
9006 * context_switch tracking
9007 */
9008
9009 struct perf_switch_event {
9010 struct task_struct *task;
9011 struct task_struct *next_prev;
9012
9013 struct {
9014 struct perf_event_header header;
9015 u32 next_prev_pid;
9016 u32 next_prev_tid;
9017 } event_id;
9018 };
9019
9020 static int perf_event_switch_match(struct perf_event *event)
9021 {
9022 return event->attr.context_switch;
9023 }
9024
9025 static void perf_event_switch_output(struct perf_event *event, void *data)
9026 {
9027 struct perf_switch_event *se = data;
9028 struct perf_output_handle handle;
9029 struct perf_sample_data sample;
9030 int ret;
9031
9032 if (!perf_event_switch_match(event))
9033 return;
9034
9035 /* Only CPU-wide events are allowed to see next/prev pid/tid */
9036 if (event->ctx->task) {
9037 se->event_id.header.type = PERF_RECORD_SWITCH;
9038 se->event_id.header.size = sizeof(se->event_id.header);
9039 } else {
9040 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
9041 se->event_id.header.size = sizeof(se->event_id);
9042 se->event_id.next_prev_pid =
9043 perf_event_pid(event, se->next_prev);
9044 se->event_id.next_prev_tid =
9045 perf_event_tid(event, se->next_prev);
9046 }
9047
9048 perf_event_header__init_id(&se->event_id.header, &sample, event);
9049
9050 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
9051 if (ret)
9052 return;
9053
9054 if (event->ctx->task)
9055 perf_output_put(&handle, se->event_id.header);
9056 else
9057 perf_output_put(&handle, se->event_id);
9058
9059 perf_event__output_id_sample(event, &handle, &sample);
9060
9061 perf_output_end(&handle);
9062 }
9063
9064 static void perf_event_switch(struct task_struct *task,
9065 struct task_struct *next_prev, bool sched_in)
9066 {
9067 struct perf_switch_event switch_event;
9068
9069 /* N.B. caller checks nr_switch_events != 0 */
9070
9071 switch_event = (struct perf_switch_event){
9072 .task = task,
9073 .next_prev = next_prev,
9074 .event_id = {
9075 .header = {
9076 /* .type */
9077 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
9078 /* .size */
9079 },
9080 /* .next_prev_pid */
9081 /* .next_prev_tid */
9082 },
9083 };
9084
9085 if (!sched_in && task->on_rq) {
9086 switch_event.event_id.header.misc |=
9087 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
9088 }
9089
9090 perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
9091 }
9092
9093 /*
9094 * IRQ throttle logging
9095 */
9096
9097 static void perf_log_throttle(struct perf_event *event, int enable)
9098 {
9099 struct perf_output_handle handle;
9100 struct perf_sample_data sample;
9101 int ret;
9102
9103 struct {
9104 struct perf_event_header header;
9105 u64 time;
9106 u64 id;
9107 u64 stream_id;
9108 } throttle_event = {
9109 .header = {
9110 .type = PERF_RECORD_THROTTLE,
9111 .misc = 0,
9112 .size = sizeof(throttle_event),
9113 },
9114 .time = perf_event_clock(event),
9115 .id = primary_event_id(event),
9116 .stream_id = event->id,
9117 };
9118
9119 if (enable)
9120 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
9121
9122 perf_event_header__init_id(&throttle_event.header, &sample, event);
9123
9124 ret = perf_output_begin(&handle, &sample, event,
9125 throttle_event.header.size);
9126 if (ret)
9127 return;
9128
9129 perf_output_put(&handle, throttle_event);
9130 perf_event__output_id_sample(event, &handle, &sample);
9131 perf_output_end(&handle);
9132 }
9133
9134 /*
9135 * ksymbol register/unregister tracking
9136 */
9137
9138 struct perf_ksymbol_event {
9139 const char *name;
9140 int name_len;
9141 struct {
9142 struct perf_event_header header;
9143 u64 addr;
9144 u32 len;
9145 u16 ksym_type;
9146 u16 flags;
9147 } event_id;
9148 };
9149
9150 static int perf_event_ksymbol_match(struct perf_event *event)
9151 {
9152 return event->attr.ksymbol;
9153 }
9154
9155 static void perf_event_ksymbol_output(struct perf_event *event, void *data)
9156 {
9157 struct perf_ksymbol_event *ksymbol_event = data;
9158 struct perf_output_handle handle;
9159 struct perf_sample_data sample;
9160 int ret;
9161
9162 if (!perf_event_ksymbol_match(event))
9163 return;
9164
9165 perf_event_header__init_id(&ksymbol_event->event_id.header,
9166 &sample, event);
9167 ret = perf_output_begin(&handle, &sample, event,
9168 ksymbol_event->event_id.header.size);
9169 if (ret)
9170 return;
9171
9172 perf_output_put(&handle, ksymbol_event->event_id);
9173 __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
9174 perf_event__output_id_sample(event, &handle, &sample);
9175
9176 perf_output_end(&handle);
9177 }
9178
9179 void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
9180 const char *sym)
9181 {
9182 struct perf_ksymbol_event ksymbol_event;
9183 char name[KSYM_NAME_LEN];
9184 u16 flags = 0;
9185 int name_len;
9186
9187 if (!atomic_read(&nr_ksymbol_events))
9188 return;
9189
9190 if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
9191 ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
9192 goto err;
9193
9194 strscpy(name, sym, KSYM_NAME_LEN);
9195 name_len = strlen(name) + 1;
9196 while (!IS_ALIGNED(name_len, sizeof(u64)))
9197 name[name_len++] = '\0';
9198 BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
9199
9200 if (unregister)
9201 flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
9202
9203 ksymbol_event = (struct perf_ksymbol_event){
9204 .name = name,
9205 .name_len = name_len,
9206 .event_id = {
9207 .header = {
9208 .type = PERF_RECORD_KSYMBOL,
9209 .size = sizeof(ksymbol_event.event_id) +
9210 name_len,
9211 },
9212 .addr = addr,
9213 .len = len,
9214 .ksym_type = ksym_type,
9215 .flags = flags,
9216 },
9217 };
9218
9219 perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
9220 return;
9221 err:
9222 WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
9223 }
9224
9225 /*
9226 * bpf program load/unload tracking
9227 */
9228
9229 struct perf_bpf_event {
9230 struct bpf_prog *prog;
9231 struct {
9232 struct perf_event_header header;
9233 u16 type;
9234 u16 flags;
9235 u32 id;
9236 u8 tag[BPF_TAG_SIZE];
9237 } event_id;
9238 };
9239
9240 static int perf_event_bpf_match(struct perf_event *event)
9241 {
9242 return event->attr.bpf_event;
9243 }
9244
9245 static void perf_event_bpf_output(struct perf_event *event, void *data)
9246 {
9247 struct perf_bpf_event *bpf_event = data;
9248 struct perf_output_handle handle;
9249 struct perf_sample_data sample;
9250 int ret;
9251
9252 if (!perf_event_bpf_match(event))
9253 return;
9254
9255 perf_event_header__init_id(&bpf_event->event_id.header,
9256 &sample, event);
9257 ret = perf_output_begin(&handle, &sample, event,
9258 bpf_event->event_id.header.size);
9259 if (ret)
9260 return;
9261
9262 perf_output_put(&handle, bpf_event->event_id);
9263 perf_event__output_id_sample(event, &handle, &sample);
9264
9265 perf_output_end(&handle);
9266 }
9267
9268 static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
9269 enum perf_bpf_event_type type)
9270 {
9271 bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
9272 int i;
9273
9274 if (prog->aux->func_cnt == 0) {
9275 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
9276 (u64)(unsigned long)prog->bpf_func,
9277 prog->jited_len, unregister,
9278 prog->aux->ksym.name);
9279 } else {
9280 for (i = 0; i < prog->aux->func_cnt; i++) {
9281 struct bpf_prog *subprog = prog->aux->func[i];
9282
9283 perf_event_ksymbol(
9284 PERF_RECORD_KSYMBOL_TYPE_BPF,
9285 (u64)(unsigned long)subprog->bpf_func,
9286 subprog->jited_len, unregister,
9287 subprog->aux->ksym.name);
9288 }
9289 }
9290 }
9291
9292 void perf_event_bpf_event(struct bpf_prog *prog,
9293 enum perf_bpf_event_type type,
9294 u16 flags)
9295 {
9296 struct perf_bpf_event bpf_event;
9297
9298 if (type <= PERF_BPF_EVENT_UNKNOWN ||
9299 type >= PERF_BPF_EVENT_MAX)
9300 return;
9301
9302 switch (type) {
9303 case PERF_BPF_EVENT_PROG_LOAD:
9304 case PERF_BPF_EVENT_PROG_UNLOAD:
9305 if (atomic_read(&nr_ksymbol_events))
9306 perf_event_bpf_emit_ksymbols(prog, type);
9307 break;
9308 default:
9309 break;
9310 }
9311
9312 if (!atomic_read(&nr_bpf_events))
9313 return;
9314
9315 bpf_event = (struct perf_bpf_event){
9316 .prog = prog,
9317 .event_id = {
9318 .header = {
9319 .type = PERF_RECORD_BPF_EVENT,
9320 .size = sizeof(bpf_event.event_id),
9321 },
9322 .type = type,
9323 .flags = flags,
9324 .id = prog->aux->id,
9325 },
9326 };
9327
9328 BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
9329
9330 memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
9331 perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
9332 }
9333
9334 struct perf_text_poke_event {
9335 const void *old_bytes;
9336 const void *new_bytes;
9337 size_t pad;
9338 u16 old_len;
9339 u16 new_len;
9340
9341 struct {
9342 struct perf_event_header header;
9343
9344 u64 addr;
9345 } event_id;
9346 };
9347
9348 static int perf_event_text_poke_match(struct perf_event *event)
9349 {
9350 return event->attr.text_poke;
9351 }
9352
9353 static void perf_event_text_poke_output(struct perf_event *event, void *data)
9354 {
9355 struct perf_text_poke_event *text_poke_event = data;
9356 struct perf_output_handle handle;
9357 struct perf_sample_data sample;
9358 u64 padding = 0;
9359 int ret;
9360
9361 if (!perf_event_text_poke_match(event))
9362 return;
9363
9364 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
9365
9366 ret = perf_output_begin(&handle, &sample, event,
9367 text_poke_event->event_id.header.size);
9368 if (ret)
9369 return;
9370
9371 perf_output_put(&handle, text_poke_event->event_id);
9372 perf_output_put(&handle, text_poke_event->old_len);
9373 perf_output_put(&handle, text_poke_event->new_len);
9374
9375 __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len);
9376 __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len);
9377
9378 if (text_poke_event->pad)
9379 __output_copy(&handle, &padding, text_poke_event->pad);
9380
9381 perf_event__output_id_sample(event, &handle, &sample);
9382
9383 perf_output_end(&handle);
9384 }
9385
9386 void perf_event_text_poke(const void *addr, const void *old_bytes,
9387 size_t old_len, const void *new_bytes, size_t new_len)
9388 {
9389 struct perf_text_poke_event text_poke_event;
9390 size_t tot, pad;
9391
9392 if (!atomic_read(&nr_text_poke_events))
9393 return;
9394
9395 tot = sizeof(text_poke_event.old_len) + old_len;
9396 tot += sizeof(text_poke_event.new_len) + new_len;
9397 pad = ALIGN(tot, sizeof(u64)) - tot;
9398
9399 text_poke_event = (struct perf_text_poke_event){
9400 .old_bytes = old_bytes,
9401 .new_bytes = new_bytes,
9402 .pad = pad,
9403 .old_len = old_len,
9404 .new_len = new_len,
9405 .event_id = {
9406 .header = {
9407 .type = PERF_RECORD_TEXT_POKE,
9408 .misc = PERF_RECORD_MISC_KERNEL,
9409 .size = sizeof(text_poke_event.event_id) + tot + pad,
9410 },
9411 .addr = (unsigned long)addr,
9412 },
9413 };
9414
9415 perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL);
9416 }
9417
9418 void perf_event_itrace_started(struct perf_event *event)
9419 {
9420 event->attach_state |= PERF_ATTACH_ITRACE;
9421 }
9422
9423 static void perf_log_itrace_start(struct perf_event *event)
9424 {
9425 struct perf_output_handle handle;
9426 struct perf_sample_data sample;
9427 struct perf_aux_event {
9428 struct perf_event_header header;
9429 u32 pid;
9430 u32 tid;
9431 } rec;
9432 int ret;
9433
9434 if (event->parent)
9435 event = event->parent;
9436
9437 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
9438 event->attach_state & PERF_ATTACH_ITRACE)
9439 return;
9440
9441 rec.header.type = PERF_RECORD_ITRACE_START;
9442 rec.header.misc = 0;
9443 rec.header.size = sizeof(rec);
9444 rec.pid = perf_event_pid(event, current);
9445 rec.tid = perf_event_tid(event, current);
9446
9447 perf_event_header__init_id(&rec.header, &sample, event);
9448 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9449
9450 if (ret)
9451 return;
9452
9453 perf_output_put(&handle, rec);
9454 perf_event__output_id_sample(event, &handle, &sample);
9455
9456 perf_output_end(&handle);
9457 }
9458
9459 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
9460 {
9461 struct perf_output_handle handle;
9462 struct perf_sample_data sample;
9463 struct perf_aux_event {
9464 struct perf_event_header header;
9465 u64 hw_id;
9466 } rec;
9467 int ret;
9468
9469 if (event->parent)
9470 event = event->parent;
9471
9472 rec.header.type = PERF_RECORD_AUX_OUTPUT_HW_ID;
9473 rec.header.misc = 0;
9474 rec.header.size = sizeof(rec);
9475 rec.hw_id = hw_id;
9476
9477 perf_event_header__init_id(&rec.header, &sample, event);
9478 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9479
9480 if (ret)
9481 return;
9482
9483 perf_output_put(&handle, rec);
9484 perf_event__output_id_sample(event, &handle, &sample);
9485
9486 perf_output_end(&handle);
9487 }
9488 EXPORT_SYMBOL_GPL(perf_report_aux_output_id);
9489
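/*
 * Editor's note, not part of the original source: a rough sketch of the
 * throttling scheme implemented below. perf_throttled_seq advances once per
 * timer tick; PMIs taken within the same tick are counted in hwc->interrupts.
 * If a single event exceeds max_samples_per_tick (derived from the
 * kernel.perf_event_max_sample_rate sysctl) and throttling was requested,
 * the event is marked MAX_INTERRUPTS, a PERF_RECORD_THROTTLE record is
 * logged, and the non-zero return value tells the caller to stop the event
 * until it is unthrottled on a later tick. For attr.freq events the sampling
 * period is additionally re-estimated from the time since the last PMI.
 */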
9490 static int
9491 __perf_event_account_interrupt(struct perf_event *event, int throttle)
9492 {
9493 struct hw_perf_event *hwc = &event->hw;
9494 int ret = 0;
9495 u64 seq;
9496
9497 seq = __this_cpu_read(perf_throttled_seq);
9498 if (seq != hwc->interrupts_seq) {
9499 hwc->interrupts_seq = seq;
9500 hwc->interrupts = 1;
9501 } else {
9502 hwc->interrupts++;
9503 if (unlikely(throttle &&
9504 hwc->interrupts > max_samples_per_tick)) {
9505 __this_cpu_inc(perf_throttled_count);
9506 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
9507 hwc->interrupts = MAX_INTERRUPTS;
9508 perf_log_throttle(event, 0);
9509 ret = 1;
9510 }
9511 }
9512
9513 if (event->attr.freq) {
9514 u64 now = perf_clock();
9515 s64 delta = now - hwc->freq_time_stamp;
9516
9517 hwc->freq_time_stamp = now;
9518
9519 if (delta > 0 && delta < 2*TICK_NSEC)
9520 perf_adjust_period(event, delta, hwc->last_period, true);
9521 }
9522
9523 return ret;
9524 }
9525
9526 int perf_event_account_interrupt(struct perf_event *event)
9527 {
9528 return __perf_event_account_interrupt(event, 1);
9529 }
9530
9531 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
9532 {
9533 /*
9534 * Due to interrupt latency (AKA "skid"), we may enter the
9535 * kernel before taking an overflow, even if the PMU is only
9536 * counting user events.
9537 */
9538 if (event->attr.exclude_kernel && !user_mode(regs))
9539 return false;
9540
9541 return true;
9542 }
9543
9544 /*
9545 * Generic event overflow handling, sampling.
9546 */
9547
9548 static int __perf_event_overflow(struct perf_event *event,
9549 int throttle, struct perf_sample_data *data,
9550 struct pt_regs *regs)
9551 {
9552 int events = atomic_read(&event->event_limit);
9553 int ret = 0;
9554
9555 /*
9556 * Non-sampling counters might still use the PMI to fold short
9557 * hardware counters; ignore those.
9558 */
9559 if (unlikely(!is_sampling_event(event)))
9560 return 0;
9561
9562 ret = __perf_event_account_interrupt(event, throttle);
9563
9564 /*
9565 * XXX event_limit might not quite work as expected on inherited
9566 * events
9567 */
9568
9569 event->pending_kill = POLL_IN;
9570 if (events && atomic_dec_and_test(&event->event_limit)) {
9571 ret = 1;
9572 event->pending_kill = POLL_HUP;
9573 perf_event_disable_inatomic(event);
9574 }
9575
9576 if (event->attr.sigtrap) {
9577 /*
9578 * The desired behaviour of sigtrap vs invalid samples is a bit
9579 * tricky; on the one hand, one should not lose the SIGTRAP if
9580 * it is the first event, on the other hand, we should also not
9581 * trigger the WARN or override the data address.
9582 */
9583 bool valid_sample = sample_is_allowed(event, regs);
9584 unsigned int pending_id = 1;
9585
9586 if (regs)
9587 pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
9588 if (!event->pending_sigtrap) {
9589 event->pending_sigtrap = pending_id;
9590 local_inc(&event->ctx->nr_pending);
9591 } else if (event->attr.exclude_kernel && valid_sample) {
9592 /*
9593 * Should not be able to return to user space without
9594 * consuming pending_sigtrap; with exceptions:
9595 *
9596 * 1. Where !exclude_kernel, events can overflow again
9597 * in the kernel without returning to user space.
9598 *
9599 * 2. Events that can overflow again before the IRQ-
9600 * work without user space progress (e.g. hrtimer).
9601 * To approximate progress (with false negatives),
9602 * check 32-bit hash of the current IP.
9603 */
9604 WARN_ON_ONCE(event->pending_sigtrap != pending_id);
9605 }
9606
9607 event->pending_addr = 0;
9608 if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
9609 event->pending_addr = data->addr;
9610 irq_work_queue(&event->pending_irq);
9611 }
9612
9613 READ_ONCE(event->overflow_handler)(event, data, regs);
9614
9615 if (*perf_event_fasync(event) && event->pending_kill) {
9616 event->pending_wakeup = 1;
9617 irq_work_queue(&event->pending_irq);
9618 }
9619
9620 return ret;
9621 }
9622
9623 int perf_event_overflow(struct perf_event *event,
9624 struct perf_sample_data *data,
9625 struct pt_regs *regs)
9626 {
9627 return __perf_event_overflow(event, 1, data, regs);
9628 }
9629
9630 /*
9631 * Generic software event infrastructure
9632 */
9633
9634 struct swevent_htable {
9635 struct swevent_hlist *swevent_hlist;
9636 struct mutex hlist_mutex;
9637 int hlist_refcount;
9638
9639 /* Recursion avoidance in each context */
9640 int recursion[PERF_NR_CONTEXTS];
9641 };
9642
9643 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
9644
9645 /*
9646 * We directly increment event->count and keep a second value in
9647 * event->hw.period_left to count intervals. This period event
9648 * is kept in the range [-sample_period, 0] so that we can use the
9649 * sign as trigger.
9650 */
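/*
 * A rough worked example of the arithmetic below (illustrative only):
 * with sample_period == 100, period_left normally sits somewhere in
 * [-100, 0). Each software event adds its count to period_left; while it
 * stays negative no sample is due. Once it crosses into >= 0 territory,
 * say period_left == 250 after a large increment, the function below
 * computes nr = (100 + 250) / 100 = 3 pending overflows and rewinds
 * period_left to 250 - 3 * 100 = -50, i.e. back into the negative range.
 */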
9651
9652 u64 perf_swevent_set_period(struct perf_event *event)
9653 {
9654 struct hw_perf_event *hwc = &event->hw;
9655 u64 period = hwc->last_period;
9656 u64 nr, offset;
9657 s64 old, val;
9658
9659 hwc->last_period = hwc->sample_period;
9660
9661 old = local64_read(&hwc->period_left);
9662 do {
9663 val = old;
9664 if (val < 0)
9665 return 0;
9666
9667 nr = div64_u64(period + val, period);
9668 offset = nr * period;
9669 val -= offset;
9670 } while (!local64_try_cmpxchg(&hwc->period_left, &old, val));
9671
9672 return nr;
9673 }
9674
9675 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
9676 struct perf_sample_data *data,
9677 struct pt_regs *regs)
9678 {
9679 struct hw_perf_event *hwc = &event->hw;
9680 int throttle = 0;
9681
9682 if (!overflow)
9683 overflow = perf_swevent_set_period(event);
9684
9685 if (hwc->interrupts == MAX_INTERRUPTS)
9686 return;
9687
9688 for (; overflow; overflow--) {
9689 if (__perf_event_overflow(event, throttle,
9690 data, regs)) {
9691 /*
9692 * We inhibit the overflow from happening when
9693 * hwc->interrupts == MAX_INTERRUPTS.
9694 */
9695 break;
9696 }
9697 throttle = 1;
9698 }
9699 }
9700
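/*
 * Editor's note, summarising the flow below (best-effort reading, not
 * authoritative): the event count is always bumped by @nr. A sample is
 * emitted immediately when the caller supplies an explicit period
 * (PERF_SAMPLE_PERIOD without attr.freq), or when @nr == 1 and
 * sample_period == 1 for a non-freq event; otherwise @nr is accumulated
 * into period_left and samples are only generated once enough events have
 * been counted to consume a full period (see perf_swevent_set_period()
 * above).
 */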
9701 static void perf_swevent_event(struct perf_event *event, u64 nr,
9702 struct perf_sample_data *data,
9703 struct pt_regs *regs)
9704 {
9705 struct hw_perf_event *hwc = &event->hw;
9706
9707 local64_add(nr, &event->count);
9708
9709 if (!regs)
9710 return;
9711
9712 if (!is_sampling_event(event))
9713 return;
9714
9715 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
9716 data->period = nr;
9717 return perf_swevent_overflow(event, 1, data, regs);
9718 } else
9719 data->period = event->hw.last_period;
9720
9721 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
9722 return perf_swevent_overflow(event, 1, data, regs);
9723
9724 if (local64_add_negative(nr, &hwc->period_left))
9725 return;
9726
9727 perf_swevent_overflow(event, 0, data, regs);
9728 }
9729
9730 static int perf_exclude_event(struct perf_event *event,
9731 struct pt_regs *regs)
9732 {
9733 if (event->hw.state & PERF_HES_STOPPED)
9734 return 1;
9735
9736 if (regs) {
9737 if (event->attr.exclude_user && user_mode(regs))
9738 return 1;
9739
9740 if (event->attr.exclude_kernel && !user_mode(regs))
9741 return 1;
9742 }
9743
9744 return 0;
9745 }
9746
9747 static int perf_swevent_match(struct perf_event *event,
9748 enum perf_type_id type,
9749 u32 event_id,
9750 struct perf_sample_data *data,
9751 struct pt_regs *regs)
9752 {
9753 if (event->attr.type != type)
9754 return 0;
9755
9756 if (event->attr.config != event_id)
9757 return 0;
9758
9759 if (perf_exclude_event(event, regs))
9760 return 0;
9761
9762 return 1;
9763 }
9764
9765 static inline u64 swevent_hash(u64 type, u32 event_id)
9766 {
9767 u64 val = event_id | (type << 32);
9768
9769 return hash_64(val, SWEVENT_HLIST_BITS);
9770 }
9771
9772 static inline struct hlist_head *
9773 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
9774 {
9775 u64 hash = swevent_hash(type, event_id);
9776
9777 return &hlist->heads[hash];
9778 }
9779
9780 /* For the read side: events when they trigger */
9781 static inline struct hlist_head *
9782 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
9783 {
9784 struct swevent_hlist *hlist;
9785
9786 hlist = rcu_dereference(swhash->swevent_hlist);
9787 if (!hlist)
9788 return NULL;
9789
9790 return __find_swevent_head(hlist, type, event_id);
9791 }
9792
9793 /* For the event head insertion and removal in the hlist */
9794 static inline struct hlist_head *
9795 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
9796 {
9797 struct swevent_hlist *hlist;
9798 u32 event_id = event->attr.config;
9799 u64 type = event->attr.type;
9800
9801 /*
9802 * Event scheduling is always serialized against hlist allocation
9803 * and release, which makes the protected version suitable here;
9804 * the context lock guarantees that.
9805 */
9806 hlist = rcu_dereference_protected(swhash->swevent_hlist,
9807 lockdep_is_held(&event->ctx->lock));
9808 if (!hlist)
9809 return NULL;
9810
9811 return __find_swevent_head(hlist, type, event_id);
9812 }
9813
9814 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
9815 u64 nr,
9816 struct perf_sample_data *data,
9817 struct pt_regs *regs)
9818 {
9819 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9820 struct perf_event *event;
9821 struct hlist_head *head;
9822
9823 rcu_read_lock();
9824 head = find_swevent_head_rcu(swhash, type, event_id);
9825 if (!head)
9826 goto end;
9827
9828 hlist_for_each_entry_rcu(event, head, hlist_entry) {
9829 if (perf_swevent_match(event, type, event_id, data, regs))
9830 perf_swevent_event(event, nr, data, regs);
9831 }
9832 end:
9833 rcu_read_unlock();
9834 }
9835
9836 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
9837
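/*
 * Editor's note: the recursion[] slots above are indexed by the current
 * execution context (task, softirq, hardirq, NMI); get_recursion_context()
 * (in kernel/events/internal.h) claims the slot for the running context or
 * returns a negative value if it is already taken, so a software event
 * raised while another one is being processed at the same level is dropped
 * rather than recursing.
 */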
9838 int perf_swevent_get_recursion_context(void)
9839 {
9840 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9841
9842 return get_recursion_context(swhash->recursion);
9843 }
9844 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
9845
9846 void perf_swevent_put_recursion_context(int rctx)
9847 {
9848 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9849
9850 put_recursion_context(swhash->recursion, rctx);
9851 }
9852
9853 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
9854 {
9855 struct perf_sample_data data;
9856
9857 if (WARN_ON_ONCE(!regs))
9858 return;
9859
9860 perf_sample_data_init(&data, addr, 0);
9861 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
9862 }
9863
9864 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
9865 {
9866 int rctx;
9867
9868 preempt_disable_notrace();
9869 rctx = perf_swevent_get_recursion_context();
9870 if (unlikely(rctx < 0))
9871 goto fail;
9872
9873 ___perf_sw_event(event_id, nr, regs, addr);
9874
9875 perf_swevent_put_recursion_context(rctx);
9876 fail:
9877 preempt_enable_notrace();
9878 }
9879
9880 static void perf_swevent_read(struct perf_event *event)
9881 {
9882 }
9883
9884 static int perf_swevent_add(struct perf_event *event, int flags)
9885 {
9886 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9887 struct hw_perf_event *hwc = &event->hw;
9888 struct hlist_head *head;
9889
9890 if (is_sampling_event(event)) {
9891 hwc->last_period = hwc->sample_period;
9892 perf_swevent_set_period(event);
9893 }
9894
9895 hwc->state = !(flags & PERF_EF_START);
9896
9897 head = find_swevent_head(swhash, event);
9898 if (WARN_ON_ONCE(!head))
9899 return -EINVAL;
9900
9901 hlist_add_head_rcu(&event->hlist_entry, head);
9902 perf_event_update_userpage(event);
9903
9904 return 0;
9905 }
9906
9907 static void perf_swevent_del(struct perf_event *event, int flags)
9908 {
9909 hlist_del_rcu(&event->hlist_entry);
9910 }
9911
9912 static void perf_swevent_start(struct perf_event *event, int flags)
9913 {
9914 event->hw.state = 0;
9915 }
9916
9917 static void perf_swevent_stop(struct perf_event *event, int flags)
9918 {
9919 event->hw.state = PERF_HES_STOPPED;
9920 }
9921
9922 /* Deref the hlist from the update side */
9923 static inline struct swevent_hlist *
9924 swevent_hlist_deref(struct swevent_htable *swhash)
9925 {
9926 return rcu_dereference_protected(swhash->swevent_hlist,
9927 lockdep_is_held(&swhash->hlist_mutex));
9928 }
9929
9930 static void swevent_hlist_release(struct swevent_htable *swhash)
9931 {
9932 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
9933
9934 if (!hlist)
9935 return;
9936
9937 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
9938 kfree_rcu(hlist, rcu_head);
9939 }
9940
9941 static void swevent_hlist_put_cpu(int cpu)
9942 {
9943 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9944
9945 mutex_lock(&swhash->hlist_mutex);
9946
9947 if (!--swhash->hlist_refcount)
9948 swevent_hlist_release(swhash);
9949
9950 mutex_unlock(&swhash->hlist_mutex);
9951 }
9952
9953 static void swevent_hlist_put(void)
9954 {
9955 int cpu;
9956
9957 for_each_possible_cpu(cpu)
9958 swevent_hlist_put_cpu(cpu);
9959 }
9960
9961 static int swevent_hlist_get_cpu(int cpu)
9962 {
9963 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9964 int err = 0;
9965
9966 mutex_lock(&swhash->hlist_mutex);
9967 if (!swevent_hlist_deref(swhash) &&
9968 cpumask_test_cpu(cpu, perf_online_mask)) {
9969 struct swevent_hlist *hlist;
9970
9971 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
9972 if (!hlist) {
9973 err = -ENOMEM;
9974 goto exit;
9975 }
9976 rcu_assign_pointer(swhash->swevent_hlist, hlist);
9977 }
9978 swhash->hlist_refcount++;
9979 exit:
9980 mutex_unlock(&swhash->hlist_mutex);
9981
9982 return err;
9983 }
9984
9985 static int swevent_hlist_get(void)
9986 {
9987 int err, cpu, failed_cpu;
9988
9989 mutex_lock(&pmus_lock);
9990 for_each_possible_cpu(cpu) {
9991 err = swevent_hlist_get_cpu(cpu);
9992 if (err) {
9993 failed_cpu = cpu;
9994 goto fail;
9995 }
9996 }
9997 mutex_unlock(&pmus_lock);
9998 return 0;
9999 fail:
10000 for_each_possible_cpu(cpu) {
10001 if (cpu == failed_cpu)
10002 break;
10003 swevent_hlist_put_cpu(cpu);
10004 }
10005 mutex_unlock(&pmus_lock);
10006 return err;
10007 }
10008
10009 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
10010
10011 static void sw_perf_event_destroy(struct perf_event *event)
10012 {
10013 u64 event_id = event->attr.config;
10014
10015 WARN_ON(event->parent);
10016
10017 static_key_slow_dec(&perf_swevent_enabled[event_id]);
10018 swevent_hlist_put();
10019 }
10020
10021 static struct pmu perf_cpu_clock; /* fwd declaration */
10022 static struct pmu perf_task_clock;
10023
10024 static int perf_swevent_init(struct perf_event *event)
10025 {
10026 u64 event_id = event->attr.config;
10027
10028 if (event->attr.type != PERF_TYPE_SOFTWARE)
10029 return -ENOENT;
10030
10031 /*
10032 * no branch sampling for software events
10033 */
10034 if (has_branch_stack(event))
10035 return -EOPNOTSUPP;
10036
10037 switch (event_id) {
10038 case PERF_COUNT_SW_CPU_CLOCK:
10039 event->attr.type = perf_cpu_clock.type;
10040 return -ENOENT;
10041 case PERF_COUNT_SW_TASK_CLOCK:
10042 event->attr.type = perf_task_clock.type;
10043 return -ENOENT;
10044
10045 default:
10046 break;
10047 }
10048
10049 if (event_id >= PERF_COUNT_SW_MAX)
10050 return -ENOENT;
10051
10052 if (!event->parent) {
10053 int err;
10054
10055 err = swevent_hlist_get();
10056 if (err)
10057 return err;
10058
10059 static_key_slow_inc(&perf_swevent_enabled[event_id]);
10060 event->destroy = sw_perf_event_destroy;
10061 }
10062
10063 return 0;
10064 }
10065
10066 static struct pmu perf_swevent = {
10067 .task_ctx_nr = perf_sw_context,
10068
10069 .capabilities = PERF_PMU_CAP_NO_NMI,
10070
10071 .event_init = perf_swevent_init,
10072 .add = perf_swevent_add,
10073 .del = perf_swevent_del,
10074 .start = perf_swevent_start,
10075 .stop = perf_swevent_stop,
10076 .read = perf_swevent_read,
10077 };
10078
10079 #ifdef CONFIG_EVENT_TRACING
10080
10081 static void tp_perf_event_destroy(struct perf_event *event)
10082 {
10083 perf_trace_destroy(event);
10084 }
10085
10086 static int perf_tp_event_init(struct perf_event *event)
10087 {
10088 int err;
10089
10090 if (event->attr.type != PERF_TYPE_TRACEPOINT)
10091 return -ENOENT;
10092
10093 /*
10094 * no branch sampling for tracepoint events
10095 */
10096 if (has_branch_stack(event))
10097 return -EOPNOTSUPP;
10098
10099 err = perf_trace_init(event);
10100 if (err)
10101 return err;
10102
10103 event->destroy = tp_perf_event_destroy;
10104
10105 return 0;
10106 }
10107
10108 static struct pmu perf_tracepoint = {
10109 .task_ctx_nr = perf_sw_context,
10110
10111 .event_init = perf_tp_event_init,
10112 .add = perf_trace_add,
10113 .del = perf_trace_del,
10114 .start = perf_swevent_start,
10115 .stop = perf_swevent_stop,
10116 .read = perf_swevent_read,
10117 };
10118
10119 static int perf_tp_filter_match(struct perf_event *event,
10120 struct perf_sample_data *data)
10121 {
10122 void *record = data->raw->frag.data;
10123
10124 /* only top level events have filters set */
10125 if (event->parent)
10126 event = event->parent;
10127
10128 if (likely(!event->filter) || filter_match_preds(event->filter, record))
10129 return 1;
10130 return 0;
10131 }
10132
10133 static int perf_tp_event_match(struct perf_event *event,
10134 struct perf_sample_data *data,
10135 struct pt_regs *regs)
10136 {
10137 if (event->hw.state & PERF_HES_STOPPED)
10138 return 0;
10139 /*
10140 * If exclude_kernel, only trace user-space tracepoints (uprobes)
10141 */
10142 if (event->attr.exclude_kernel && !user_mode(regs))
10143 return 0;
10144
10145 if (!perf_tp_filter_match(event, data))
10146 return 0;
10147
10148 return 1;
10149 }
10150
10151 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
10152 struct trace_event_call *call, u64 count,
10153 struct pt_regs *regs, struct hlist_head *head,
10154 struct task_struct *task)
10155 {
10156 if (bpf_prog_array_valid(call)) {
10157 *(struct pt_regs **)raw_data = regs;
10158 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
10159 perf_swevent_put_recursion_context(rctx);
10160 return;
10161 }
10162 }
10163 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
10164 rctx, task);
10165 }
10166 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
10167
10168 static void __perf_tp_event_target_task(u64 count, void *record,
10169 struct pt_regs *regs,
10170 struct perf_sample_data *data,
10171 struct perf_event *event)
10172 {
10173 struct trace_entry *entry = record;
10174
10175 if (event->attr.config != entry->type)
10176 return;
10177 /* Cannot deliver synchronous signal to other task. */
10178 if (event->attr.sigtrap)
10179 return;
10180 if (perf_tp_event_match(event, data, regs))
10181 perf_swevent_event(event, count, data, regs);
10182 }
10183
10184 static void perf_tp_event_target_task(u64 count, void *record,
10185 struct pt_regs *regs,
10186 struct perf_sample_data *data,
10187 struct perf_event_context *ctx)
10188 {
10189 unsigned int cpu = smp_processor_id();
10190 struct pmu *pmu = &perf_tracepoint;
10191 struct perf_event *event, *sibling;
10192
10193 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
10194 __perf_tp_event_target_task(count, record, regs, data, event);
10195 for_each_sibling_event(sibling, event)
10196 __perf_tp_event_target_task(count, record, regs, data, sibling);
10197 }
10198
10199 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
10200 __perf_tp_event_target_task(count, record, regs, data, event);
10201 for_each_sibling_event(sibling, event)
10202 __perf_tp_event_target_task(count, record, regs, data, sibling);
10203 }
10204 }
10205
10206 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
10207 struct pt_regs *regs, struct hlist_head *head, int rctx,
10208 struct task_struct *task)
10209 {
10210 struct perf_sample_data data;
10211 struct perf_event *event;
10212
10213 struct perf_raw_record raw = {
10214 .frag = {
10215 .size = entry_size,
10216 .data = record,
10217 },
10218 };
10219
10220 perf_sample_data_init(&data, 0, 0);
10221 perf_sample_save_raw_data(&data, &raw);
10222
10223 perf_trace_buf_update(record, event_type);
10224
10225 hlist_for_each_entry_rcu(event, head, hlist_entry) {
10226 if (perf_tp_event_match(event, &data, regs)) {
10227 perf_swevent_event(event, count, &data, regs);
10228
10229 /*
10230 * Here we reuse the same on-stack perf_sample_data;
10231 * some members in data are event-specific and
10232 * need to be re-computed for different swevents.
10233 * Re-initialize data->sample_flags safely to avoid
10234 * the problem that next event skips preparing data
10235 * because data->sample_flags is set.
10236 */
10237 perf_sample_data_init(&data, 0, 0);
10238 perf_sample_save_raw_data(&data, &raw);
10239 }
10240 }
10241
10242 /*
10243 * If we got specified a target task, also iterate its context and
10244 * deliver this event there too.
10245 */
10246 if (task && task != current) {
10247 struct perf_event_context *ctx;
10248
10249 rcu_read_lock();
10250 ctx = rcu_dereference(task->perf_event_ctxp);
10251 if (!ctx)
10252 goto unlock;
10253
10254 raw_spin_lock(&ctx->lock);
10255 perf_tp_event_target_task(count, record, regs, &data, ctx);
10256 raw_spin_unlock(&ctx->lock);
10257 unlock:
10258 rcu_read_unlock();
10259 }
10260
10261 perf_swevent_put_recursion_context(rctx);
10262 }
10263 EXPORT_SYMBOL_GPL(perf_tp_event);
10264
10265 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
10266 /*
10267 * Flags in config, used by dynamic PMU kprobe and uprobe
10268 * The flags should match the following PMU_FORMAT_ATTR().
10269 *
10270 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
10271 * if not set, create kprobe/uprobe
10272 *
10273 * The following values specify a reference counter (or semaphore in the
10274 * terminology of tools like dtrace, systemtap, etc.) for Userspace Statically
10275 * Defined Tracepoints (USDT). Currently, we use 32 bits for the offset.
10276 *
10277 * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config as the offset
10278 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left
10279 */
10280 enum perf_probe_config {
10281 PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */
10282 PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
10283 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
10284 };
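/*
 * Illustrative only (not part of the original source): a perf_event_attr
 * aimed at the "uprobe" PMU would encode the retprobe flag and the USDT
 * reference-counter offset into attr.config roughly like this, matching
 * the PMU_FORMAT_ATTR() strings below:
 *
 *	attr.config = ((__u64)ref_ctr_offset << PERF_UPROBE_REF_CTR_OFFSET_SHIFT)
 *		    | (is_uretprobe ? PERF_PROBE_CONFIG_IS_RETPROBE : 0);
 *
 * perf_uprobe_event_init() below recovers both fields from attr.config.
 */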
10285
10286 PMU_FORMAT_ATTR(retprobe, "config:0");
10287 #endif
10288
10289 #ifdef CONFIG_KPROBE_EVENTS
10290 static struct attribute *kprobe_attrs[] = {
10291 &format_attr_retprobe.attr,
10292 NULL,
10293 };
10294
10295 static struct attribute_group kprobe_format_group = {
10296 .name = "format",
10297 .attrs = kprobe_attrs,
10298 };
10299
10300 static const struct attribute_group *kprobe_attr_groups[] = {
10301 &kprobe_format_group,
10302 NULL,
10303 };
10304
10305 static int perf_kprobe_event_init(struct perf_event *event);
10306 static struct pmu perf_kprobe = {
10307 .task_ctx_nr = perf_sw_context,
10308 .event_init = perf_kprobe_event_init,
10309 .add = perf_trace_add,
10310 .del = perf_trace_del,
10311 .start = perf_swevent_start,
10312 .stop = perf_swevent_stop,
10313 .read = perf_swevent_read,
10314 .attr_groups = kprobe_attr_groups,
10315 };
10316
10317 static int perf_kprobe_event_init(struct perf_event *event)
10318 {
10319 int err;
10320 bool is_retprobe;
10321
10322 if (event->attr.type != perf_kprobe.type)
10323 return -ENOENT;
10324
10325 if (!perfmon_capable())
10326 return -EACCES;
10327
10328 /*
10329 * no branch sampling for probe events
10330 */
10331 if (has_branch_stack(event))
10332 return -EOPNOTSUPP;
10333
10334 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
10335 err = perf_kprobe_init(event, is_retprobe);
10336 if (err)
10337 return err;
10338
10339 event->destroy = perf_kprobe_destroy;
10340
10341 return 0;
10342 }
10343 #endif /* CONFIG_KPROBE_EVENTS */
10344
10345 #ifdef CONFIG_UPROBE_EVENTS
10346 PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
10347
10348 static struct attribute *uprobe_attrs[] = {
10349 &format_attr_retprobe.attr,
10350 &format_attr_ref_ctr_offset.attr,
10351 NULL,
10352 };
10353
10354 static struct attribute_group uprobe_format_group = {
10355 .name = "format",
10356 .attrs = uprobe_attrs,
10357 };
10358
10359 static const struct attribute_group *uprobe_attr_groups[] = {
10360 &uprobe_format_group,
10361 NULL,
10362 };
10363
10364 static int perf_uprobe_event_init(struct perf_event *event);
10365 static struct pmu perf_uprobe = {
10366 .task_ctx_nr = perf_sw_context,
10367 .event_init = perf_uprobe_event_init,
10368 .add = perf_trace_add,
10369 .del = perf_trace_del,
10370 .start = perf_swevent_start,
10371 .stop = perf_swevent_stop,
10372 .read = perf_swevent_read,
10373 .attr_groups = uprobe_attr_groups,
10374 };
10375
10376 static int perf_uprobe_event_init(struct perf_event *event)
10377 {
10378 int err;
10379 unsigned long ref_ctr_offset;
10380 bool is_retprobe;
10381
10382 if (event->attr.type != perf_uprobe.type)
10383 return -ENOENT;
10384
10385 if (!perfmon_capable())
10386 return -EACCES;
10387
10388 /*
10389 * no branch sampling for probe events
10390 */
10391 if (has_branch_stack(event))
10392 return -EOPNOTSUPP;
10393
10394 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
10395 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10396 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
10397 if (err)
10398 return err;
10399
10400 event->destroy = perf_uprobe_destroy;
10401
10402 return 0;
10403 }
10404 #endif /* CONFIG_UPROBE_EVENTS */
10405
10406 static inline void perf_tp_register(void)
10407 {
10408 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
10409 #ifdef CONFIG_KPROBE_EVENTS
10410 perf_pmu_register(&perf_kprobe, "kprobe", -1);
10411 #endif
10412 #ifdef CONFIG_UPROBE_EVENTS
10413 perf_pmu_register(&perf_uprobe, "uprobe", -1);
10414 #endif
10415 }
10416
10417 static void perf_event_free_filter(struct perf_event *event)
10418 {
10419 ftrace_profile_free_filter(event);
10420 }
10421
10422 #ifdef CONFIG_BPF_SYSCALL
10423 static void bpf_overflow_handler(struct perf_event *event,
10424 struct perf_sample_data *data,
10425 struct pt_regs *regs)
10426 {
10427 struct bpf_perf_event_data_kern ctx = {
10428 .data = data,
10429 .event = event,
10430 };
10431 struct bpf_prog *prog;
10432 int ret = 0;
10433
10434 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
10435 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
10436 goto out;
10437 rcu_read_lock();
10438 prog = READ_ONCE(event->prog);
10439 if (prog) {
10440 perf_prepare_sample(data, event, regs);
10441 ret = bpf_prog_run(prog, &ctx);
10442 }
10443 rcu_read_unlock();
10444 out:
10445 __this_cpu_dec(bpf_prog_active);
10446 if (!ret)
10447 return;
10448
10449 event->orig_overflow_handler(event, data, regs);
10450 }
10451
10452 static int perf_event_set_bpf_handler(struct perf_event *event,
10453 struct bpf_prog *prog,
10454 u64 bpf_cookie)
10455 {
10456 if (event->overflow_handler_context)
10457 /* hw breakpoint or kernel counter */
10458 return -EINVAL;
10459
10460 if (event->prog)
10461 return -EEXIST;
10462
10463 if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
10464 return -EINVAL;
10465
10466 if (event->attr.precise_ip &&
10467 prog->call_get_stack &&
10468 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
10469 event->attr.exclude_callchain_kernel ||
10470 event->attr.exclude_callchain_user)) {
10471 /*
10472 * On perf_event with precise_ip, calling bpf_get_stack()
10473 * may trigger unwinder warnings and occasional crashes.
10474 * bpf_get_[stack|stackid] works around this issue by using
10475 * callchain attached to perf_sample_data. If the
10476 * perf_event does not have a full (kernel and user) callchain
10477 * attached to perf_sample_data, do not allow attaching BPF
10478 * program that calls bpf_get_[stack|stackid].
10479 */
10480 return -EPROTO;
10481 }
10482
10483 event->prog = prog;
10484 event->bpf_cookie = bpf_cookie;
10485 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
10486 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
10487 return 0;
10488 }
10489
10490 static void perf_event_free_bpf_handler(struct perf_event *event)
10491 {
10492 struct bpf_prog *prog = event->prog;
10493
10494 if (!prog)
10495 return;
10496
10497 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
10498 event->prog = NULL;
10499 bpf_prog_put(prog);
10500 }
10501 #else
10502 static int perf_event_set_bpf_handler(struct perf_event *event,
10503 struct bpf_prog *prog,
10504 u64 bpf_cookie)
10505 {
10506 return -EOPNOTSUPP;
10507 }
10508 static void perf_event_free_bpf_handler(struct perf_event *event)
10509 {
10510 }
10511 #endif
10512
10513 /*
10514 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10515 * with perf_event_open()
10516 */
10517 static inline bool perf_event_is_tracing(struct perf_event *event)
10518 {
10519 if (event->pmu == &perf_tracepoint)
10520 return true;
10521 #ifdef CONFIG_KPROBE_EVENTS
10522 if (event->pmu == &perf_kprobe)
10523 return true;
10524 #endif
10525 #ifdef CONFIG_UPROBE_EVENTS
10526 if (event->pmu == &perf_uprobe)
10527 return true;
10528 #endif
10529 return false;
10530 }
10531
10532 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
10533 u64 bpf_cookie)
10534 {
10535 bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
10536
10537 if (!perf_event_is_tracing(event))
10538 return perf_event_set_bpf_handler(event, prog, bpf_cookie);
10539
10540 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
10541 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
10542 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
10543 is_syscall_tp = is_syscall_trace_event(event->tp_event);
10544 if (!is_kprobe && !is_uprobe && !is_tracepoint && !is_syscall_tp)
10545 /* bpf programs can only be attached to u/kprobe or tracepoint */
10546 return -EINVAL;
10547
10548 if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) ||
10549 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
10550 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
10551 return -EINVAL;
10552
10553 if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
10554 /* only uprobe programs are allowed to be sleepable */
10555 return -EINVAL;
10556
10557 /* Kprobe override only works for kprobes, not uprobes. */
10558 if (prog->kprobe_override && !is_kprobe)
10559 return -EINVAL;
10560
10561 if (is_tracepoint || is_syscall_tp) {
10562 int off = trace_event_get_offsets(event->tp_event);
10563
10564 if (prog->aux->max_ctx_offset > off)
10565 return -EACCES;
10566 }
10567
10568 return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
10569 }
10570
10571 void perf_event_free_bpf_prog(struct perf_event *event)
10572 {
10573 if (!perf_event_is_tracing(event)) {
10574 perf_event_free_bpf_handler(event);
10575 return;
10576 }
10577 perf_event_detach_bpf_prog(event);
10578 }
10579
10580 #else
10581
10582 static inline void perf_tp_register(void)
10583 {
10584 }
10585
10586 static void perf_event_free_filter(struct perf_event *event)
10587 {
10588 }
10589
10590 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
10591 u64 bpf_cookie)
10592 {
10593 return -ENOENT;
10594 }
10595
10596 void perf_event_free_bpf_prog(struct perf_event *event)
10597 {
10598 }
10599 #endif /* CONFIG_EVENT_TRACING */
10600
10601 #ifdef CONFIG_HAVE_HW_BREAKPOINT
10602 void perf_bp_event(struct perf_event *bp, void *data)
10603 {
10604 struct perf_sample_data sample;
10605 struct pt_regs *regs = data;
10606
10607 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
10608
10609 if (!bp->hw.state && !perf_exclude_event(bp, regs))
10610 perf_swevent_event(bp, 1, &sample, regs);
10611 }
10612 #endif
10613
10614 /*
10615 * Allocate a new address filter
10616 */
10617 static struct perf_addr_filter *
10618 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
10619 {
10620 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
10621 struct perf_addr_filter *filter;
10622
10623 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
10624 if (!filter)
10625 return NULL;
10626
10627 INIT_LIST_HEAD(&filter->entry);
10628 list_add_tail(&filter->entry, filters);
10629
10630 return filter;
10631 }
10632
10633 static void free_filters_list(struct list_head *filters)
10634 {
10635 struct perf_addr_filter *filter, *iter;
10636
10637 list_for_each_entry_safe(filter, iter, filters, entry) {
10638 path_put(&filter->path);
10639 list_del(&filter->entry);
10640 kfree(filter);
10641 }
10642 }
10643
10644 /*
10645 * Free existing address filters and optionally install new ones
10646 */
10647 static void perf_addr_filters_splice(struct perf_event *event,
10648 struct list_head *head)
10649 {
10650 unsigned long flags;
10651 LIST_HEAD(list);
10652
10653 if (!has_addr_filter(event))
10654 return;
10655
10656 /* don't bother with children, they don't have their own filters */
10657 if (event->parent)
10658 return;
10659
10660 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
10661
10662 list_splice_init(&event->addr_filters.list, &list);
10663 if (head)
10664 list_splice(head, &event->addr_filters.list);
10665
10666 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
10667
10668 free_filters_list(&list);
10669 }
10670
10671 /*
10672 * Scan through mm's vmas and see if one of them matches the
10673 * @filter; if so, adjust filter's address range.
10674 * Called with mm::mmap_lock down for reading.
10675 */
10676 static void perf_addr_filter_apply(struct perf_addr_filter *filter,
10677 struct mm_struct *mm,
10678 struct perf_addr_filter_range *fr)
10679 {
10680 struct vm_area_struct *vma;
10681 VMA_ITERATOR(vmi, mm, 0);
10682
10683 for_each_vma(vmi, vma) {
10684 if (!vma->vm_file)
10685 continue;
10686
10687 if (perf_addr_filter_vma_adjust(filter, vma, fr))
10688 return;
10689 }
10690 }
10691
10692 /*
10693 * Update event's address range filters based on the
10694 * task's existing mappings, if any.
10695 */
10696 static void perf_event_addr_filters_apply(struct perf_event *event)
10697 {
10698 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
10699 struct task_struct *task = READ_ONCE(event->ctx->task);
10700 struct perf_addr_filter *filter;
10701 struct mm_struct *mm = NULL;
10702 unsigned int count = 0;
10703 unsigned long flags;
10704
10705 /*
10706 * We may observe TASK_TOMBSTONE, which means that the event tear-down
10707 * will stop on the parent's child_mutex that our caller is also holding
10708 */
10709 if (task == TASK_TOMBSTONE)
10710 return;
10711
10712 if (ifh->nr_file_filters) {
10713 mm = get_task_mm(task);
10714 if (!mm)
10715 goto restart;
10716
10717 mmap_read_lock(mm);
10718 }
10719
10720 raw_spin_lock_irqsave(&ifh->lock, flags);
10721 list_for_each_entry(filter, &ifh->list, entry) {
10722 if (filter->path.dentry) {
10723 /*
10724 * Adjust base offset if the filter is associated to a
10725 * binary that needs to be mapped:
10726 */
10727 event->addr_filter_ranges[count].start = 0;
10728 event->addr_filter_ranges[count].size = 0;
10729
10730 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
10731 } else {
10732 event->addr_filter_ranges[count].start = filter->offset;
10733 event->addr_filter_ranges[count].size = filter->size;
10734 }
10735
10736 count++;
10737 }
10738
10739 event->addr_filters_gen++;
10740 raw_spin_unlock_irqrestore(&ifh->lock, flags);
10741
10742 if (ifh->nr_file_filters) {
10743 mmap_read_unlock(mm);
10744
10745 mmput(mm);
10746 }
10747
10748 restart:
10749 perf_event_stop(event, 1);
10750 }
10751
10752 /*
10753 * Address range filtering: limiting the data to certain
10754 * instruction address ranges. Filters are ioctl()ed to us from
10755 * userspace as ascii strings.
10756 *
10757 * Filter string format:
10758 *
10759 * ACTION RANGE_SPEC
10760 * where ACTION is one of the
10761 * * "filter": limit the trace to this region
10762 * * "start": start tracing from this address
10763 * * "stop": stop tracing at this address/region;
10764 * RANGE_SPEC is
10765 * * for kernel addresses: <start address>[/<size>]
10766 * * for object files: <start address>[/<size>]@</path/to/object/file>
10767 *
10768 * if <size> is not specified or is zero, the range is treated as a single
10769 * address; not valid for ACTION=="filter".
10770 */
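/*
 * Hypothetical examples of accepted filter strings (the paths and addresses
 * are made up for illustration):
 *
 *	filter 0x1000/0x2000@/usr/lib/libfoo.so
 *		- trace only the 0x2000-byte region at offset 0x1000 of the
 *		  mapped object
 *	start 0xffffffff81000000/0x4000
 *		- start tracing at this kernel address range
 *	stop 0x2000@/usr/bin/myapp
 *		- stop tracing at this single address within the object
 *
 * Several such filters may be supplied in one string, separated by
 * whitespace, commas or newlines (see the strsep() loop in the parser
 * below).
 */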
10771 enum {
10772 IF_ACT_NONE = -1,
10773 IF_ACT_FILTER,
10774 IF_ACT_START,
10775 IF_ACT_STOP,
10776 IF_SRC_FILE,
10777 IF_SRC_KERNEL,
10778 IF_SRC_FILEADDR,
10779 IF_SRC_KERNELADDR,
10780 };
10781
10782 enum {
10783 IF_STATE_ACTION = 0,
10784 IF_STATE_SOURCE,
10785 IF_STATE_END,
10786 };
10787
10788 static const match_table_t if_tokens = {
10789 { IF_ACT_FILTER, "filter" },
10790 { IF_ACT_START, "start" },
10791 { IF_ACT_STOP, "stop" },
10792 { IF_SRC_FILE, "%u/%u@%s" },
10793 { IF_SRC_KERNEL, "%u/%u" },
10794 { IF_SRC_FILEADDR, "%u@%s" },
10795 { IF_SRC_KERNELADDR, "%u" },
10796 { IF_ACT_NONE, NULL },
10797 };
10798
10799 /*
10800 * Address filter string parser
10801 */
10802 static int
10803 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
10804 struct list_head *filters)
10805 {
10806 struct perf_addr_filter *filter = NULL;
10807 char *start, *orig, *filename = NULL;
10808 substring_t args[MAX_OPT_ARGS];
10809 int state = IF_STATE_ACTION, token;
10810 unsigned int kernel = 0;
10811 int ret = -EINVAL;
10812
10813 orig = fstr = kstrdup(fstr, GFP_KERNEL);
10814 if (!fstr)
10815 return -ENOMEM;
10816
10817 while ((start = strsep(&fstr, " ,\n")) != NULL) {
10818 static const enum perf_addr_filter_action_t actions[] = {
10819 [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER,
10820 [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START,
10821 [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP,
10822 };
10823 ret = -EINVAL;
10824
10825 if (!*start)
10826 continue;
10827
10828 /* filter definition begins */
10829 if (state == IF_STATE_ACTION) {
10830 filter = perf_addr_filter_new(event, filters);
10831 if (!filter)
10832 goto fail;
10833 }
10834
10835 token = match_token(start, if_tokens, args);
10836 switch (token) {
10837 case IF_ACT_FILTER:
10838 case IF_ACT_START:
10839 case IF_ACT_STOP:
10840 if (state != IF_STATE_ACTION)
10841 goto fail;
10842
10843 filter->action = actions[token];
10844 state = IF_STATE_SOURCE;
10845 break;
10846
10847 case IF_SRC_KERNELADDR:
10848 case IF_SRC_KERNEL:
10849 kernel = 1;
10850 fallthrough;
10851
10852 case IF_SRC_FILEADDR:
10853 case IF_SRC_FILE:
10854 if (state != IF_STATE_SOURCE)
10855 goto fail;
10856
10857 *args[0].to = 0;
10858 ret = kstrtoul(args[0].from, 0, &filter->offset);
10859 if (ret)
10860 goto fail;
10861
10862 if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) {
10863 *args[1].to = 0;
10864 ret = kstrtoul(args[1].from, 0, &filter->size);
10865 if (ret)
10866 goto fail;
10867 }
10868
10869 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
10870 int fpos = token == IF_SRC_FILE ? 2 : 1;
10871
10872 kfree(filename);
10873 filename = match_strdup(&args[fpos]);
10874 if (!filename) {
10875 ret = -ENOMEM;
10876 goto fail;
10877 }
10878 }
10879
10880 state = IF_STATE_END;
10881 break;
10882
10883 default:
10884 goto fail;
10885 }
10886
10887 /*
10888 * Filter definition is fully parsed, validate and install it.
10889 * Make sure that it doesn't contradict itself or the event's
10890 * attribute.
10891 */
10892 if (state == IF_STATE_END) {
10893 ret = -EINVAL;
10894
10895 /*
10896 * ACTION "filter" must have a non-zero length region
10897 * specified.
10898 */
10899 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER &&
10900 !filter->size)
10901 goto fail;
10902
10903 if (!kernel) {
10904 if (!filename)
10905 goto fail;
10906
10907 /*
10908 * For now, we only support file-based filters
10909 * in per-task events; doing so for CPU-wide
10910 * events requires additional context switching
10911 * trickery, since the same object code will be
10912 * mapped at different virtual addresses in
10913 * different processes.
10914 */
10915 ret = -EOPNOTSUPP;
10916 if (!event->ctx->task)
10917 goto fail;
10918
10919 /* look up the path and grab its inode */
10920 ret = kern_path(filename, LOOKUP_FOLLOW,
10921 &filter->path);
10922 if (ret)
10923 goto fail;
10924
10925 ret = -EINVAL;
10926 if (!filter->path.dentry ||
10927 !S_ISREG(d_inode(filter->path.dentry)
10928 ->i_mode))
10929 goto fail;
10930
10931 event->addr_filters.nr_file_filters++;
10932 }
10933
10934 /* ready to consume more filters */
10935 kfree(filename);
10936 filename = NULL;
10937 state = IF_STATE_ACTION;
10938 filter = NULL;
10939 kernel = 0;
10940 }
10941 }
10942
10943 if (state != IF_STATE_ACTION)
10944 goto fail;
10945
10946 kfree(filename);
10947 kfree(orig);
10948
10949 return 0;
10950
10951 fail:
10952 kfree(filename);
10953 free_filters_list(filters);
10954 kfree(orig);
10955
10956 return ret;
10957 }
10958
10959 static int
10960 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
10961 {
10962 LIST_HEAD(filters);
10963 int ret;
10964
10965 /*
10966 * Since this is called from the perf_ioctl() path, we're already holding
10967 * ctx::mutex.
10968 */
10969 lockdep_assert_held(&event->ctx->mutex);
10970
10971 if (WARN_ON_ONCE(event->parent))
10972 return -EINVAL;
10973
10974 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
10975 if (ret)
10976 goto fail_clear_files;
10977
10978 ret = event->pmu->addr_filters_validate(&filters);
10979 if (ret)
10980 goto fail_free_filters;
10981
10982 /* remove existing filters, if any */
10983 perf_addr_filters_splice(event, &filters);
10984
10985 /* install new filters */
10986 perf_event_for_each_child(event, perf_event_addr_filters_apply);
10987
10988 return ret;
10989
10990 fail_free_filters:
10991 free_filters_list(&filters);
10992
10993 fail_clear_files:
10994 event->addr_filters.nr_file_filters = 0;
10995
10996 return ret;
10997 }
10998
10999 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
11000 {
11001 int ret = -EINVAL;
11002 char *filter_str;
11003
11004 filter_str = strndup_user(arg, PAGE_SIZE);
11005 if (IS_ERR(filter_str))
11006 return PTR_ERR(filter_str);
11007
11008 #ifdef CONFIG_EVENT_TRACING
11009 if (perf_event_is_tracing(event)) {
11010 struct perf_event_context *ctx = event->ctx;
11011
11012 /*
11013 * Beware, here be dragons!!
11014 *
11015 * the tracepoint muck will deadlock against ctx->mutex, but
11016 * the tracepoint stuff does not actually need it. So
11017 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
11018 * already have a reference on ctx.
11019 *
11020 * This can result in event getting moved to a different ctx,
11021 * but that does not affect the tracepoint state.
11022 */
11023 mutex_unlock(&ctx->mutex);
11024 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
11025 mutex_lock(&ctx->mutex);
11026 } else
11027 #endif
11028 if (has_addr_filter(event))
11029 ret = perf_event_set_addr_filter(event, filter_str);
11030
11031 kfree(filter_str);
11032 return ret;
11033 }
11034
11035 /*
11036 * hrtimer based swevent callback
11037 */
11038
11039 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
11040 {
11041 enum hrtimer_restart ret = HRTIMER_RESTART;
11042 struct perf_sample_data data;
11043 struct pt_regs *regs;
11044 struct perf_event *event;
11045 u64 period;
11046
11047 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
11048
11049 if (event->state != PERF_EVENT_STATE_ACTIVE)
11050 return HRTIMER_NORESTART;
11051
11052 event->pmu->read(event);
11053
11054 perf_sample_data_init(&data, 0, event->hw.last_period);
11055 regs = get_irq_regs();
11056
11057 if (regs && !perf_exclude_event(event, regs)) {
11058 if (!(event->attr.exclude_idle && is_idle_task(current)))
11059 if (__perf_event_overflow(event, 1, &data, regs))
11060 ret = HRTIMER_NORESTART;
11061 }
11062
11063 period = max_t(u64, 10000, event->hw.sample_period);
11064 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
11065
11066 return ret;
11067 }
11068
11069 static void perf_swevent_start_hrtimer(struct perf_event *event)
11070 {
11071 struct hw_perf_event *hwc = &event->hw;
11072 s64 period;
11073
11074 if (!is_sampling_event(event))
11075 return;
11076
11077 period = local64_read(&hwc->period_left);
11078 if (period) {
11079 if (period < 0)
11080 period = 10000;
11081
11082 local64_set(&hwc->period_left, 0);
11083 } else {
11084 period = max_t(u64, 10000, hwc->sample_period);
11085 }
11086 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
11087 HRTIMER_MODE_REL_PINNED_HARD);
11088 }
11089
11090 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
11091 {
11092 struct hw_perf_event *hwc = &event->hw;
11093
11094 if (is_sampling_event(event)) {
11095 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
11096 local64_set(&hwc->period_left, ktime_to_ns(remaining));
11097
11098 hrtimer_cancel(&hwc->hrtimer);
11099 }
11100 }
11101
11102 static void perf_swevent_init_hrtimer(struct perf_event *event)
11103 {
11104 struct hw_perf_event *hwc = &event->hw;
11105
11106 if (!is_sampling_event(event))
11107 return;
11108
11109 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
11110 hwc->hrtimer.function = perf_swevent_hrtimer;
11111
11112 /*
11113 * Since hrtimers have a fixed rate, we can do a static freq->period
11114 * mapping and avoid the whole period adjust feedback stuff.
11115 */
11116 if (event->attr.freq) {
11117 long freq = event->attr.sample_freq;
11118
11119 event->attr.sample_period = NSEC_PER_SEC / freq;
11120 hwc->sample_period = event->attr.sample_period;
11121 local64_set(&hwc->period_left, hwc->sample_period);
11122 hwc->last_period = hwc->sample_period;
11123 event->attr.freq = 0;
11124 }
11125 }
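/*
 * For example (illustrative numbers): attr.freq = 1 with sample_freq = 4000
 * is converted above into a fixed sample_period of NSEC_PER_SEC / 4000 =
 * 250000 ns, so the hrtimer simply fires every 250 us and attr.freq is
 * cleared; no dynamic period adjustment is needed for this PMU.
 */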
11126
11127 /*
11128 * Software event: cpu wall time clock
11129 */
11130
11131 static void cpu_clock_event_update(struct perf_event *event)
11132 {
11133 s64 prev;
11134 u64 now;
11135
11136 now = local_clock();
11137 prev = local64_xchg(&event->hw.prev_count, now);
11138 local64_add(now - prev, &event->count);
11139 }
11140
11141 static void cpu_clock_event_start(struct perf_event *event, int flags)
11142 {
11143 local64_set(&event->hw.prev_count, local_clock());
11144 perf_swevent_start_hrtimer(event);
11145 }
11146
11147 static void cpu_clock_event_stop(struct perf_event *event, int flags)
11148 {
11149 perf_swevent_cancel_hrtimer(event);
11150 cpu_clock_event_update(event);
11151 }
11152
11153 static int cpu_clock_event_add(struct perf_event *event, int flags)
11154 {
11155 if (flags & PERF_EF_START)
11156 cpu_clock_event_start(event, flags);
11157 perf_event_update_userpage(event);
11158
11159 return 0;
11160 }
11161
11162 static void cpu_clock_event_del(struct perf_event *event, int flags)
11163 {
11164 cpu_clock_event_stop(event, flags);
11165 }
11166
11167 static void cpu_clock_event_read(struct perf_event *event)
11168 {
11169 cpu_clock_event_update(event);
11170 }
11171
11172 static int cpu_clock_event_init(struct perf_event *event)
11173 {
11174 if (event->attr.type != perf_cpu_clock.type)
11175 return -ENOENT;
11176
11177 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
11178 return -ENOENT;
11179
11180 /*
11181 * no branch sampling for software events
11182 */
11183 if (has_branch_stack(event))
11184 return -EOPNOTSUPP;
11185
11186 perf_swevent_init_hrtimer(event);
11187
11188 return 0;
11189 }
11190
11191 static struct pmu perf_cpu_clock = {
11192 .task_ctx_nr = perf_sw_context,
11193
11194 .capabilities = PERF_PMU_CAP_NO_NMI,
11195 .dev = PMU_NULL_DEV,
11196
11197 .event_init = cpu_clock_event_init,
11198 .add = cpu_clock_event_add,
11199 .del = cpu_clock_event_del,
11200 .start = cpu_clock_event_start,
11201 .stop = cpu_clock_event_stop,
11202 .read = cpu_clock_event_read,
11203 };
11204
11205 /*
11206 * Software event: task time clock
11207 */
11208
11209 static void task_clock_event_update(struct perf_event *event, u64 now)
11210 {
11211 u64 prev;
11212 s64 delta;
11213
11214 prev = local64_xchg(&event->hw.prev_count, now);
11215 delta = now - prev;
11216 local64_add(delta, &event->count);
11217 }
11218
11219 static void task_clock_event_start(struct perf_event *event, int flags)
11220 {
11221 local64_set(&event->hw.prev_count, event->ctx->time);
11222 perf_swevent_start_hrtimer(event);
11223 }
11224
11225 static void task_clock_event_stop(struct perf_event *event, int flags)
11226 {
11227 perf_swevent_cancel_hrtimer(event);
11228 task_clock_event_update(event, event->ctx->time);
11229 }
11230
11231 static int task_clock_event_add(struct perf_event *event, int flags)
11232 {
11233 if (flags & PERF_EF_START)
11234 task_clock_event_start(event, flags);
11235 perf_event_update_userpage(event);
11236
11237 return 0;
11238 }
11239
11240 static void task_clock_event_del(struct perf_event *event, int flags)
11241 {
11242 task_clock_event_stop(event, PERF_EF_UPDATE);
11243 }
11244
11245 static void task_clock_event_read(struct perf_event *event)
11246 {
11247 u64 now = perf_clock();
11248 u64 delta = now - event->ctx->timestamp;
11249 u64 time = event->ctx->time + delta;
11250
11251 task_clock_event_update(event, time);
11252 }
11253
11254 static int task_clock_event_init(struct perf_event *event)
11255 {
11256 if (event->attr.type != perf_task_clock.type)
11257 return -ENOENT;
11258
11259 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
11260 return -ENOENT;
11261
11262 /*
11263 * no branch sampling for software events
11264 */
11265 if (has_branch_stack(event))
11266 return -EOPNOTSUPP;
11267
11268 perf_swevent_init_hrtimer(event);
11269
11270 return 0;
11271 }
11272
11273 static struct pmu perf_task_clock = {
11274 .task_ctx_nr = perf_sw_context,
11275
11276 .capabilities = PERF_PMU_CAP_NO_NMI,
11277 .dev = PMU_NULL_DEV,
11278
11279 .event_init = task_clock_event_init,
11280 .add = task_clock_event_add,
11281 .del = task_clock_event_del,
11282 .start = task_clock_event_start,
11283 .stop = task_clock_event_stop,
11284 .read = task_clock_event_read,
11285 };
11286
11287 static void perf_pmu_nop_void(struct pmu *pmu)
11288 {
11289 }
11290
11291 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
11292 {
11293 }
11294
11295 static int perf_pmu_nop_int(struct pmu *pmu)
11296 {
11297 return 0;
11298 }
11299
11300 static int perf_event_nop_int(struct perf_event *event, u64 value)
11301 {
11302 return 0;
11303 }
11304
11305 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
11306
11307 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
11308 {
11309 __this_cpu_write(nop_txn_flags, flags);
11310
11311 if (flags & ~PERF_PMU_TXN_ADD)
11312 return;
11313
11314 perf_pmu_disable(pmu);
11315 }
11316
11317 static int perf_pmu_commit_txn(struct pmu *pmu)
11318 {
11319 unsigned int flags = __this_cpu_read(nop_txn_flags);
11320
11321 __this_cpu_write(nop_txn_flags, 0);
11322
11323 if (flags & ~PERF_PMU_TXN_ADD)
11324 return 0;
11325
11326 perf_pmu_enable(pmu);
11327 return 0;
11328 }
11329
11330 static void perf_pmu_cancel_txn(struct pmu *pmu)
11331 {
11332 unsigned int flags = __this_cpu_read(nop_txn_flags);
11333
11334 __this_cpu_write(nop_txn_flags, 0);
11335
11336 if (flags & ~PERF_PMU_TXN_ADD)
11337 return;
11338
11339 perf_pmu_enable(pmu);
11340 }
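
/*
 * Illustration only: the calling pattern the core follows when it schedules a
 * whole group with these callbacks (heavily simplified; the real logic lives
 * in group_sched_in()). For PMUs without pmu_enable/pmu_disable the nop stubs
 * above turn the whole transaction into a no-op.
 */
#if 0
	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);	/* stub: perf_pmu_disable()  */

	/* ... pmu->add() is called for the leader and every sibling ... */

	if (!pmu->commit_txn(pmu))		/* stub: perf_pmu_enable()   */
		return 0;			/* whole group accepted      */

	/* ... otherwise each already-added event is removed again ... */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
#endif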
11341
11342 static int perf_event_idx_default(struct perf_event *event)
11343 {
11344 return 0;
11345 }
11346
11347 static void free_pmu_context(struct pmu *pmu)
11348 {
11349 free_percpu(pmu->cpu_pmu_context);
11350 }
11351
11352 /*
11353 * Let userspace know that this PMU supports address range filtering:
11354 */
11355 static ssize_t nr_addr_filters_show(struct device *dev,
11356 struct device_attribute *attr,
11357 char *page)
11358 {
11359 struct pmu *pmu = dev_get_drvdata(dev);
11360
11361 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
11362 }
11363 DEVICE_ATTR_RO(nr_addr_filters);
11364
11365 static struct idr pmu_idr;
11366
11367 static ssize_t
11368 type_show(struct device *dev, struct device_attribute *attr, char *page)
11369 {
11370 struct pmu *pmu = dev_get_drvdata(dev);
11371
11372 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->type);
11373 }
11374 static DEVICE_ATTR_RO(type);
11375
11376 static ssize_t
11377 perf_event_mux_interval_ms_show(struct device *dev,
11378 struct device_attribute *attr,
11379 char *page)
11380 {
11381 struct pmu *pmu = dev_get_drvdata(dev);
11382
11383 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->hrtimer_interval_ms);
11384 }
11385
11386 static DEFINE_MUTEX(mux_interval_mutex);
11387
11388 static ssize_t
11389 perf_event_mux_interval_ms_store(struct device *dev,
11390 struct device_attribute *attr,
11391 const char *buf, size_t count)
11392 {
11393 struct pmu *pmu = dev_get_drvdata(dev);
11394 int timer, cpu, ret;
11395
11396 ret = kstrtoint(buf, 0, &timer);
11397 if (ret)
11398 return ret;
11399
11400 if (timer < 1)
11401 return -EINVAL;
11402
11403 /* same value, nothing to do */
11404 if (timer == pmu->hrtimer_interval_ms)
11405 return count;
11406
11407 mutex_lock(&mux_interval_mutex);
11408 pmu->hrtimer_interval_ms = timer;
11409
11410 /* update all cpuctx for this PMU */
11411 cpus_read_lock();
11412 for_each_online_cpu(cpu) {
11413 struct perf_cpu_pmu_context *cpc;
11414 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11415 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
11416
11417 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
11418 }
11419 cpus_read_unlock();
11420 mutex_unlock(&mux_interval_mutex);
11421
11422 return count;
11423 }
11424 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
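
/*
 * Illustration only: tuning the multiplexing interval from user space via the
 * event_source bus registered below. The "cpu" PMU directory name is just an
 * example; names differ per system.
 */
#if 0
	int fd = open("/sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms",
		      O_WRONLY);

	if (fd >= 0) {
		write(fd, "4", 1);	/* rotate multiplexed events every 4 ms */
		close(fd);
	}
#endif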
11425
11426 static struct attribute *pmu_dev_attrs[] = {
11427 &dev_attr_type.attr,
11428 &dev_attr_perf_event_mux_interval_ms.attr,
11429 &dev_attr_nr_addr_filters.attr,
11430 NULL,
11431 };
11432
11433 static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
11434 {
11435 struct device *dev = kobj_to_dev(kobj);
11436 struct pmu *pmu = dev_get_drvdata(dev);
11437
11438 if (n == 2 && !pmu->nr_addr_filters)
11439 return 0;
11440
11441 return a->mode;
11442 }
11443
11444 static struct attribute_group pmu_dev_attr_group = {
11445 .is_visible = pmu_dev_is_visible,
11446 .attrs = pmu_dev_attrs,
11447 };
11448
11449 static const struct attribute_group *pmu_dev_groups[] = {
11450 &pmu_dev_attr_group,
11451 NULL,
11452 };
11453
11454 static int pmu_bus_running;
11455 static struct bus_type pmu_bus = {
11456 .name = "event_source",
11457 .dev_groups = pmu_dev_groups,
11458 };
11459
11460 static void pmu_dev_release(struct device *dev)
11461 {
11462 kfree(dev);
11463 }
11464
11465 static int pmu_dev_alloc(struct pmu *pmu)
11466 {
11467 int ret = -ENOMEM;
11468
11469 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
11470 if (!pmu->dev)
11471 goto out;
11472
11473 pmu->dev->groups = pmu->attr_groups;
11474 device_initialize(pmu->dev);
11475
11476 dev_set_drvdata(pmu->dev, pmu);
11477 pmu->dev->bus = &pmu_bus;
11478 pmu->dev->parent = pmu->parent;
11479 pmu->dev->release = pmu_dev_release;
11480
11481 ret = dev_set_name(pmu->dev, "%s", pmu->name);
11482 if (ret)
11483 goto free_dev;
11484
11485 ret = device_add(pmu->dev);
11486 if (ret)
11487 goto free_dev;
11488
11489 if (pmu->attr_update) {
11490 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
11491 if (ret)
11492 goto del_dev;
11493 }
11494
11495 out:
11496 return ret;
11497
11498 del_dev:
11499 device_del(pmu->dev);
11500
11501 free_dev:
11502 put_device(pmu->dev);
11503 goto out;
11504 }
11505
11506 static struct lock_class_key cpuctx_mutex;
11507 static struct lock_class_key cpuctx_lock;
11508
11509 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
11510 {
11511 int cpu, ret, max = PERF_TYPE_MAX;
11512
11513 mutex_lock(&pmus_lock);
11514 ret = -ENOMEM;
11515 pmu->pmu_disable_count = alloc_percpu(int);
11516 if (!pmu->pmu_disable_count)
11517 goto unlock;
11518
11519 pmu->type = -1;
11520 if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
11521 ret = -EINVAL;
11522 goto free_pdc;
11523 }
11524
11525 pmu->name = name;
11526
11527 if (type >= 0)
11528 max = type;
11529
11530 ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
11531 if (ret < 0)
11532 goto free_pdc;
11533
11534 WARN_ON(type >= 0 && ret != type);
11535
11536 type = ret;
11537 pmu->type = type;
11538
11539 if (pmu_bus_running && !pmu->dev) {
11540 ret = pmu_dev_alloc(pmu);
11541 if (ret)
11542 goto free_idr;
11543 }
11544
11545 ret = -ENOMEM;
11546 pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
11547 if (!pmu->cpu_pmu_context)
11548 goto free_dev;
11549
11550 for_each_possible_cpu(cpu) {
11551 struct perf_cpu_pmu_context *cpc;
11552
11553 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11554 __perf_init_event_pmu_context(&cpc->epc, pmu);
11555 __perf_mux_hrtimer_init(cpc, cpu);
11556 }
11557
11558 if (!pmu->start_txn) {
11559 if (pmu->pmu_enable) {
11560 /*
11561 * If we have pmu_enable/pmu_disable calls, install
11562 * transaction stubs that use that to try and batch
11563 * hardware accesses.
11564 */
11565 pmu->start_txn = perf_pmu_start_txn;
11566 pmu->commit_txn = perf_pmu_commit_txn;
11567 pmu->cancel_txn = perf_pmu_cancel_txn;
11568 } else {
11569 pmu->start_txn = perf_pmu_nop_txn;
11570 pmu->commit_txn = perf_pmu_nop_int;
11571 pmu->cancel_txn = perf_pmu_nop_void;
11572 }
11573 }
11574
11575 if (!pmu->pmu_enable) {
11576 pmu->pmu_enable = perf_pmu_nop_void;
11577 pmu->pmu_disable = perf_pmu_nop_void;
11578 }
11579
11580 if (!pmu->check_period)
11581 pmu->check_period = perf_event_nop_int;
11582
11583 if (!pmu->event_idx)
11584 pmu->event_idx = perf_event_idx_default;
11585
11586 list_add_rcu(&pmu->entry, &pmus);
11587 atomic_set(&pmu->exclusive_cnt, 0);
11588 ret = 0;
11589 unlock:
11590 mutex_unlock(&pmus_lock);
11591
11592 return ret;
11593
11594 free_dev:
11595 if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
11596 device_del(pmu->dev);
11597 put_device(pmu->dev);
11598 }
11599
11600 free_idr:
11601 idr_remove(&pmu_idr, pmu->type);
11602
11603 free_pdc:
11604 free_percpu(pmu->pmu_disable_count);
11605 goto unlock;
11606 }
11607 EXPORT_SYMBOL_GPL(perf_pmu_register);
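
/*
 * Illustration only: a minimal, hypothetical PMU driver registering itself
 * with the function above. All names are made up; start_txn/commit_txn/
 * cancel_txn, check_period and event_idx are left NULL so the defaults
 * installed by perf_pmu_register() are used.
 */
#if 0
static struct pmu my_uncore_pmu = {
	.module		= THIS_MODULE,
	.task_ctx_nr	= perf_invalid_context,	/* uncore: no per-task events */
	.event_init	= my_uncore_event_init,
	.add		= my_uncore_add,
	.del		= my_uncore_del,
	.start		= my_uncore_start,
	.stop		= my_uncore_stop,
	.read		= my_uncore_read,
};

static int __init my_uncore_init(void)
{
	/* type -1: let the core assign a dynamic type id from pmu_idr */
	return perf_pmu_register(&my_uncore_pmu, "my_uncore", -1);
}
#endif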
11608
11609 void perf_pmu_unregister(struct pmu *pmu)
11610 {
11611 mutex_lock(&pmus_lock);
11612 list_del_rcu(&pmu->entry);
11613
11614 /*
11615 * We dereference the pmu list under both SRCU and regular RCU, so
11616 * synchronize against both of those.
11617 */
11618 synchronize_srcu(&pmus_srcu);
11619 synchronize_rcu();
11620
11621 free_percpu(pmu->pmu_disable_count);
11622 idr_remove(&pmu_idr, pmu->type);
11623 if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
11624 if (pmu->nr_addr_filters)
11625 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
11626 device_del(pmu->dev);
11627 put_device(pmu->dev);
11628 }
11629 free_pmu_context(pmu);
11630 mutex_unlock(&pmus_lock);
11631 }
11632 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
11633
11634 static inline bool has_extended_regs(struct perf_event *event)
11635 {
11636 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
11637 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
11638 }
11639
11640 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
11641 {
11642 struct perf_event_context *ctx = NULL;
11643 int ret;
11644
11645 if (!try_module_get(pmu->module))
11646 return -ENODEV;
11647
11648 /*
11649 * A number of pmu->event_init() methods iterate the sibling_list to,
11650 * for example, validate if the group fits on the PMU. Therefore,
11651 * if this is a sibling event, acquire the ctx->mutex to protect
11652 * the sibling_list.
11653 */
11654 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
11655 /*
11656 * This ctx->mutex can nest when we're called through
11657 * inheritance. See the perf_event_ctx_lock_nested() comment.
11658 */
11659 ctx = perf_event_ctx_lock_nested(event->group_leader,
11660 SINGLE_DEPTH_NESTING);
11661 BUG_ON(!ctx);
11662 }
11663
11664 event->pmu = pmu;
11665 ret = pmu->event_init(event);
11666
11667 if (ctx)
11668 perf_event_ctx_unlock(event->group_leader, ctx);
11669
11670 if (!ret) {
11671 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
11672 has_extended_regs(event))
11673 ret = -EOPNOTSUPP;
11674
11675 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
11676 event_has_any_exclude_flag(event))
11677 ret = -EINVAL;
11678
11679 if (ret && event->destroy)
11680 event->destroy(event);
11681 }
11682
11683 if (ret)
11684 module_put(pmu->module);
11685
11686 return ret;
11687 }
11688
11689 static struct pmu *perf_init_event(struct perf_event *event)
11690 {
11691 bool extended_type = false;
11692 int idx, type, ret;
11693 struct pmu *pmu;
11694
11695 idx = srcu_read_lock(&pmus_srcu);
11696
11697 /*
11698 * Save the original type before calling pmu->event_init(), since certain
11699 * pmus overwrite event->attr.type to forward the event to another pmu.
11700 */
11701 event->orig_type = event->attr.type;
11702
11703 /* Try parent's PMU first: */
11704 if (event->parent && event->parent->pmu) {
11705 pmu = event->parent->pmu;
11706 ret = perf_try_init_event(pmu, event);
11707 if (!ret)
11708 goto unlock;
11709 }
11710
11711 /*
11712 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
11713 * are often aliases for PERF_TYPE_RAW.
11714 */
11715 type = event->attr.type;
11716 if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) {
11717 type = event->attr.config >> PERF_PMU_TYPE_SHIFT;
11718 if (!type) {
11719 type = PERF_TYPE_RAW;
11720 } else {
11721 extended_type = true;
11722 event->attr.config &= PERF_HW_EVENT_MASK;
11723 }
11724 }
11725
11726 again:
11727 rcu_read_lock();
11728 pmu = idr_find(&pmu_idr, type);
11729 rcu_read_unlock();
11730 if (pmu) {
11731 if (event->attr.type != type && type != PERF_TYPE_RAW &&
11732 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
11733 goto fail;
11734
11735 ret = perf_try_init_event(pmu, event);
11736 if (ret == -ENOENT && event->attr.type != type && !extended_type) {
11737 type = event->attr.type;
11738 goto again;
11739 }
11740
11741 if (ret)
11742 pmu = ERR_PTR(ret);
11743
11744 goto unlock;
11745 }
11746
11747 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
11748 ret = perf_try_init_event(pmu, event);
11749 if (!ret)
11750 goto unlock;
11751
11752 if (ret != -ENOENT) {
11753 pmu = ERR_PTR(ret);
11754 goto unlock;
11755 }
11756 }
11757 fail:
11758 pmu = ERR_PTR(-ENOENT);
11759 unlock:
11760 srcu_read_unlock(&pmus_srcu, idx);
11761
11762 return pmu;
11763 }
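
/*
 * Illustration only: how the extended hardware type decoding above is used
 * from the other side. On a hybrid system, user space can direct a generic
 * hardware event at one particular core PMU by placing that PMU's dynamic
 * type in the upper 32 bits of attr.config (PERF_PMU_TYPE_SHIFT); the value
 * 8 below is a made-up PMU type.
 */
#if 0
	struct perf_event_attr attr = {
		.size	= sizeof(attr),
		.type	= PERF_TYPE_HARDWARE,
		.config	= ((__u64)8 << PERF_PMU_TYPE_SHIFT) |
			  PERF_COUNT_HW_CPU_CYCLES,
	};
#endif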
11764
11765 static void attach_sb_event(struct perf_event *event)
11766 {
11767 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
11768
11769 raw_spin_lock(&pel->lock);
11770 list_add_rcu(&event->sb_list, &pel->list);
11771 raw_spin_unlock(&pel->lock);
11772 }
11773
11774 /*
11775 * We keep a list of all !task (and therefore per-cpu) events
11776 * that need to receive side-band records.
11777 *
11778 * This avoids having to scan all the various PMU per-cpu contexts
11779 * looking for them.
11780 */
11781 static void account_pmu_sb_event(struct perf_event *event)
11782 {
11783 if (is_sb_event(event))
11784 attach_sb_event(event);
11785 }
11786
11787 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
11788 static void account_freq_event_nohz(void)
11789 {
11790 #ifdef CONFIG_NO_HZ_FULL
11791 /* Lock so we don't race with concurrent unaccount */
11792 spin_lock(&nr_freq_lock);
11793 if (atomic_inc_return(&nr_freq_events) == 1)
11794 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
11795 spin_unlock(&nr_freq_lock);
11796 #endif
11797 }
11798
11799 static void account_freq_event(void)
11800 {
11801 if (tick_nohz_full_enabled())
11802 account_freq_event_nohz();
11803 else
11804 atomic_inc(&nr_freq_events);
11805 }
11806
11807
11808 static void account_event(struct perf_event *event)
11809 {
11810 bool inc = false;
11811
11812 if (event->parent)
11813 return;
11814
11815 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
11816 inc = true;
11817 if (event->attr.mmap || event->attr.mmap_data)
11818 atomic_inc(&nr_mmap_events);
11819 if (event->attr.build_id)
11820 atomic_inc(&nr_build_id_events);
11821 if (event->attr.comm)
11822 atomic_inc(&nr_comm_events);
11823 if (event->attr.namespaces)
11824 atomic_inc(&nr_namespaces_events);
11825 if (event->attr.cgroup)
11826 atomic_inc(&nr_cgroup_events);
11827 if (event->attr.task)
11828 atomic_inc(&nr_task_events);
11829 if (event->attr.freq)
11830 account_freq_event();
11831 if (event->attr.context_switch) {
11832 atomic_inc(&nr_switch_events);
11833 inc = true;
11834 }
11835 if (has_branch_stack(event))
11836 inc = true;
11837 if (is_cgroup_event(event))
11838 inc = true;
11839 if (event->attr.ksymbol)
11840 atomic_inc(&nr_ksymbol_events);
11841 if (event->attr.bpf_event)
11842 atomic_inc(&nr_bpf_events);
11843 if (event->attr.text_poke)
11844 atomic_inc(&nr_text_poke_events);
11845
11846 if (inc) {
11847 /*
11848 * We need the mutex here because static_branch_enable()
11849 * must complete *before* the perf_sched_count increment
11850 * becomes visible.
11851 */
11852 if (atomic_inc_not_zero(&perf_sched_count))
11853 goto enabled;
11854
11855 mutex_lock(&perf_sched_mutex);
11856 if (!atomic_read(&perf_sched_count)) {
11857 static_branch_enable(&perf_sched_events);
11858 /*
11859 * Guarantee that all CPUs observe the key change and
11860 * call the perf scheduling hooks before proceeding to
11861 * install events that need them.
11862 */
11863 synchronize_rcu();
11864 }
11865 /*
11866 * Now that we have waited for the sync_sched(), allow further
11867 * increments to by-pass the mutex.
11868 */
11869 atomic_inc(&perf_sched_count);
11870 mutex_unlock(&perf_sched_mutex);
11871 }
11872 enabled:
11873
11874 account_pmu_sb_event(event);
11875 }
11876
11877 /*
11878 * Allocate and initialize an event structure
11879 */
11880 static struct perf_event *
11881 perf_event_alloc(struct perf_event_attr *attr, int cpu,
11882 struct task_struct *task,
11883 struct perf_event *group_leader,
11884 struct perf_event *parent_event,
11885 perf_overflow_handler_t overflow_handler,
11886 void *context, int cgroup_fd)
11887 {
11888 struct pmu *pmu;
11889 struct perf_event *event;
11890 struct hw_perf_event *hwc;
11891 long err = -EINVAL;
11892 int node;
11893
11894 if ((unsigned)cpu >= nr_cpu_ids) {
11895 if (!task || cpu != -1)
11896 return ERR_PTR(-EINVAL);
11897 }
11898 if (attr->sigtrap && !task) {
11899 /* Requires a task: avoid signalling random tasks. */
11900 return ERR_PTR(-EINVAL);
11901 }
11902
11903 node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
11904 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
11905 node);
11906 if (!event)
11907 return ERR_PTR(-ENOMEM);
11908
11909 /*
11910 * Single events are their own group leaders, with an
11911 * empty sibling list:
11912 */
11913 if (!group_leader)
11914 group_leader = event;
11915
11916 mutex_init(&event->child_mutex);
11917 INIT_LIST_HEAD(&event->child_list);
11918
11919 INIT_LIST_HEAD(&event->event_entry);
11920 INIT_LIST_HEAD(&event->sibling_list);
11921 INIT_LIST_HEAD(&event->active_list);
11922 init_event_group(event);
11923 INIT_LIST_HEAD(&event->rb_entry);
11924 INIT_LIST_HEAD(&event->active_entry);
11925 INIT_LIST_HEAD(&event->addr_filters.list);
11926 INIT_HLIST_NODE(&event->hlist_entry);
11927
11928
11929 init_waitqueue_head(&event->waitq);
11930 init_irq_work(&event->pending_irq, perf_pending_irq);
11931 init_task_work(&event->pending_task, perf_pending_task);
11932
11933 mutex_init(&event->mmap_mutex);
11934 raw_spin_lock_init(&event->addr_filters.lock);
11935
11936 atomic_long_set(&event->refcount, 1);
11937 event->cpu = cpu;
11938 event->attr = *attr;
11939 event->group_leader = group_leader;
11940 event->pmu = NULL;
11941 event->oncpu = -1;
11942
11943 event->parent = parent_event;
11944
11945 event->ns = get_pid_ns(task_active_pid_ns(current));
11946 event->id = atomic64_inc_return(&perf_event_id);
11947
11948 event->state = PERF_EVENT_STATE_INACTIVE;
11949
11950 if (parent_event)
11951 event->event_caps = parent_event->event_caps;
11952
11953 if (task) {
11954 event->attach_state = PERF_ATTACH_TASK;
11955 /*
11956 * XXX pmu::event_init needs to know what task to account to
11957 * and we cannot use the ctx information because we need the
11958 * pmu before we get a ctx.
11959 */
11960 event->hw.target = get_task_struct(task);
11961 }
11962
11963 event->clock = &local_clock;
11964 if (parent_event)
11965 event->clock = parent_event->clock;
11966
11967 if (!overflow_handler && parent_event) {
11968 overflow_handler = parent_event->overflow_handler;
11969 context = parent_event->overflow_handler_context;
11970 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
11971 if (overflow_handler == bpf_overflow_handler) {
11972 struct bpf_prog *prog = parent_event->prog;
11973
11974 bpf_prog_inc(prog);
11975 event->prog = prog;
11976 event->orig_overflow_handler =
11977 parent_event->orig_overflow_handler;
11978 }
11979 #endif
11980 }
11981
11982 if (overflow_handler) {
11983 event->overflow_handler = overflow_handler;
11984 event->overflow_handler_context = context;
11985 } else if (is_write_backward(event)){
11986 event->overflow_handler = perf_event_output_backward;
11987 event->overflow_handler_context = NULL;
11988 } else {
11989 event->overflow_handler = perf_event_output_forward;
11990 event->overflow_handler_context = NULL;
11991 }
11992
11993 perf_event__state_init(event);
11994
11995 pmu = NULL;
11996
11997 hwc = &event->hw;
11998 hwc->sample_period = attr->sample_period;
11999 if (attr->freq && attr->sample_freq)
12000 hwc->sample_period = 1;
12001 hwc->last_period = hwc->sample_period;
12002
12003 local64_set(&hwc->period_left, hwc->sample_period);
12004
12005 /*
12006 * We currently do not support PERF_SAMPLE_READ on inherited events.
12007 * See perf_output_read().
12008 */
12009 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
12010 goto err_ns;
12011
12012 if (!has_branch_stack(event))
12013 event->attr.branch_sample_type = 0;
12014
12015 pmu = perf_init_event(event);
12016 if (IS_ERR(pmu)) {
12017 err = PTR_ERR(pmu);
12018 goto err_ns;
12019 }
12020
12021 /*
12022 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
12023 * events (they don't make sense as the cgroup will be different
12024 * on other CPUs in the uncore mask).
12025 */
12026 if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
12027 err = -EINVAL;
12028 goto err_pmu;
12029 }
12030
12031 if (event->attr.aux_output &&
12032 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
12033 err = -EOPNOTSUPP;
12034 goto err_pmu;
12035 }
12036
12037 if (cgroup_fd != -1) {
12038 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
12039 if (err)
12040 goto err_pmu;
12041 }
12042
12043 err = exclusive_event_init(event);
12044 if (err)
12045 goto err_pmu;
12046
12047 if (has_addr_filter(event)) {
12048 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
12049 sizeof(struct perf_addr_filter_range),
12050 GFP_KERNEL);
12051 if (!event->addr_filter_ranges) {
12052 err = -ENOMEM;
12053 goto err_per_task;
12054 }
12055
12056 /*
12057 * Clone the parent's vma offsets: they are valid until exec()
12058 * even if the mm is not shared with the parent.
12059 */
12060 if (event->parent) {
12061 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
12062
12063 raw_spin_lock_irq(&ifh->lock);
12064 memcpy(event->addr_filter_ranges,
12065 event->parent->addr_filter_ranges,
12066 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
12067 raw_spin_unlock_irq(&ifh->lock);
12068 }
12069
12070 /* force hw sync on the address filters */
12071 event->addr_filters_gen = 1;
12072 }
12073
12074 if (!event->parent) {
12075 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
12076 err = get_callchain_buffers(attr->sample_max_stack);
12077 if (err)
12078 goto err_addr_filters;
12079 }
12080 }
12081
12082 err = security_perf_event_alloc(event);
12083 if (err)
12084 goto err_callchain_buffer;
12085
12086 /* symmetric to unaccount_event() in _free_event() */
12087 account_event(event);
12088
12089 return event;
12090
12091 err_callchain_buffer:
12092 if (!event->parent) {
12093 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
12094 put_callchain_buffers();
12095 }
12096 err_addr_filters:
12097 kfree(event->addr_filter_ranges);
12098
12099 err_per_task:
12100 exclusive_event_destroy(event);
12101
12102 err_pmu:
12103 if (is_cgroup_event(event))
12104 perf_detach_cgroup(event);
12105 if (event->destroy)
12106 event->destroy(event);
12107 module_put(pmu->module);
12108 err_ns:
12109 if (event->hw.target)
12110 put_task_struct(event->hw.target);
12111 call_rcu(&event->rcu_head, free_event_rcu);
12112
12113 return ERR_PTR(err);
12114 }
12115
12116 static int perf_copy_attr(struct perf_event_attr __user *uattr,
12117 struct perf_event_attr *attr)
12118 {
12119 u32 size;
12120 int ret;
12121
12122 /* Zero the full structure, so that a short copy will be nice. */
12123 memset(attr, 0, sizeof(*attr));
12124
12125 ret = get_user(size, &uattr->size);
12126 if (ret)
12127 return ret;
12128
12129 /* ABI compatibility quirk: */
12130 if (!size)
12131 size = PERF_ATTR_SIZE_VER0;
12132 if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
12133 goto err_size;
12134
12135 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
12136 if (ret) {
12137 if (ret == -E2BIG)
12138 goto err_size;
12139 return ret;
12140 }
12141
12142 attr->size = size;
12143
12144 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
12145 return -EINVAL;
12146
12147 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
12148 return -EINVAL;
12149
12150 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
12151 return -EINVAL;
12152
12153 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
12154 u64 mask = attr->branch_sample_type;
12155
12156 /* only using defined bits */
12157 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
12158 return -EINVAL;
12159
12160 /* at least one branch bit must be set */
12161 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
12162 return -EINVAL;
12163
12164 /* propagate priv level, when not set for branch */
12165 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
12166
12167 /* exclude_kernel checked on syscall entry */
12168 if (!attr->exclude_kernel)
12169 mask |= PERF_SAMPLE_BRANCH_KERNEL;
12170
12171 if (!attr->exclude_user)
12172 mask |= PERF_SAMPLE_BRANCH_USER;
12173
12174 if (!attr->exclude_hv)
12175 mask |= PERF_SAMPLE_BRANCH_HV;
12176 /*
12177 * adjust user setting (for HW filter setup)
12178 */
12179 attr->branch_sample_type = mask;
12180 }
12181 /* privileged levels capture (kernel, hv): check permissions */
12182 if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
12183 ret = perf_allow_kernel(attr);
12184 if (ret)
12185 return ret;
12186 }
12187 }
12188
12189 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
12190 ret = perf_reg_validate(attr->sample_regs_user);
12191 if (ret)
12192 return ret;
12193 }
12194
12195 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
12196 if (!arch_perf_have_user_stack_dump())
12197 return -ENOSYS;
12198
12199 /*
12200 * We have __u32 type for the size, but so far
12201 * we can only use __u16 as maximum due to the
12202 * __u16 sample size limit.
12203 */
12204 if (attr->sample_stack_user >= USHRT_MAX)
12205 return -EINVAL;
12206 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
12207 return -EINVAL;
12208 }
12209
12210 if (!attr->sample_max_stack)
12211 attr->sample_max_stack = sysctl_perf_event_max_stack;
12212
12213 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
12214 ret = perf_reg_validate(attr->sample_regs_intr);
12215
12216 #ifndef CONFIG_CGROUP_PERF
12217 if (attr->sample_type & PERF_SAMPLE_CGROUP)
12218 return -EINVAL;
12219 #endif
12220 if ((attr->sample_type & PERF_SAMPLE_WEIGHT) &&
12221 (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
12222 return -EINVAL;
12223
12224 if (!attr->inherit && attr->inherit_thread)
12225 return -EINVAL;
12226
12227 if (attr->remove_on_exec && attr->enable_on_exec)
12228 return -EINVAL;
12229
12230 if (attr->sigtrap && !attr->remove_on_exec)
12231 return -EINVAL;
12232
12233 out:
12234 return ret;
12235
12236 err_size:
12237 put_user(sizeof(*attr), &uattr->size);
12238 ret = -E2BIG;
12239 goto out;
12240 }
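
/*
 * Illustration only: the attr.size handshake seen from user space. An old
 * binary passing a smaller structure gets the missing tail zero-filled by
 * copy_struct_from_user(); a newer binary passing a larger structure only
 * fails (-E2BIG, with the kernel's size written back) if the extra bytes are
 * non-zero.
 */
#if 0
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);	/* ABI revision this binary was built against */
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
#endif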
12241
12242 static void mutex_lock_double(struct mutex *a, struct mutex *b)
12243 {
12244 if (b < a)
12245 swap(a, b);
12246
12247 mutex_lock(a);
12248 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
12249 }
12250
12251 static int
12252 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
12253 {
12254 struct perf_buffer *rb = NULL;
12255 int ret = -EINVAL;
12256
12257 if (!output_event) {
12258 mutex_lock(&event->mmap_mutex);
12259 goto set;
12260 }
12261
12262 /* don't allow circular references */
12263 if (event == output_event)
12264 goto out;
12265
12266 /*
12267 * Don't allow cross-cpu buffers
12268 */
12269 if (output_event->cpu != event->cpu)
12270 goto out;
12271
12272 /*
12273 * If it's not a per-cpu rb, it must be the same task.
12274 */
12275 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
12276 goto out;
12277
12278 /*
12279 * Mixing clocks in the same buffer is trouble you don't need.
12280 */
12281 if (output_event->clock != event->clock)
12282 goto out;
12283
12284 /*
12285 * Either writing ring buffer from beginning or from end.
12286 * Mixing is not allowed.
12287 */
12288 if (is_write_backward(output_event) != is_write_backward(event))
12289 goto out;
12290
12291 /*
12292 * If both events generate aux data, they must be on the same PMU
12293 */
12294 if (has_aux(event) && has_aux(output_event) &&
12295 event->pmu != output_event->pmu)
12296 goto out;
12297
12298 /*
12299 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
12300 * output_event is already on rb->event_list, and the list iteration
12301 * restarts after every removal, it is guaranteed this new event is
12302 * observed *OR* if output_event is already removed, it's guaranteed we
12303 * observe !rb->mmap_count.
12304 */
12305 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
12306 set:
12307 /* Can't redirect output if we've got an active mmap() */
12308 if (atomic_read(&event->mmap_count))
12309 goto unlock;
12310
12311 if (output_event) {
12312 /* get the rb we want to redirect to */
12313 rb = ring_buffer_get(output_event);
12314 if (!rb)
12315 goto unlock;
12316
12317 /* did we race against perf_mmap_close() */
12318 if (!atomic_read(&rb->mmap_count)) {
12319 ring_buffer_put(rb);
12320 goto unlock;
12321 }
12322 }
12323
12324 ring_buffer_attach(event, rb);
12325
12326 ret = 0;
12327 unlock:
12328 mutex_unlock(&event->mmap_mutex);
12329 if (output_event)
12330 mutex_unlock(&output_event->mmap_mutex);
12331
12332 out:
12333 return ret;
12334 }
12335
12336 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
12337 {
12338 bool nmi_safe = false;
12339
12340 switch (clk_id) {
12341 case CLOCK_MONOTONIC:
12342 event->clock = &ktime_get_mono_fast_ns;
12343 nmi_safe = true;
12344 break;
12345
12346 case CLOCK_MONOTONIC_RAW:
12347 event->clock = &ktime_get_raw_fast_ns;
12348 nmi_safe = true;
12349 break;
12350
12351 case CLOCK_REALTIME:
12352 event->clock = &ktime_get_real_ns;
12353 break;
12354
12355 case CLOCK_BOOTTIME:
12356 event->clock = &ktime_get_boottime_ns;
12357 break;
12358
12359 case CLOCK_TAI:
12360 event->clock = &ktime_get_clocktai_ns;
12361 break;
12362
12363 default:
12364 return -EINVAL;
12365 }
12366
12367 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
12368 return -EINVAL;
12369
12370 return 0;
12371 }
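
/*
 * Illustration only: selecting a clock from user space. CLOCK_MONOTONIC and
 * CLOCK_MONOTONIC_RAW are NMI-safe and work with any PMU; the remaining
 * clocks are only accepted for PMUs advertising PERF_PMU_CAP_NO_NMI (such as
 * the software PMUs above), otherwise the syscall fails with -EINVAL.
 */
#if 0
	attr.use_clockid = 1;
	attr.clockid	 = CLOCK_MONOTONIC_RAW;	/* timestamps via ktime_get_raw_fast_ns() */
#endif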
12372
12373 static bool
12374 perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
12375 {
12376 unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
12377 bool is_capable = perfmon_capable();
12378
12379 if (attr->sigtrap) {
12380 /*
12381 * perf_event_attr::sigtrap sends signals to the other task.
12382 * Require the current task to also have CAP_KILL.
12383 */
12384 rcu_read_lock();
12385 is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
12386 rcu_read_unlock();
12387
12388 /*
12389 * If the required capabilities aren't available, fall back to the
12390 * ptrace permission check: upgrade to ATTACH, since sending signals
12391 * can effectively change the target task.
12392 */
12393 ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
12394 }
12395
12396 /*
12397 * Preserve ptrace permission check for backwards compatibility. The
12398 * ptrace check also includes checks that the current task and other
12399 * task have matching uids, and is therefore not done here explicitly.
12400 */
12401 return is_capable || ptrace_may_access(task, ptrace_mode);
12402 }
12403
12404 /**
12405 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12406 *
12407 * @attr_uptr: event_id type attributes for monitoring/sampling
12408 * @pid: target pid
12409 * @cpu: target cpu
12410 * @group_fd: group leader event fd
12411 * @flags: perf event open flags
12412 */
12413 SYSCALL_DEFINE5(perf_event_open,
12414 struct perf_event_attr __user *, attr_uptr,
12415 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
12416 {
12417 struct perf_event *group_leader = NULL, *output_event = NULL;
12418 struct perf_event_pmu_context *pmu_ctx;
12419 struct perf_event *event, *sibling;
12420 struct perf_event_attr attr;
12421 struct perf_event_context *ctx;
12422 struct file *event_file = NULL;
12423 struct fd group = {NULL, 0};
12424 struct task_struct *task = NULL;
12425 struct pmu *pmu;
12426 int event_fd;
12427 int move_group = 0;
12428 int err;
12429 int f_flags = O_RDWR;
12430 int cgroup_fd = -1;
12431
12432 /* for future expandability... */
12433 if (flags & ~PERF_FLAG_ALL)
12434 return -EINVAL;
12435
12436 err = perf_copy_attr(attr_uptr, &attr);
12437 if (err)
12438 return err;
12439
12440 /* Do we allow access to perf_event_open(2) ? */
12441 err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
12442 if (err)
12443 return err;
12444
12445 if (!attr.exclude_kernel) {
12446 err = perf_allow_kernel(&attr);
12447 if (err)
12448 return err;
12449 }
12450
12451 if (attr.namespaces) {
12452 if (!perfmon_capable())
12453 return -EACCES;
12454 }
12455
12456 if (attr.freq) {
12457 if (attr.sample_freq > sysctl_perf_event_sample_rate)
12458 return -EINVAL;
12459 } else {
12460 if (attr.sample_period & (1ULL << 63))
12461 return -EINVAL;
12462 }
12463
12464 /* Only privileged users can get physical addresses */
12465 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
12466 err = perf_allow_kernel(&attr);
12467 if (err)
12468 return err;
12469 }
12470
12471 /* REGS_INTR can leak data, lockdown must prevent this */
12472 if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
12473 err = security_locked_down(LOCKDOWN_PERF);
12474 if (err)
12475 return err;
12476 }
12477
12478 /*
12479 * In cgroup mode, the pid argument is used to pass the fd
12480 * opened to the cgroup directory in cgroupfs. The cpu argument
12481 * designates the cpu on which to monitor threads from that
12482 * cgroup.
12483 */
12484 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
12485 return -EINVAL;
12486
12487 if (flags & PERF_FLAG_FD_CLOEXEC)
12488 f_flags |= O_CLOEXEC;
12489
12490 event_fd = get_unused_fd_flags(f_flags);
12491 if (event_fd < 0)
12492 return event_fd;
12493
12494 if (group_fd != -1) {
12495 err = perf_fget_light(group_fd, &group);
12496 if (err)
12497 goto err_fd;
12498 group_leader = group.file->private_data;
12499 if (flags & PERF_FLAG_FD_OUTPUT)
12500 output_event = group_leader;
12501 if (flags & PERF_FLAG_FD_NO_GROUP)
12502 group_leader = NULL;
12503 }
12504
12505 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
12506 task = find_lively_task_by_vpid(pid);
12507 if (IS_ERR(task)) {
12508 err = PTR_ERR(task);
12509 goto err_group_fd;
12510 }
12511 }
12512
12513 if (task && group_leader &&
12514 group_leader->attr.inherit != attr.inherit) {
12515 err = -EINVAL;
12516 goto err_task;
12517 }
12518
12519 if (flags & PERF_FLAG_PID_CGROUP)
12520 cgroup_fd = pid;
12521
12522 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
12523 NULL, NULL, cgroup_fd);
12524 if (IS_ERR(event)) {
12525 err = PTR_ERR(event);
12526 goto err_task;
12527 }
12528
12529 if (is_sampling_event(event)) {
12530 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
12531 err = -EOPNOTSUPP;
12532 goto err_alloc;
12533 }
12534 }
12535
12536 /*
12537 * Special case software events and allow them to be part of
12538 * any hardware group.
12539 */
12540 pmu = event->pmu;
12541
12542 if (attr.use_clockid) {
12543 err = perf_event_set_clock(event, attr.clockid);
12544 if (err)
12545 goto err_alloc;
12546 }
12547
12548 if (pmu->task_ctx_nr == perf_sw_context)
12549 event->event_caps |= PERF_EV_CAP_SOFTWARE;
12550
12551 if (task) {
12552 err = down_read_interruptible(&task->signal->exec_update_lock);
12553 if (err)
12554 goto err_alloc;
12555
12556 /*
12557 * We must hold exec_update_lock across this and any potential
12558 * perf_install_in_context() call for this new event to
12559 * serialize against exec() altering our credentials (and the
12560 * perf_event_exit_task() that could imply).
12561 */
12562 err = -EACCES;
12563 if (!perf_check_permission(&attr, task))
12564 goto err_cred;
12565 }
12566
12567 /*
12568 * Get the target context (task or percpu):
12569 */
12570 ctx = find_get_context(task, event);
12571 if (IS_ERR(ctx)) {
12572 err = PTR_ERR(ctx);
12573 goto err_cred;
12574 }
12575
12576 mutex_lock(&ctx->mutex);
12577
12578 if (ctx->task == TASK_TOMBSTONE) {
12579 err = -ESRCH;
12580 goto err_locked;
12581 }
12582
12583 if (!task) {
12584 /*
12585 * Check if the @cpu we're creating an event for is online.
12586 *
12587 * We use the perf_cpu_context::ctx::mutex to serialize against
12588 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
12589 */
12590 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
12591
12592 if (!cpuctx->online) {
12593 err = -ENODEV;
12594 goto err_locked;
12595 }
12596 }
12597
12598 if (group_leader) {
12599 err = -EINVAL;
12600
12601 /*
12602 * Do not allow a recursive hierarchy (this new sibling
12603 * becoming part of another group-sibling):
12604 */
12605 if (group_leader->group_leader != group_leader)
12606 goto err_locked;
12607
12608 /* All events in a group should have the same clock */
12609 if (group_leader->clock != event->clock)
12610 goto err_locked;
12611
12612 /*
12613 * Make sure both events are for the same CPU;
12614 * grouping events for different CPUs is broken, since
12615 * they can never be scheduled concurrently anyway.
12616 */
12617 if (group_leader->cpu != event->cpu)
12618 goto err_locked;
12619
12620 /*
12621 * Make sure we're both on the same context; either task or cpu.
12622 */
12623 if (group_leader->ctx != ctx)
12624 goto err_locked;
12625
12626 /*
12627 * Only a group leader can be exclusive or pinned
12628 */
12629 if (attr.exclusive || attr.pinned)
12630 goto err_locked;
12631
12632 if (is_software_event(event) &&
12633 !in_software_context(group_leader)) {
12634 /*
12635 * If the event is a sw event, but the group_leader
12636 * is on hw context.
12637 *
12638 * Allow the addition of software events to hw
12639 * groups, this is safe because software events
12640 * never fail to schedule.
12641 *
12642 * Note the comment that goes with struct
12643 * perf_event_pmu_context.
12644 */
12645 pmu = group_leader->pmu_ctx->pmu;
12646 } else if (!is_software_event(event)) {
12647 if (is_software_event(group_leader) &&
12648 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
12649 /*
12650 * In case the group is a pure software group, and we
12651 * try to add a hardware event, move the whole group to
12652 * the hardware context.
12653 */
12654 move_group = 1;
12655 }
12656
12657 /* Don't allow group of multiple hw events from different pmus */
12658 if (!in_software_context(group_leader) &&
12659 group_leader->pmu_ctx->pmu != pmu)
12660 goto err_locked;
12661 }
12662 }
12663
12664 /*
12665 * Now that we're certain of the pmu; find the pmu_ctx.
12666 */
12667 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12668 if (IS_ERR(pmu_ctx)) {
12669 err = PTR_ERR(pmu_ctx);
12670 goto err_locked;
12671 }
12672 event->pmu_ctx = pmu_ctx;
12673
12674 if (output_event) {
12675 err = perf_event_set_output(event, output_event);
12676 if (err)
12677 goto err_context;
12678 }
12679
12680 if (!perf_event_validate_size(event)) {
12681 err = -E2BIG;
12682 goto err_context;
12683 }
12684
12685 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
12686 err = -EINVAL;
12687 goto err_context;
12688 }
12689
12690 /*
12691 * Must be under the same ctx::mutex as perf_install_in_context(),
12692 * because we need to serialize with concurrent event creation.
12693 */
12694 if (!exclusive_event_installable(event, ctx)) {
12695 err = -EBUSY;
12696 goto err_context;
12697 }
12698
12699 WARN_ON_ONCE(ctx->parent_ctx);
12700
12701 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags);
12702 if (IS_ERR(event_file)) {
12703 err = PTR_ERR(event_file);
12704 event_file = NULL;
12705 goto err_context;
12706 }
12707
12708 /*
12709 * This is the point of no return; we cannot fail hereafter. This is
12710 * where we start modifying current state.
12711 */
12712
12713 if (move_group) {
12714 perf_remove_from_context(group_leader, 0);
12715 put_pmu_ctx(group_leader->pmu_ctx);
12716
12717 for_each_sibling_event(sibling, group_leader) {
12718 perf_remove_from_context(sibling, 0);
12719 put_pmu_ctx(sibling->pmu_ctx);
12720 }
12721
12722 /*
12723 * Install the group siblings before the group leader.
12724 *
12725 * Because a group leader will try to install the entire group
12726 * (through the sibling list, which is still intact), we can
12727 * end up with siblings installed in the wrong context.
12728 *
12729 * By installing siblings first we NO-OP because they're not
12730 * reachable through the group lists.
12731 */
12732 for_each_sibling_event(sibling, group_leader) {
12733 sibling->pmu_ctx = pmu_ctx;
12734 get_pmu_ctx(pmu_ctx);
12735 perf_event__state_init(sibling);
12736 perf_install_in_context(ctx, sibling, sibling->cpu);
12737 }
12738
12739 /*
12740 * Removing an event from its context leaves it disabled.
12741 * What we want here is the event in its initial startup
12742 * state, ready to be added into the new context.
12743 */
12744 group_leader->pmu_ctx = pmu_ctx;
12745 get_pmu_ctx(pmu_ctx);
12746 perf_event__state_init(group_leader);
12747 perf_install_in_context(ctx, group_leader, group_leader->cpu);
12748 }
12749
12750 /*
12751 * Precalculate sample_data sizes; do while holding ctx::mutex such
12752 * that we're serialized against further additions and before
12753 * perf_install_in_context() which is the point the event is active and
12754 * can use these values.
12755 */
12756 perf_event__header_size(event);
12757 perf_event__id_header_size(event);
12758
12759 event->owner = current;
12760
12761 perf_install_in_context(ctx, event, event->cpu);
12762 perf_unpin_context(ctx);
12763
12764 mutex_unlock(&ctx->mutex);
12765
12766 if (task) {
12767 up_read(&task->signal->exec_update_lock);
12768 put_task_struct(task);
12769 }
12770
12771 mutex_lock(&current->perf_event_mutex);
12772 list_add_tail(&event->owner_entry, &current->perf_event_list);
12773 mutex_unlock(&current->perf_event_mutex);
12774
12775 /*
12776 * Drop the reference on the group_event after placing the
12777 * new event on the sibling_list. This ensures destruction
12778 * of the group leader will find the pointer to itself in
12779 * perf_group_detach().
12780 */
12781 fdput(group);
12782 fd_install(event_fd, event_file);
12783 return event_fd;
12784
12785 err_context:
12786 put_pmu_ctx(event->pmu_ctx);
12787 event->pmu_ctx = NULL; /* _free_event() */
12788 err_locked:
12789 mutex_unlock(&ctx->mutex);
12790 perf_unpin_context(ctx);
12791 put_ctx(ctx);
12792 err_cred:
12793 if (task)
12794 up_read(&task->signal->exec_update_lock);
12795 err_alloc:
12796 free_event(event);
12797 err_task:
12798 if (task)
12799 put_task_struct(task);
12800 err_group_fd:
12801 fdput(group);
12802 err_fd:
12803 put_unused_fd(event_fd);
12804 return err;
12805 }
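
/*
 * Illustration only: a minimal user-space caller of the syscall above. glibc
 * provides no perf_event_open() wrapper, so syscall(2) is used directly;
 * error handling is abbreviated.
 */
#if 0
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid = 0, cpu = -1: count for the calling task on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("cpu-clock: %lld ns\n", count);
	close(fd);
	return 0;
}
#endif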
12806
12807 /**
12808 * perf_event_create_kernel_counter
12809 *
12810 * @attr: attributes of the counter to create
12811 * @cpu: cpu in which the counter is bound
12812 * @task: task to profile (NULL for percpu)
12813 * @overflow_handler: callback to trigger when we hit the event
12814 * @context: context data could be used in overflow_handler callback
12815 */
12816 struct perf_event *
12817 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
12818 struct task_struct *task,
12819 perf_overflow_handler_t overflow_handler,
12820 void *context)
12821 {
12822 struct perf_event_pmu_context *pmu_ctx;
12823 struct perf_event_context *ctx;
12824 struct perf_event *event;
12825 struct pmu *pmu;
12826 int err;
12827
12828 /*
12829 * Grouping is not supported for kernel events, and neither is 'AUX';
12830 * make sure the caller's intentions are adjusted.
12831 */
12832 if (attr->aux_output)
12833 return ERR_PTR(-EINVAL);
12834
12835 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
12836 overflow_handler, context, -1);
12837 if (IS_ERR(event)) {
12838 err = PTR_ERR(event);
12839 goto err;
12840 }
12841
12842 /* Mark owner so we could distinguish it from user events. */
12843 event->owner = TASK_TOMBSTONE;
12844 pmu = event->pmu;
12845
12846 if (pmu->task_ctx_nr == perf_sw_context)
12847 event->event_caps |= PERF_EV_CAP_SOFTWARE;
12848
12849 /*
12850 * Get the target context (task or percpu):
12851 */
12852 ctx = find_get_context(task, event);
12853 if (IS_ERR(ctx)) {
12854 err = PTR_ERR(ctx);
12855 goto err_alloc;
12856 }
12857
12858 WARN_ON_ONCE(ctx->parent_ctx);
12859 mutex_lock(&ctx->mutex);
12860 if (ctx->task == TASK_TOMBSTONE) {
12861 err = -ESRCH;
12862 goto err_unlock;
12863 }
12864
12865 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12866 if (IS_ERR(pmu_ctx)) {
12867 err = PTR_ERR(pmu_ctx);
12868 goto err_unlock;
12869 }
12870 event->pmu_ctx = pmu_ctx;
12871
12872 if (!task) {
12873 /*
12874 * Check if the @cpu we're creating an event for is online.
12875 *
12876 * We use the perf_cpu_context::ctx::mutex to serialize against
12877 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
12878 */
12879 struct perf_cpu_context *cpuctx =
12880 container_of(ctx, struct perf_cpu_context, ctx);
12881 if (!cpuctx->online) {
12882 err = -ENODEV;
12883 goto err_pmu_ctx;
12884 }
12885 }
12886
12887 if (!exclusive_event_installable(event, ctx)) {
12888 err = -EBUSY;
12889 goto err_pmu_ctx;
12890 }
12891
12892 perf_install_in_context(ctx, event, event->cpu);
12893 perf_unpin_context(ctx);
12894 mutex_unlock(&ctx->mutex);
12895
12896 return event;
12897
12898 err_pmu_ctx:
12899 put_pmu_ctx(pmu_ctx);
12900 event->pmu_ctx = NULL; /* _free_event() */
12901 err_unlock:
12902 mutex_unlock(&ctx->mutex);
12903 perf_unpin_context(ctx);
12904 put_ctx(ctx);
12905 err_alloc:
12906 free_event(event);
12907 err:
12908 return ERR_PTR(err);
12909 }
12910 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
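
/*
 * Illustration only: a hypothetical in-kernel user of the interface above, in
 * the style of the hardlockup watchdog. The callback name and attribute
 * values are made up.
 */
#if 0
static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	/* runs from the PMU interrupt (possibly NMI) when the period expires */
}

static struct perf_event_attr my_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.sample_period	= 1000000,
	.pinned		= 1,
};

static int my_setup(void)
{
	struct perf_event *ev;

	/* cpu 0, no task: a per-CPU, kernel-owned counter */
	ev = perf_event_create_kernel_counter(&my_attr, 0, NULL, my_overflow, NULL);
	if (IS_ERR(ev))
		return PTR_ERR(ev);

	return 0;
}
#endif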
12911
12912 static void __perf_pmu_remove(struct perf_event_context *ctx,
12913 int cpu, struct pmu *pmu,
12914 struct perf_event_groups *groups,
12915 struct list_head *events)
12916 {
12917 struct perf_event *event, *sibling;
12918
12919 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
12920 perf_remove_from_context(event, 0);
12921 put_pmu_ctx(event->pmu_ctx);
12922 list_add(&event->migrate_entry, events);
12923
12924 for_each_sibling_event(sibling, event) {
12925 perf_remove_from_context(sibling, 0);
12926 put_pmu_ctx(sibling->pmu_ctx);
12927 list_add(&sibling->migrate_entry, events);
12928 }
12929 }
12930 }
12931
12932 static void __perf_pmu_install_event(struct pmu *pmu,
12933 struct perf_event_context *ctx,
12934 int cpu, struct perf_event *event)
12935 {
12936 struct perf_event_pmu_context *epc;
12937 struct perf_event_context *old_ctx = event->ctx;
12938
12939 get_ctx(ctx); /* normally find_get_context() */
12940
12941 event->cpu = cpu;
12942 epc = find_get_pmu_context(pmu, ctx, event);
12943 event->pmu_ctx = epc;
12944
12945 if (event->state >= PERF_EVENT_STATE_OFF)
12946 event->state = PERF_EVENT_STATE_INACTIVE;
12947 perf_install_in_context(ctx, event, cpu);
12948
12949 /*
12950 * Now that event->ctx is updated and visible, put the old ctx.
12951 */
12952 put_ctx(old_ctx);
12953 }
12954
12955 static void __perf_pmu_install(struct perf_event_context *ctx,
12956 int cpu, struct pmu *pmu, struct list_head *events)
12957 {
12958 struct perf_event *event, *tmp;
12959
12960 /*
12961 * Re-instate events in 2 passes.
12962 *
12963 * Skip over group leaders and only install siblings on this first
12964 * pass: siblings will not get enabled without a leader, but a
12965 * leader will enable its siblings, even if those are still on the old
12966 * context.
12967 */
12968 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
12969 if (event->group_leader == event)
12970 continue;
12971
12972 list_del(&event->migrate_entry);
12973 __perf_pmu_install_event(pmu, ctx, cpu, event);
12974 }
12975
12976 /*
12977 * Once all the siblings are setup properly, install the group leaders
12978 * to make it go.
12979 */
12980 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
12981 list_del(&event->migrate_entry);
12982 __perf_pmu_install_event(pmu, ctx, cpu, event);
12983 }
12984 }
12985
12986 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
12987 {
12988 struct perf_event_context *src_ctx, *dst_ctx;
12989 LIST_HEAD(events);
12990
12991 /*
12992 * Since per-cpu context is persistent, no need to grab an extra
12993 * reference.
12994 */
12995 src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
12996 dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
12997
12998 /*
12999 * See perf_event_ctx_lock() for comments on the details
13000 * of swizzling perf_event::ctx.
13001 */
13002 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
13003
13004 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
13005 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
13006
13007 if (!list_empty(&events)) {
13008 /*
13009 * Wait for the events to quiesce before re-instating them.
13010 */
13011 synchronize_rcu();
13012
13013 __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
13014 }
13015
13016 mutex_unlock(&dst_ctx->mutex);
13017 mutex_unlock(&src_ctx->mutex);
13018 }
13019 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
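
/*
 * Illustration only: how an uncore-style driver typically uses the helper
 * above from its CPU-hotplug offline callback. "box" and its fields are
 * hypothetical driver state.
 */
#if 0
	if (cpu == box->event_cpu) {
		int target = cpumask_any_but(cpu_online_mask, cpu);

		if (target < nr_cpu_ids) {
			perf_pmu_migrate_context(box->pmu, cpu, target);
			box->event_cpu = target;
		}
	}
#endif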
13020
13021 static void sync_child_event(struct perf_event *child_event)
13022 {
13023 struct perf_event *parent_event = child_event->parent;
13024 u64 child_val;
13025
13026 if (child_event->attr.inherit_stat) {
13027 struct task_struct *task = child_event->ctx->task;
13028
13029 if (task && task != TASK_TOMBSTONE)
13030 perf_event_read_event(child_event, task);
13031 }
13032
13033 child_val = perf_event_count(child_event);
13034
13035 /*
13036 * Add back the child's count to the parent's count:
13037 */
13038 atomic64_add(child_val, &parent_event->child_count);
13039 atomic64_add(child_event->total_time_enabled,
13040 &parent_event->child_total_time_enabled);
13041 atomic64_add(child_event->total_time_running,
13042 &parent_event->child_total_time_running);
13043 }
13044
13045 static void
13046 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
13047 {
13048 struct perf_event *parent_event = event->parent;
13049 unsigned long detach_flags = 0;
13050
13051 if (parent_event) {
13052 /*
13053 * Do not destroy the 'original' grouping; because of the
13054 * context switch optimization the original events could've
13055 * ended up in a random child task.
13056 *
13057 * If we were to destroy the original group, all group related
13058 * operations would cease to function properly after this
13059 * random child dies.
13060 *
13061 * Do destroy all inherited groups, we don't care about those
13062 * and being thorough is better.
13063 */
13064 detach_flags = DETACH_GROUP | DETACH_CHILD;
13065 mutex_lock(&parent_event->child_mutex);
13066 }
13067
13068 perf_remove_from_context(event, detach_flags);
13069
13070 raw_spin_lock_irq(&ctx->lock);
13071 if (event->state > PERF_EVENT_STATE_EXIT)
13072 perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
13073 raw_spin_unlock_irq(&ctx->lock);
13074
13075 /*
13076 * Child events can be freed.
13077 */
13078 if (parent_event) {
13079 mutex_unlock(&parent_event->child_mutex);
13080 /*
13081 * Kick perf_poll() for is_event_hup();
13082 */
13083 perf_event_wakeup(parent_event);
13084 free_event(event);
13085 put_event(parent_event);
13086 return;
13087 }
13088
13089 /*
13090 * Parent events are governed by their filedesc, retain them.
13091 */
13092 perf_event_wakeup(event);
13093 }
13094
13095 static void perf_event_exit_task_context(struct task_struct *child)
13096 {
13097 struct perf_event_context *child_ctx, *clone_ctx = NULL;
13098 struct perf_event *child_event, *next;
13099
13100 WARN_ON_ONCE(child != current);
13101
13102 child_ctx = perf_pin_task_context(child);
13103 if (!child_ctx)
13104 return;
13105
13106 /*
13107 * In order to reduce the amount of trickiness in ctx tear-down, we hold
13108 * ctx::mutex over the entire thing. This serializes against almost
13109 * everything that wants to access the ctx.
13110 *
13111 * The exception is sys_perf_event_open() /
13112 * perf_event_create_kernel_counter() which does find_get_context()
13113 * without ctx::mutex (it cannot because of the move_group double mutex
13114 * lock thing). See the comments in perf_install_in_context().
13115 */
13116 mutex_lock(&child_ctx->mutex);
13117
13118 /*
13119 * In a single ctx::lock section, de-schedule the events and detach the
13120 * context from the task such that we cannot ever get it scheduled back
13121 * in.
13122 */
13123 raw_spin_lock_irq(&child_ctx->lock);
13124 task_ctx_sched_out(child_ctx, EVENT_ALL);
13125
13126 /*
13127 * Now that the context is inactive, destroy the task <-> ctx relation
13128 * and mark the context dead.
13129 */
13130 RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
13131 put_ctx(child_ctx); /* cannot be last */
13132 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
13133 put_task_struct(current); /* cannot be last */
13134
13135 clone_ctx = unclone_ctx(child_ctx);
13136 raw_spin_unlock_irq(&child_ctx->lock);
13137
13138 if (clone_ctx)
13139 put_ctx(clone_ctx);
13140
13141 /*
13142 * Report the task dead after unscheduling the events so that we
13143 * won't get any samples after PERF_RECORD_EXIT. We can however still
13144 * get a few PERF_RECORD_READ events.
13145 */
13146 perf_event_task(child, child_ctx, 0);
13147
13148 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
13149 perf_event_exit_event(child_event, child_ctx);
13150
13151 mutex_unlock(&child_ctx->mutex);
13152
13153 put_ctx(child_ctx);
13154 }
13155
13156 /*
13157 * When a child task exits, feed back event values to parent events.
13158 *
13159 * Can be called with exec_update_lock held when called from
13160 * setup_new_exec().
13161 */
13162 void perf_event_exit_task(struct task_struct *child)
13163 {
13164 struct perf_event *event, *tmp;
13165
13166 mutex_lock(&child->perf_event_mutex);
13167 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
13168 owner_entry) {
13169 list_del_init(&event->owner_entry);
13170
13171 /*
13172 * Ensure the list deletion is visible before we clear
13173 * the owner; this closes a race against perf_release(),
13174 * where we need to serialize on the owner->perf_event_mutex.
13175 */
13176 smp_store_release(&event->owner, NULL);
13177 }
13178 mutex_unlock(&child->perf_event_mutex);
13179
13180 perf_event_exit_task_context(child);
13181
13182 /*
13183 * perf_event_exit_task_context() calls perf_event_task()
13184 * with the child's task_ctx, which generates EXIT events for
13185 * child contexts and sets child->perf_event_ctxp to NULL.
13186 * At this point we need to send EXIT events to cpu contexts.
13187 */
13188 perf_event_task(child, NULL, 0);
13189 }
13190
13191 static void perf_free_event(struct perf_event *event,
13192 struct perf_event_context *ctx)
13193 {
13194 struct perf_event *parent = event->parent;
13195
13196 if (WARN_ON_ONCE(!parent))
13197 return;
13198
13199 mutex_lock(&parent->child_mutex);
13200 list_del_init(&event->child_list);
13201 mutex_unlock(&parent->child_mutex);
13202
13203 put_event(parent);
13204
13205 raw_spin_lock_irq(&ctx->lock);
13206 perf_group_detach(event);
13207 list_del_event(event, ctx);
13208 raw_spin_unlock_irq(&ctx->lock);
13209 free_event(event);
13210 }
13211
13212 /*
13213 * Free a context as created by inheritance by perf_event_init_task() below,
13214 * used by fork() in case of fail.
13215 *
13216 * Even though the task has never lived, the context and events have been
13217 * exposed through the child_list, so we must take care tearing it all down.
13218 */
13219 void perf_event_free_task(struct task_struct *task)
13220 {
13221 struct perf_event_context *ctx;
13222 struct perf_event *event, *tmp;
13223
13224 ctx = rcu_access_pointer(task->perf_event_ctxp);
13225 if (!ctx)
13226 return;
13227
13228 mutex_lock(&ctx->mutex);
13229 raw_spin_lock_irq(&ctx->lock);
13230 /*
13231 * Destroy the task <-> ctx relation and mark the context dead.
13232 *
13233 * This is important because even though the task hasn't been
13234 * exposed yet the context has been (through child_list).
13235 */
13236 RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
13237 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
13238 put_task_struct(task); /* cannot be last */
13239 raw_spin_unlock_irq(&ctx->lock);
13240
13241
13242 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
13243 perf_free_event(event, ctx);
13244
13245 mutex_unlock(&ctx->mutex);
13246
13247 /*
13248 * perf_event_release_kernel() could've stolen some of our
13249 * child events and still have them on its free_list. In that
13250 * case we must wait for these events to have been freed (in
13251 * particular all their references to this task must've been
13252 * dropped).
13253 *
13254 * Without this copy_process() will unconditionally free this
13255 * task (irrespective of its reference count) and
13256 * _free_event()'s put_task_struct(event->hw.target) will be a
13257 * use-after-free.
13258 *
13259 * Wait for all events to drop their context reference.
13260 */
13261 wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1);
13262 put_ctx(ctx); /* must be last */
13263 }
13264
13265 void perf_event_delayed_put(struct task_struct *task)
13266 {
13267 WARN_ON_ONCE(task->perf_event_ctxp);
13268 }
13269
13270 struct file *perf_event_get(unsigned int fd)
13271 {
13272 struct file *file = fget(fd);
13273 if (!file)
13274 return ERR_PTR(-EBADF);
13275
13276 if (file->f_op != &perf_fops) {
13277 fput(file);
13278 return ERR_PTR(-EBADF);
13279 }
13280
13281 return file;
13282 }
13283
13284 const struct perf_event *perf_get_event(struct file *file)
13285 {
13286 if (file->f_op != &perf_fops)
13287 return ERR_PTR(-EINVAL);
13288
13289 return file->private_data;
13290 }
13291
13292 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
13293 {
13294 if (!event)
13295 return ERR_PTR(-EINVAL);
13296
13297 return &event->attr;
13298 }
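/*
 * In-kernel usage sketch (hypothetical helper, for illustration only):
 * resolve a perf event fd to the event and inspect its attributes, e.g.
 * to verify the event type before attaching to it:
 *
 *	static int check_perf_fd_type(unsigned int fd, u32 want_type)
 *	{
 *		struct file *file = perf_event_get(fd);
 *		const struct perf_event *event;
 *		int ret = 0;
 *
 *		if (IS_ERR(file))
 *			return PTR_ERR(file);
 *
 *		event = perf_get_event(file);
 *		if (IS_ERR(event))
 *			ret = PTR_ERR(event);
 *		else if (event->attr.type != want_type)
 *			ret = -EINVAL;
 *
 *		fput(file);
 *		return ret;
 *	}
 */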
13299
13300 /*
13301 * Inherit an event from parent task to child task.
13302 *
13303 * Returns:
13304 * - valid pointer on success
13305 * - NULL for orphaned events
13306 * - IS_ERR() on error
13307 */
13308 static struct perf_event *
13309 inherit_event(struct perf_event *parent_event,
13310 struct task_struct *parent,
13311 struct perf_event_context *parent_ctx,
13312 struct task_struct *child,
13313 struct perf_event *group_leader,
13314 struct perf_event_context *child_ctx)
13315 {
13316 enum perf_event_state parent_state = parent_event->state;
13317 struct perf_event_pmu_context *pmu_ctx;
13318 struct perf_event *child_event;
13319 unsigned long flags;
13320
13321 /*
13322 * Instead of creating recursive hierarchies of events,
13323 * we link inherited events back to the original parent,
13324 * which is guaranteed to have a filp that we use as the
13325 * reference count:
13326 */
13327 if (parent_event->parent)
13328 parent_event = parent_event->parent;
13329
13330 child_event = perf_event_alloc(&parent_event->attr,
13331 parent_event->cpu,
13332 child,
13333 group_leader, parent_event,
13334 NULL, NULL, -1);
13335 if (IS_ERR(child_event))
13336 return child_event;
13337
13338 pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
13339 if (IS_ERR(pmu_ctx)) {
13340 free_event(child_event);
13341 return ERR_CAST(pmu_ctx);
13342 }
13343 child_event->pmu_ctx = pmu_ctx;
13344
13345 /*
13346 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
13347 * must be under the same lock in order to serialize against
13348 * perf_event_release_kernel(), such that either we must observe
13349 * is_orphaned_event() or they will observe us on the child_list.
13350 */
13351 mutex_lock(&parent_event->child_mutex);
13352 if (is_orphaned_event(parent_event) ||
13353 !atomic_long_inc_not_zero(&parent_event->refcount)) {
13354 mutex_unlock(&parent_event->child_mutex);
13355 /* task_ctx_data is freed with child_ctx */
13356 free_event(child_event);
13357 return NULL;
13358 }
13359
13360 get_ctx(child_ctx);
13361
13362 /*
13363 * Make the child state follow the state of the parent event,
13364 * not its attr.disabled bit. We hold the parent's mutex,
13365 * so we won't race with perf_event_{en, dis}able_family.
13366 */
13367 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
13368 child_event->state = PERF_EVENT_STATE_INACTIVE;
13369 else
13370 child_event->state = PERF_EVENT_STATE_OFF;
13371
13372 if (parent_event->attr.freq) {
13373 u64 sample_period = parent_event->hw.sample_period;
13374 struct hw_perf_event *hwc = &child_event->hw;
13375
13376 hwc->sample_period = sample_period;
13377 hwc->last_period = sample_period;
13378
13379 local64_set(&hwc->period_left, sample_period);
13380 }
13381
13382 child_event->ctx = child_ctx;
13383 child_event->overflow_handler = parent_event->overflow_handler;
13384 child_event->overflow_handler_context
13385 = parent_event->overflow_handler_context;
13386
13387 /*
13388 * Precalculate sample_data sizes
13389 */
13390 perf_event__header_size(child_event);
13391 perf_event__id_header_size(child_event);
13392
13393 /*
13394 * Link it up in the child's context:
13395 */
13396 raw_spin_lock_irqsave(&child_ctx->lock, flags);
13397 add_event_to_ctx(child_event, child_ctx);
13398 child_event->attach_state |= PERF_ATTACH_CHILD;
13399 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
13400
13401 /*
13402 * Link this into the parent event's child list
13403 */
13404 list_add_tail(&child_event->child_list, &parent_event->child_list);
13405 mutex_unlock(&parent_event->child_mutex);
13406
13407 return child_event;
13408 }
13409
13410 /*
13411 * Inherits an event group.
13412 *
13413 * This will quietly suppress orphaned events; !inherit_event() is not an error.
13414 * This matches with perf_event_release_kernel() removing all child events.
13415 *
13416 * Returns:
13417 * - 0 on success
13418 * - <0 on error
13419 */
13420 static int inherit_group(struct perf_event *parent_event,
13421 struct task_struct *parent,
13422 struct perf_event_context *parent_ctx,
13423 struct task_struct *child,
13424 struct perf_event_context *child_ctx)
13425 {
13426 struct perf_event *leader;
13427 struct perf_event *sub;
13428 struct perf_event *child_ctr;
13429
13430 leader = inherit_event(parent_event, parent, parent_ctx,
13431 child, NULL, child_ctx);
13432 if (IS_ERR(leader))
13433 return PTR_ERR(leader);
13434 /*
13435 * @leader can be NULL here because of is_orphaned_event(). In this
13436 * case inherit_event() will create individual events, similar to what
13437 * perf_group_detach() would do anyway.
13438 */
13439 for_each_sibling_event(sub, parent_event) {
13440 child_ctr = inherit_event(sub, parent, parent_ctx,
13441 child, leader, child_ctx);
13442 if (IS_ERR(child_ctr))
13443 return PTR_ERR(child_ctr);
13444
13445 if (sub->aux_event == parent_event && child_ctr &&
13446 !perf_get_aux_event(child_ctr, leader))
13447 return -EINVAL;
13448 }
13449 if (leader)
13450 leader->group_generation = parent_event->group_generation;
13451 return 0;
13452 }
13453
13454 /*
13455 * Creates the child task context and tries to inherit the event-group.
13456 *
13457 * Clears @inherited_all on !attr.inherit or error. Note that we'll leave
13458 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13459 * consistent with perf_event_release_kernel() removing all child events.
13460 *
13461 * Returns:
13462 * - 0 on success
13463 * - <0 on error
13464 */
13465 static int
13466 inherit_task_group(struct perf_event *event, struct task_struct *parent,
13467 struct perf_event_context *parent_ctx,
13468 struct task_struct *child,
13469 u64 clone_flags, int *inherited_all)
13470 {
13471 struct perf_event_context *child_ctx;
13472 int ret;
13473
13474 if (!event->attr.inherit ||
13475 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
13476 /* Do not inherit if sigtrap and signal handlers were cleared. */
13477 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
13478 *inherited_all = 0;
13479 return 0;
13480 }
13481
13482 child_ctx = child->perf_event_ctxp;
13483 if (!child_ctx) {
13484 /*
13485 * This is executed from the parent task context, so
13486 * inherit events that have been marked for cloning.
13487 * First allocate and initialize a context for the
13488 * child.
13489 */
13490 child_ctx = alloc_perf_context(child);
13491 if (!child_ctx)
13492 return -ENOMEM;
13493
13494 child->perf_event_ctxp = child_ctx;
13495 }
13496
13497 ret = inherit_group(event, parent, parent_ctx, child, child_ctx);
13498 if (ret)
13499 *inherited_all = 0;
13500
13501 return ret;
13502 }
13503
13504 /*
13505 * Initialize the perf_event context in task_struct
13506 */
13507 static int perf_event_init_context(struct task_struct *child, u64 clone_flags)
13508 {
13509 struct perf_event_context *child_ctx, *parent_ctx;
13510 struct perf_event_context *cloned_ctx;
13511 struct perf_event *event;
13512 struct task_struct *parent = current;
13513 int inherited_all = 1;
13514 unsigned long flags;
13515 int ret = 0;
13516
13517 if (likely(!parent->perf_event_ctxp))
13518 return 0;
13519
13520 /*
13521 * If the parent's context is a clone, pin it so it won't get
13522 * swapped under us.
13523 */
13524 parent_ctx = perf_pin_task_context(parent);
13525 if (!parent_ctx)
13526 return 0;
13527
13528 /*
13529 * No need to check if parent_ctx != NULL here; since we saw
13530 * it non-NULL earlier, the only reason for it to become NULL
13531 * is if we exit, and since we're currently in the middle of
13532 * a fork we can't be exiting at the same time.
13533 */
13534
13535 /*
13536 * Lock the parent list. No need to lock the child - not PID
13537 * hashed yet and not running, so nobody can access it.
13538 */
13539 mutex_lock(&parent_ctx->mutex);
13540
13541 /*
13542 * We don't have to disable NMIs - we are only looking at
13543 * the list, not manipulating it:
13544 */
13545 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
13546 ret = inherit_task_group(event, parent, parent_ctx,
13547 child, clone_flags, &inherited_all);
13548 if (ret)
13549 goto out_unlock;
13550 }
13551
13552 /*
13553 * We can't hold ctx->lock when iterating the ->flexible_groups list due
13554 * to allocations, but we need to prevent rotation because
13555 * rotate_ctx() will change the list from interrupt context.
13556 */
13557 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
13558 parent_ctx->rotate_disable = 1;
13559 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
13560
13561 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
13562 ret = inherit_task_group(event, parent, parent_ctx,
13563 child, clone_flags, &inherited_all);
13564 if (ret)
13565 goto out_unlock;
13566 }
13567
13568 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
13569 parent_ctx->rotate_disable = 0;
13570
13571 child_ctx = child->perf_event_ctxp;
13572
13573 if (child_ctx && inherited_all) {
13574 /*
13575 * Mark the child context as a clone of the parent
13576 * context, or of whatever the parent is a clone of.
13577 *
13578 * Note that if the parent is a clone, the holding of
13579 * parent_ctx->lock prevents it from being uncloned.
13580 */
13581 cloned_ctx = parent_ctx->parent_ctx;
13582 if (cloned_ctx) {
13583 child_ctx->parent_ctx = cloned_ctx;
13584 child_ctx->parent_gen = parent_ctx->parent_gen;
13585 } else {
13586 child_ctx->parent_ctx = parent_ctx;
13587 child_ctx->parent_gen = parent_ctx->generation;
13588 }
13589 get_ctx(child_ctx->parent_ctx);
13590 }
13591
13592 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
13593 out_unlock:
13594 mutex_unlock(&parent_ctx->mutex);
13595
13596 perf_unpin_context(parent_ctx);
13597 put_ctx(parent_ctx);
13598
13599 return ret;
13600 }
13601
13602 /*
13603 * Initialize the perf_event context in task_struct
13604 */
13605 int perf_event_init_task(struct task_struct *child, u64 clone_flags)
13606 {
13607 int ret;
13608
13609 child->perf_event_ctxp = NULL;
13610 mutex_init(&child->perf_event_mutex);
13611 INIT_LIST_HEAD(&child->perf_event_list);
13612
13613 ret = perf_event_init_context(child, clone_flags);
13614 if (ret) {
13615 perf_event_free_task(child);
13616 return ret;
13617 }
13618
13619 return 0;
13620 }
13621
13622 static void __init perf_event_init_all_cpus(void)
13623 {
13624 struct swevent_htable *swhash;
13625 struct perf_cpu_context *cpuctx;
13626 int cpu;
13627
13628 zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
13629
13630 for_each_possible_cpu(cpu) {
13631 swhash = &per_cpu(swevent_htable, cpu);
13632 mutex_init(&swhash->hlist_mutex);
13633
13634 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
13635 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
13636
13637 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
13638
13639 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
13640 __perf_event_init_context(&cpuctx->ctx);
13641 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
13642 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
13643 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
13644 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
13645 cpuctx->heap = cpuctx->heap_default;
13646 }
13647 }
13648
13649 static void perf_swevent_init_cpu(unsigned int cpu)
13650 {
13651 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
13652
13653 mutex_lock(&swhash->hlist_mutex);
13654 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
13655 struct swevent_hlist *hlist;
13656
13657 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
13658 WARN_ON(!hlist);
13659 rcu_assign_pointer(swhash->swevent_hlist, hlist);
13660 }
13661 mutex_unlock(&swhash->hlist_mutex);
13662 }
13663
13664 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
13665 static void __perf_event_exit_context(void *__info)
13666 {
13667 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
13668 struct perf_event_context *ctx = __info;
13669 struct perf_event *event;
13670
13671 raw_spin_lock(&ctx->lock);
13672 ctx_sched_out(ctx, EVENT_TIME);
13673 list_for_each_entry(event, &ctx->event_list, event_entry)
13674 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
13675 raw_spin_unlock(&ctx->lock);
13676 }
13677
13678 static void perf_event_exit_cpu_context(int cpu)
13679 {
13680 struct perf_cpu_context *cpuctx;
13681 struct perf_event_context *ctx;
13682
13683 // XXX simplify cpuctx->online
13684 mutex_lock(&pmus_lock);
13685 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
13686 ctx = &cpuctx->ctx;
13687
13688 mutex_lock(&ctx->mutex);
13689 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
13690 cpuctx->online = 0;
13691 mutex_unlock(&ctx->mutex);
13692 cpumask_clear_cpu(cpu, perf_online_mask);
13693 mutex_unlock(&pmus_lock);
13694 }
13695 #else
13696
13697 static void perf_event_exit_cpu_context(int cpu) { }
13698
13699 #endif
13700
13701 int perf_event_init_cpu(unsigned int cpu)
13702 {
13703 struct perf_cpu_context *cpuctx;
13704 struct perf_event_context *ctx;
13705
13706 perf_swevent_init_cpu(cpu);
13707
13708 mutex_lock(&pmus_lock);
13709 cpumask_set_cpu(cpu, perf_online_mask);
13710 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
13711 ctx = &cpuctx->ctx;
13712
13713 mutex_lock(&ctx->mutex);
13714 cpuctx->online = 1;
13715 mutex_unlock(&ctx->mutex);
13716 mutex_unlock(&pmus_lock);
13717
13718 return 0;
13719 }
13720
13721 int perf_event_exit_cpu(unsigned int cpu)
13722 {
13723 perf_event_exit_cpu_context(cpu);
13724 return 0;
13725 }
13726
13727 static int
13728 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
13729 {
13730 int cpu;
13731
13732 for_each_online_cpu(cpu)
13733 perf_event_exit_cpu(cpu);
13734
13735 return NOTIFY_OK;
13736 }
13737
13738 /*
13739 * Run the perf reboot notifier at the very last possible moment so that
13740 * the generic watchdog code runs as long as possible.
13741 */
13742 static struct notifier_block perf_reboot_notifier = {
13743 .notifier_call = perf_reboot,
13744 .priority = INT_MIN,
13745 };
13746
13747 void __init perf_event_init(void)
13748 {
13749 int ret;
13750
13751 idr_init(&pmu_idr);
13752
13753 perf_event_init_all_cpus();
13754 init_srcu_struct(&pmus_srcu);
13755 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
13756 perf_pmu_register(&perf_cpu_clock, "cpu_clock", -1);
13757 perf_pmu_register(&perf_task_clock, "task_clock", -1);
13758 perf_tp_register();
13759 perf_event_init_cpu(smp_processor_id());
13760 register_reboot_notifier(&perf_reboot_notifier);
13761
13762 ret = init_hw_breakpoint();
13763 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
13764
13765 perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);
13766
13767 /*
13768 * Build time assertion that we keep the data_head at the intended
13769 * location. IOW, validate that we got the __reserved[] size right.
13770 */
13771 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
13772 != 1024);
13773 }
13774
13775 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
13776 char *page)
13777 {
13778 struct perf_pmu_events_attr *pmu_attr =
13779 container_of(attr, struct perf_pmu_events_attr, attr);
13780
13781 if (pmu_attr->event_str)
13782 return sprintf(page, "%s\n", pmu_attr->event_str);
13783
13784 return 0;
13785 }
13786 EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
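/*
 * Usage sketch: PMU drivers typically expose named events with
 * PMU_EVENT_ATTR_STRING() from <linux/perf_event.h>, which wires the
 * sysfs ->show() method to perf_event_sysfs_show() so that reading
 * .../events/<name> prints the event string.  The name and encoding
 * below are illustrative only:
 *
 *	PMU_EVENT_ATTR_STRING(cycles, evattr_cycles, "event=0x3c");
 */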
13787
13788 static int __init perf_event_sysfs_init(void)
13789 {
13790 struct pmu *pmu;
13791 int ret;
13792
13793 mutex_lock(&pmus_lock);
13794
13795 ret = bus_register(&pmu_bus);
13796 if (ret)
13797 goto unlock;
13798
13799 list_for_each_entry(pmu, &pmus, entry) {
13800 if (pmu->dev)
13801 continue;
13802
13803 ret = pmu_dev_alloc(pmu);
13804 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
13805 }
13806 pmu_bus_running = 1;
13807 ret = 0;
13808
13809 unlock:
13810 mutex_unlock(&pmus_lock);
13811
13812 return ret;
13813 }
13814 device_initcall(perf_event_sysfs_init);
13815
13816 #ifdef CONFIG_CGROUP_PERF
13817 static struct cgroup_subsys_state *
13818 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
13819 {
13820 struct perf_cgroup *jc;
13821
13822 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
13823 if (!jc)
13824 return ERR_PTR(-ENOMEM);
13825
13826 jc->info = alloc_percpu(struct perf_cgroup_info);
13827 if (!jc->info) {
13828 kfree(jc);
13829 return ERR_PTR(-ENOMEM);
13830 }
13831
13832 return &jc->css;
13833 }
13834
13835 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
13836 {
13837 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
13838
13839 free_percpu(jc->info);
13840 kfree(jc);
13841 }
13842
13843 static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
13844 {
13845 perf_event_cgroup(css->cgroup);
13846 return 0;
13847 }
13848
13849 static int __perf_cgroup_move(void *info)
13850 {
13851 struct task_struct *task = info;
13852
13853 preempt_disable();
13854 perf_cgroup_switch(task);
13855 preempt_enable();
13856
13857 return 0;
13858 }
13859
13860 static void perf_cgroup_attach(struct cgroup_taskset *tset)
13861 {
13862 struct task_struct *task;
13863 struct cgroup_subsys_state *css;
13864
13865 cgroup_taskset_for_each(task, css, tset)
13866 task_function_call(task, __perf_cgroup_move, task);
13867 }
13868
13869 struct cgroup_subsys perf_event_cgrp_subsys = {
13870 .css_alloc = perf_cgroup_css_alloc,
13871 .css_free = perf_cgroup_css_free,
13872 .css_online = perf_cgroup_css_online,
13873 .attach = perf_cgroup_attach,
13874 /*
13875 * Implicitly enable on dfl hierarchy so that perf events can
13876 * always be filtered by cgroup2 path as long as the perf_event
13877 * controller is not mounted on a legacy hierarchy.
13878 */
13879 .implicit_on_dfl = true,
13880 .threaded = true,
13881 };
13882 #endif /* CONFIG_CGROUP_PERF */
13883
13884 DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
13885