// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/buildid.h>
#include <linux/task_work.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on right CPU with IRQs disabled, we can test
		 * if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.  This will
 * retry due to any failures in smp_call_function_single(), such as if the
 * task_cpu() goes offline concurrently.
 *
 * returns @func return value or -ESRCH or -ENXIO when the process isn't running
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	for (;;) {
		ret = smp_call_function_single(task_cpu(p), remote_function,
					       &data, 1);
		if (!ret)
			ret = data.ret;

		if (ret != -EAGAIN)
			break;

		cond_resched();
	}

	return ret;
}
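
/*
 * Editorial note (not from the original source): the retry protocol above
 * relies on data.ret being preset to -EAGAIN.  remote_function() only
 * overwrites it once it is certain it runs on @p's CPU, so an IPI that
 * lands after the task has migrated leaves -EAGAIN in place and the call
 * is retried after cond_resched().  Only the @func return value, -ESRCH
 * (the task is no longer current on that CPU), or an error from
 * smp_call_function_single() other than -EAGAIN ends the loop.
 */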

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	target cpu to queue this function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

struct perf_event_context *perf_cpu_task_ctx(void)
{
	lockdep_assert_irqs_disabled();
	return this_cpu_ptr(&perf_cpu_context)->task_ctx;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively straight
 *    forward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}
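
/*
 * Editorial note: the slow path above handles the case where the IPI keeps
 * missing the task because it is being scheduled in or out.  Once ctx->lock
 * is held and ctx->is_active is clear, the context cannot become active
 * under us, so it is safe to invoke @func locally with a NULL cpuctx.
 */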

/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	lockdep_assert_irqs_disabled();

	if (task) {
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;

			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	/* see ctx_resched() for details */
	EVENT_CPU = 0x8,
	EVENT_CGROUP = 0x10,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
static atomic_t nr_build_id_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
static struct kmem_cache *perf_event_cache;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}
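
/*
 * Worked example (editorial): with the defaults above -- 100000 samples/sec,
 * i.e. perf_sample_period_ns = 10000ns, and a 25% CPU limit -- the permitted
 * time per sample is 10000 * 25 / 100 = 2500ns, matching the initializer of
 * perf_sample_allowed_ns.
 */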

static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);

int perf_proc_update_handler(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int perf_cpu = sysctl_perf_cpu_time_max_percent;
	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (write && (perf_cpu == 100 || perf_cpu == 0))
		return -EINVAL;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_INFO
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);
	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% above the current duration,
	 * which proportionally lowers the permitted sample rate.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}
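
/*
 * Worked example (editorial, assuming HZ=1000 so TICK_NSEC == 1000000, and
 * the default 25% limit): the per-tick budget is (1000000 / 100) * 25 =
 * 250000ns.  If the decayed average sample cost reaches 4000ns, it is first
 * padded by 25% to 5000ns, giving max = 250000 / 5000 = 50 samples per tick
 * and kernel.perf_event_max_sample_rate = 50 * HZ = 50000.
 */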

static atomic64_t perf_event_id;

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}
/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */

static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	if (leader->state <= PERF_EVENT_STATE_OFF)
		return leader->state;

	return event->state;
}

static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{
	enum perf_event_state state = __perf_effective_state(event);
	u64 delta = now - event->tstamp;

	*enabled = event->total_time_enabled;
	if (state >= PERF_EVENT_STATE_INACTIVE)
		*enabled += delta;

	*running = event->total_time_running;
	if (state >= PERF_EVENT_STATE_ACTIVE)
		*running += delta;
}
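
/*
 * Editorial example: while an event is merely INACTIVE, @delta accrues to
 * *enabled only; once it is ACTIVE the same delta accrues to both sums.
 * These are the enabled/running times exposed to userspace via the
 * PERF_FORMAT_TOTAL_TIME_ENABLED/_RUNNING read formats.
 */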

static void perf_event_update_time(struct perf_event *event)
{
	u64 now = perf_event_time(event);

	__perf_update_times(event, now, &event->total_time_enabled,
					&event->total_time_running);
	event->tstamp = now;
}

static void perf_event_update_sibling_time(struct perf_event *leader)
{
	struct perf_event *sibling;

	for_each_sibling_event(sibling, leader)
		perf_event_update_time(sibling);
}

static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{
	if (event->state == state)
		return;

	perf_event_update_time(event);
	/*
	 * If a group leader gets enabled/disabled all its siblings
	 * are affected too.
	 */
	if ((event->state < 0) ^ (state < 0))
		perf_event_update_sibling_time(event);

	WRITE_ONCE(event->state, state);
}

/*
 * UP store-release, load-acquire
 */

#define __store_release(ptr, val)					\
do {									\
	barrier();							\
	WRITE_ONCE(*(ptr), (val));					\
} while (0)

#define __load_acquire(ptr)						\
({									\
	__unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr));	\
	barrier();							\
	___p;								\
})
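
/*
 * Illustration of the intended pairing (editorial note): a writer such as
 * perf_cgroup_set_timestamp() publishes with
 *
 *	WRITE_ONCE(info->timeoffset, ...);
 *	__store_release(&info->active, 1);
 *
 * while a reader such as perf_cgroup_event_time_now() observes with
 *
 *	if (!__load_acquire(&t->active))
 *		return t->time;
 *	now += READ_ONCE(t->timeoffset);
 *
 * These are "UP" orderings because writer and reader run on the same CPU
 * (the reader possibly in NMI context), where barrier() suffices.
 */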

static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
{
	struct perf_event_pmu_context *pmu_ctx;

	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
		if (cgroup && !pmu_ctx->nr_cgroups)
			continue;
		perf_pmu_disable(pmu_ctx->pmu);
	}
}

static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
{
	struct perf_event_pmu_context *pmu_ctx;

	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
		if (cgroup && !pmu_ctx->nr_cgroups)
			continue;
		perf_pmu_enable(pmu_ctx->pmu);
	}
}

static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	if (!__load_acquire(&t->active))
		return t->time;
	now += READ_ONCE(t->timeoffset);
	return now;
}

static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
{
	if (adv)
		info->time += now - info->timestamp;
	info->timestamp = now;
	/*
	 * see update_context_time()
	 */
	WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
	struct perf_cgroup *cgrp = cpuctx->cgrp;
	struct cgroup_subsys_state *css;
	struct perf_cgroup_info *info;

	if (cgrp) {
		u64 now = perf_clock();

		for (css = &cgrp->css; css; css = css->parent) {
			cgrp = container_of(css, struct perf_cgroup, css);
			info = this_cpu_ptr(cgrp->info);

			__update_cgrp_time(info, now, true);
			if (final)
				__store_release(&info->active, 0);
		}
	}
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup_info *info;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	info = this_cpu_ptr(event->cgrp->info);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (info->active)
		__update_cgrp_time(info, perf_clock(), true);
}

static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_cgroup *cgrp = cpuctx->cgrp;
	struct perf_cgroup_info *info;
	struct cgroup_subsys_state *css;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!cgrp)
		return;

	WARN_ON_ONCE(!ctx->nr_cgroups);

	for (css = &cgrp->css; css; css = css->parent) {
		cgrp = container_of(css, struct perf_cgroup, css);
		info = this_cpu_ptr(cgrp->info);
		__update_cgrp_time(info, ctx->timestamp, false);
		__store_release(&info->active, 1);
	}
}

/*
 * reschedule events based on the cgroup constraint of task.
 */
static void perf_cgroup_switch(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
	struct perf_cgroup *cgrp;

	/*
	 * cpuctx->cgrp is set when the first cgroup event enabled,
	 * and is cleared when the last cgroup event disabled.
	 */
	if (READ_ONCE(cpuctx->cgrp) == NULL)
		return;

	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

	cgrp = perf_cgroup_from_task(task, NULL);
	if (READ_ONCE(cpuctx->cgrp) == cgrp)
		return;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_ctx_disable(&cpuctx->ctx, true);

	ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
	/*
	 * must not be done before ctxswout due
	 * to update_cgrp_time_from_cpuctx() in
	 * ctx_sched_out()
	 */
	cpuctx->cgrp = cgrp;
	/*
	 * set cgrp before ctxsw in to allow
	 * perf_cgroup_set_timestamp() in ctx_sched_in()
	 * to not have to pass task around
	 */
	ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);

	perf_ctx_enable(&cpuctx->ctx, true);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}

static int perf_cgroup_ensure_storage(struct perf_event *event,
				      struct cgroup_subsys_state *css)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event **storage;
	int cpu, heap_size, ret = 0;

	/*
	 * Allow storage to have sufficient space for an iterator for each
	 * possibly nested cgroup plus an iterator for events with no cgroup.
	 */
	for (heap_size = 1; css; css = css->parent)
		heap_size++;

	for_each_possible_cpu(cpu) {
		cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
		if (heap_size <= cpuctx->heap_size)
			continue;

		storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
				       GFP_KERNEL, cpu_to_node(cpu));
		if (!storage) {
			ret = -ENOMEM;
			break;
		}

		raw_spin_lock_irq(&cpuctx->ctx.lock);
		if (cpuctx->heap_size < heap_size) {
			swap(cpuctx->heap, storage);
			if (storage == cpuctx->heap_default)
				storage = NULL;
			cpuctx->heap_size = heap_size;
		}
		raw_spin_unlock_irq(&cpuctx->ctx.lock);

		kfree(storage);
	}

	return ret;
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	ret = perf_cgroup_ensure_storage(event, css);
	if (ret)
		goto out;

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	if (!is_cgroup_event(event))
		return;

	event->pmu_ctx->nr_cgroups++;

	/*
	 * Because cgroup events are always per-cpu events,
	 * @ctx == &cpuctx->ctx.
	 */
	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

	if (ctx->nr_cgroups++)
		return;

	cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
}

static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	if (!is_cgroup_event(event))
		return;

	event->pmu_ctx->nr_cgroups--;

	/*
	 * Because cgroup events are always per-cpu events,
	 * @ctx == &cpuctx->ctx.
	 */
	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

	if (--ctx->nr_cgroups)
		return;

	cpuctx->cgrp = NULL;
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
						bool final)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
	return 0;
}

static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
}

static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
}

static void perf_cgroup_switch(struct task_struct *task)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_pmu_context *cpc;
	bool rotations;

	lockdep_assert_irqs_disabled();

	cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
	rotations = perf_rotate_context(cpc);

	raw_spin_lock(&cpc->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpc->hrtimer_interval);
	else
		cpc->hrtimer_active = 0;
	raw_spin_unlock(&cpc->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
{
	struct hrtimer *timer = &cpc->hrtimer;
	struct pmu *pmu = cpc->epc.pmu;
	u64 interval;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpc->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
{
	struct hrtimer *timer = &cpc->hrtimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
	if (!cpc->hrtimer_active) {
		cpc->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpc->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);

	return 0;
}

static int perf_mux_hrtimer_restart_ipi(void *arg)
{
	return perf_mux_hrtimer_restart(arg);
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static void perf_assert_pmu_disabled(struct pmu *pmu)
{
	WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
}

static void get_ctx(struct perf_event_context *ctx)
{
	refcount_inc(&ctx->refcount);
}

static void *alloc_task_ctx_data(struct pmu *pmu)
{
	if (pmu->task_ctx_cache)
		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);

	return NULL;
}

static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
{
	if (pmu->task_ctx_cache && task_ctx_data)
		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task && ctx->task != TASK_TOMBSTONE)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two -- this is in fact safe because
 * life-time rules separate them: an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However, because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    exec_update_lock
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    mmap_lock
 *	      perf_event::mmap_mutex
 *	        perf_buffer::aux_mutex
 *	      perf_addr_filters_head::lock
 *
 *    cpu_hotplug_lock
 *      pmus_lock
 *	  cpuctx->mutex / perf_event_context::mutex
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!refcount_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

/*
 * This must be done under the ctx->lock, so as to serialize against
 * context_equiv(); therefore we cannot call put_ctx() since that might end up
 * taking scheduler related locks, and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
			       enum pid_type type)
{
	u32 nr;
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	nr = __task_pid_nr_ns(p, type, event->ns);
	/* avoid -1 if it is idle thread or runs in another ns */
	if (!nr && !pid_alive(p))
		nr = -1;
	return nr;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, PIDTYPE_TGID);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, PIDTYPE_PID);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (ctx->task == TASK_TOMBSTONE ||
		    !refcount_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		} else {
			WARN_ON_ONCE(ctx->task != task);
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void __update_context_time(struct perf_event_context *ctx, bool adv)
{
	u64 now = perf_clock();

	lockdep_assert_held(&ctx->lock);

	if (adv)
		ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;

	/*
	 * The above: time' = time + (now - timestamp), can be re-arranged
	 * into: time' = now + (time - timestamp), which gives a single value
	 * offset to compute future time without locks on.
	 *
	 * See perf_event_time_now(), which can be used from NMI context where
	 * it's (obviously) not possible to acquire ctx->lock in order to read
	 * both the above values in a consistent manner.
	 */
	WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
}

static void update_context_time(struct perf_event_context *ctx)
{
	__update_context_time(ctx, true);
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (unlikely(!ctx))
		return 0;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx->time;
}

static u64 perf_event_time_now(struct perf_event *event, u64 now)
{
	struct perf_event_context *ctx = event->ctx;

	if (unlikely(!ctx))
		return 0;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time_now(event, now);

	if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
		return ctx->time;

	now += READ_ONCE(ctx->timeoffset);
	return now;
}

static enum event_type_t get_event_type(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	enum event_type_t event_type;

	lockdep_assert_held(&ctx->lock);

	/*
	 * It's 'group type', really, because if our group leader is
	 * pinned, so are we.
	 */
	if (event->group_leader != event)
		event = event->group_leader;

	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
	if (!ctx->task)
		event_type |= EVENT_CPU;

	return event_type;
}

/*
 * Helper function to initialize event group nodes.
 */
static void init_event_group(struct perf_event *event)
{
	RB_CLEAR_NODE(&event->group_node);
	event->group_index = 0;
}

/*
 * Extract pinned or flexible groups from the context
 * based on event attrs bits.
 */
static struct perf_event_groups *
get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Helper function to initialize perf_event_group trees.
 */
static void perf_event_groups_init(struct perf_event_groups *groups)
{
	groups->tree = RB_ROOT;
	groups->index = 0;
}

static inline struct cgroup *event_cgroup(const struct perf_event *event)
{
	struct cgroup *cgroup = NULL;

#ifdef CONFIG_CGROUP_PERF
	if (event->cgrp)
		cgroup = event->cgrp->css.cgroup;
#endif

	return cgroup;
}

/*
 * Compare function for event groups;
 *
 * Implements complex key that first sorts by CPU and then by virtual index
 * which provides ordering when rotating groups for the same CPU.
 */
static __always_inline int
perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
		      const struct cgroup *left_cgroup, const u64 left_group_index,
		      const struct perf_event *right)
{
	if (left_cpu < right->cpu)
		return -1;
	if (left_cpu > right->cpu)
		return 1;

	if (left_pmu) {
		if (left_pmu < right->pmu_ctx->pmu)
			return -1;
		if (left_pmu > right->pmu_ctx->pmu)
			return 1;
	}

#ifdef CONFIG_CGROUP_PERF
	{
		const struct cgroup *right_cgroup = event_cgroup(right);

		if (left_cgroup != right_cgroup) {
			if (!left_cgroup) {
				/*
				 * Left has no cgroup but right does, no
				 * cgroups come first.
				 */
				return -1;
			}
			if (!right_cgroup) {
				/*
				 * Right has no cgroup but left does, no
				 * cgroups come first.
				 */
				return 1;
			}
			/* Two dissimilar cgroups, order by id. */
			if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
				return -1;

			return 1;
		}
	}
#endif

	if (left_group_index < right->group_index)
		return -1;
	if (left_group_index > right->group_index)
		return 1;

	return 0;
}
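
/*
 * Editorial example of the resulting order: {cpu=0, pmu=A} sorts before
 * {cpu=1, pmu=A} irrespective of cgroup, and within one {cpu, pmu, cgroup}
 * bucket events keep insertion order, because group_index is assigned from
 * the ever-increasing ++groups->index (see perf_event_groups_insert()).
 */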

#define __node_2_pe(node) \
	rb_entry((node), struct perf_event, group_node)

static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
{
	struct perf_event *e = __node_2_pe(a);
	return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
				     e->group_index, __node_2_pe(b)) < 0;
}

struct __group_key {
	int cpu;
	struct pmu *pmu;
	struct cgroup *cgroup;
};

static inline int __group_cmp(const void *key, const struct rb_node *node)
{
	const struct __group_key *a = key;
	const struct perf_event *b = __node_2_pe(node);

	/* partial/subtree match: @cpu, @pmu, @cgroup; ignore: @group_index */
	return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
}

static inline int
__group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
{
	const struct __group_key *a = key;
	const struct perf_event *b = __node_2_pe(node);

	/* partial/subtree match: @cpu, @pmu, ignore: @cgroup, @group_index */
	return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
				     b->group_index, b);
}

/*
 * Insert @event into @groups' tree; using
 *   {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
 * as key. This places it last inside the {cpu,pmu,cgroup} subtree.
 */
static void
perf_event_groups_insert(struct perf_event_groups *groups,
			 struct perf_event *event)
{
	event->group_index = ++groups->index;

	rb_add(&event->group_node, &groups->tree, __group_less);
}

/*
 * Helper function to insert event into the pinned or flexible groups.
 */
static void
add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event_groups *groups;

	groups = get_event_groups(event, ctx);
	perf_event_groups_insert(groups, event);
}

/*
 * Delete a group from a tree.
 */
static void
perf_event_groups_delete(struct perf_event_groups *groups,
			 struct perf_event *event)
{
	WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
		     RB_EMPTY_ROOT(&groups->tree));

	rb_erase(&event->group_node, &groups->tree);
	init_event_group(event);
}

/*
 * Helper function to delete event from its groups.
 */
static void
del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event_groups *groups;

	groups = get_event_groups(event, ctx);
	perf_event_groups_delete(groups, event);
}

/*
 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
 */
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
			struct pmu *pmu, struct cgroup *cgrp)
{
	struct __group_key key = {
		.cpu = cpu,
		.pmu = pmu,
		.cgroup = cgrp,
	};
	struct rb_node *node;

	node = rb_find_first(&key, &groups->tree, __group_cmp);
	if (node)
		return __node_2_pe(node);

	return NULL;
}

static struct perf_event *
perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
{
	struct __group_key key = {
		.cpu = event->cpu,
		.pmu = pmu,
		.cgroup = event_cgroup(event),
	};
	struct rb_node *next;

	next = rb_next_match(&key, &event->group_node, __group_cmp);
	if (next)
		return __node_2_pe(next);

	return NULL;
}

#define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu)		\
	for (event = perf_event_groups_first(groups, cpu, pmu, NULL);	\
	     event; event = perf_event_groups_next(event, pmu))

/*
 * Iterate through the whole groups tree.
 */
#define perf_event_groups_for_each(event, groups)			\
	for (event = rb_entry_safe(rb_first(&((groups)->tree)),	\
				typeof(*event), group_node); event;	\
		event = rb_entry_safe(rb_next(&event->group_node),	\
				typeof(*event), group_node))

/*
 * Add an event to the list for its context.
1773 * Must be called with ctx->mutex and ctx->lock held.
1774 */
1775 static void
list_add_event(struct perf_event * event,struct perf_event_context * ctx)1776 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1777 {
1778 lockdep_assert_held(&ctx->lock);
1779
1780 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1781 event->attach_state |= PERF_ATTACH_CONTEXT;
1782
1783 event->tstamp = perf_event_time(event);
1784
1785 /*
1786 * If we're a stand alone event or group leader, we go to the context
1787 * list, group events are kept attached to the group so that
1788 * perf_group_detach can, at all times, locate all siblings.
1789 */
1790 if (event->group_leader == event) {
1791 event->group_caps = event->event_caps;
1792 add_event_to_groups(event, ctx);
1793 }
1794
1795 list_add_rcu(&event->event_entry, &ctx->event_list);
1796 ctx->nr_events++;
1797 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
1798 ctx->nr_user++;
1799 if (event->attr.inherit_stat)
1800 ctx->nr_stat++;
1801
1802 if (event->state > PERF_EVENT_STATE_OFF)
1803 perf_cgroup_event_enable(event, ctx);
1804
1805 ctx->generation++;
1806 event->pmu_ctx->nr_events++;
1807 }
1808
1809 /*
1810 * Initialize event state based on the perf_event_attr::disabled.
1811 */
perf_event__state_init(struct perf_event * event)1812 static inline void perf_event__state_init(struct perf_event *event)
1813 {
1814 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1815 PERF_EVENT_STATE_INACTIVE;
1816 }
1817
__perf_event_read_size(u64 read_format,int nr_siblings)1818 static int __perf_event_read_size(u64 read_format, int nr_siblings)
1819 {
1820 int entry = sizeof(u64); /* value */
1821 int size = 0;
1822 int nr = 1;
1823
1824 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1825 size += sizeof(u64);
1826
1827 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1828 size += sizeof(u64);
1829
1830 if (read_format & PERF_FORMAT_ID)
1831 entry += sizeof(u64);
1832
1833 if (read_format & PERF_FORMAT_LOST)
1834 entry += sizeof(u64);
1835
1836 if (read_format & PERF_FORMAT_GROUP) {
1837 nr += nr_siblings;
1838 size += sizeof(u64);
1839 }
1840
1841 /*
1842 * Since perf_event_validate_size() limits this to 16k and inhibits
1843 * adding more siblings, this will never overflow.
1844 */
1845 return size + nr * entry;
1846 }
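/*
 * Worked example (illustrative): for read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED and two siblings:
 *
 *	entry = 8 (value) + 8 (id)		= 16 bytes
 *	size  = 8 (time_enabled) + 8 (nr)	= 16 bytes
 *	nr    = 1 (leader) + 2 (siblings)	=  3
 *
 *	read_size = 16 + 3 * 16 = 64 bytes
 */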
1847
1848 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1849 {
1850 struct perf_sample_data *data;
1851 u16 size = 0;
1852
1853 if (sample_type & PERF_SAMPLE_IP)
1854 size += sizeof(data->ip);
1855
1856 if (sample_type & PERF_SAMPLE_ADDR)
1857 size += sizeof(data->addr);
1858
1859 if (sample_type & PERF_SAMPLE_PERIOD)
1860 size += sizeof(data->period);
1861
1862 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
1863 size += sizeof(data->weight.full);
1864
1865 if (sample_type & PERF_SAMPLE_READ)
1866 size += event->read_size;
1867
1868 if (sample_type & PERF_SAMPLE_DATA_SRC)
1869 size += sizeof(data->data_src.val);
1870
1871 if (sample_type & PERF_SAMPLE_TRANSACTION)
1872 size += sizeof(data->txn);
1873
1874 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1875 size += sizeof(data->phys_addr);
1876
1877 if (sample_type & PERF_SAMPLE_CGROUP)
1878 size += sizeof(data->cgroup);
1879
1880 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1881 size += sizeof(data->data_page_size);
1882
1883 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1884 size += sizeof(data->code_page_size);
1885
1886 event->header_size = size;
1887 }
1888
1889 /*
1890 * Called at perf_event creation and when events are attached/detached from a
1891 * group.
1892 */
1893 static void perf_event__header_size(struct perf_event *event)
1894 {
1895 event->read_size =
1896 __perf_event_read_size(event->attr.read_format,
1897 event->group_leader->nr_siblings);
1898 __perf_event_header_size(event, event->attr.sample_type);
1899 }
1900
1901 static void perf_event__id_header_size(struct perf_event *event)
1902 {
1903 struct perf_sample_data *data;
1904 u64 sample_type = event->attr.sample_type;
1905 u16 size = 0;
1906
1907 if (sample_type & PERF_SAMPLE_TID)
1908 size += sizeof(data->tid_entry);
1909
1910 if (sample_type & PERF_SAMPLE_TIME)
1911 size += sizeof(data->time);
1912
1913 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1914 size += sizeof(data->id);
1915
1916 if (sample_type & PERF_SAMPLE_ID)
1917 size += sizeof(data->id);
1918
1919 if (sample_type & PERF_SAMPLE_STREAM_ID)
1920 size += sizeof(data->stream_id);
1921
1922 if (sample_type & PERF_SAMPLE_CPU)
1923 size += sizeof(data->cpu_entry);
1924
1925 event->id_header_size = size;
1926 }
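/*
 * Example (illustrative): with sample_type = PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the sample ID section appended to
 * each record is:
 *
 *	tid_entry (pid, tid)	8 bytes
 *	time			8 bytes
 *	id			8 bytes
 *
 * giving event->id_header_size = 24.
 */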
1927
1928 /*
1929 * Check that adding an event to the group does not result in anybody
1930 * overflowing the 64k event limit imposed by the output buffer.
1931 *
1932 * Specifically, check that the read_size for the event does not exceed 16k,
1933 * read_size being the one term that grows with group size. Since read_size
1934 * depends on per-event read_format, also (re)check the existing events.
1935 *
1936 * This leaves 48k for the constant size fields and things like callchains,
1937 * branch stacks and register sets.
1938 */
1939 static bool perf_event_validate_size(struct perf_event *event)
1940 {
1941 struct perf_event *sibling, *group_leader = event->group_leader;
1942
1943 if (__perf_event_read_size(event->attr.read_format,
1944 group_leader->nr_siblings + 1) > 16*1024)
1945 return false;
1946
1947 if (__perf_event_read_size(group_leader->attr.read_format,
1948 group_leader->nr_siblings + 1) > 16*1024)
1949 return false;
1950
1951 /*
1952 * When creating a new group leader, group_leader->ctx is initialized
1953 * after the size has been validated, but we cannot safely use
1954 * for_each_sibling_event() until group_leader->ctx is set. A new group
1955 * leader cannot have any siblings yet, so we can safely skip checking
1956 * the non-existent siblings.
1957 */
1958 if (event == group_leader)
1959 return true;
1960
1961 for_each_sibling_event(sibling, group_leader) {
1962 if (__perf_event_read_size(sibling->attr.read_format,
1963 group_leader->nr_siblings + 1) > 16*1024)
1964 return false;
1965 }
1966
1967 return true;
1968 }
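/*
 * Worked bound (illustrative): with PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_LOST, each group member costs a 24 byte entry on top of
 * the fixed 8 byte 'nr' field, so the 16k read_size cap above is hit at
 * roughly (16*1024 - 8) / 24 ~= 682 group members.
 */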
1969
1970 static void perf_group_attach(struct perf_event *event)
1971 {
1972 struct perf_event *group_leader = event->group_leader, *pos;
1973
1974 lockdep_assert_held(&event->ctx->lock);
1975
1976 /*
1977 * We can have double attach due to group movement (move_group) in
1978 * perf_event_open().
1979 */
1980 if (event->attach_state & PERF_ATTACH_GROUP)
1981 return;
1982
1983 event->attach_state |= PERF_ATTACH_GROUP;
1984
1985 if (group_leader == event)
1986 return;
1987
1988 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1989
1990 group_leader->group_caps &= event->event_caps;
1991
1992 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
1993 group_leader->nr_siblings++;
1994 group_leader->group_generation++;
1995
1996 perf_event__header_size(group_leader);
1997
1998 for_each_sibling_event(pos, group_leader)
1999 perf_event__header_size(pos);
2000 }
2001
2002 /*
2003 * Remove an event from the lists for its context.
2004 * Must be called with ctx->mutex and ctx->lock held.
2005 */
2006 static void
2007 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
2008 {
2009 WARN_ON_ONCE(event->ctx != ctx);
2010 lockdep_assert_held(&ctx->lock);
2011
2012 /*
2013 * We can have double detach due to exit/hot-unplug + close.
2014 */
2015 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
2016 return;
2017
2018 event->attach_state &= ~PERF_ATTACH_CONTEXT;
2019
2020 ctx->nr_events--;
2021 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
2022 ctx->nr_user--;
2023 if (event->attr.inherit_stat)
2024 ctx->nr_stat--;
2025
2026 list_del_rcu(&event->event_entry);
2027
2028 if (event->group_leader == event)
2029 del_event_from_groups(event, ctx);
2030
2031 /*
2032 * If event was in error state, then keep it
2033 * that way, otherwise bogus counts will be
2034 * returned on read(). The only way to get out
2035 * of error state is by explicit re-enabling
2036 * of the event.
2037 */
2038 if (event->state > PERF_EVENT_STATE_OFF) {
2039 perf_cgroup_event_disable(event, ctx);
2040 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2041 }
2042
2043 ctx->generation++;
2044 event->pmu_ctx->nr_events--;
2045 }
2046
2047 static int
2048 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
2049 {
2050 if (!has_aux(aux_event))
2051 return 0;
2052
2053 if (!event->pmu->aux_output_match)
2054 return 0;
2055
2056 return event->pmu->aux_output_match(aux_event);
2057 }
2058
2059 static void put_event(struct perf_event *event);
2060 static void event_sched_out(struct perf_event *event,
2061 struct perf_event_context *ctx);
2062
2063 static void perf_put_aux_event(struct perf_event *event)
2064 {
2065 struct perf_event_context *ctx = event->ctx;
2066 struct perf_event *iter;
2067
2068 /*
2069 * If the event uses an aux_event, tear down the link.
2070 */
2071 if (event->aux_event) {
2072 iter = event->aux_event;
2073 event->aux_event = NULL;
2074 put_event(iter);
2075 return;
2076 }
2077
2078 /*
2079 * If the event is an aux_event, tear down all links to
2080 * it from other events.
2081 */
2082 for_each_sibling_event(iter, event->group_leader) {
2083 if (iter->aux_event != event)
2084 continue;
2085
2086 iter->aux_event = NULL;
2087 put_event(event);
2088
2089 /*
2090 * If it's ACTIVE, schedule it out and put it into ERROR
2091 * state so that we don't try to schedule it again. Note
2092 * that perf_event_enable() will clear the ERROR status.
2093 */
2094 event_sched_out(iter, ctx);
2095 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2096 }
2097 }
2098
2099 static bool perf_need_aux_event(struct perf_event *event)
2100 {
2101 return !!event->attr.aux_output || !!event->attr.aux_sample_size;
2102 }
2103
2104 static int perf_get_aux_event(struct perf_event *event,
2105 struct perf_event *group_leader)
2106 {
2107 /*
2108 * Our group leader must be an aux event if we want to be
2109 * an aux_output. This way, the aux event will precede its
2110 * aux_output events in the group, and therefore will always
2111 * schedule first.
2112 */
2113 if (!group_leader)
2114 return 0;
2115
2116 /*
2117 * aux_output and aux_sample_size are mutually exclusive.
2118 */
2119 if (event->attr.aux_output && event->attr.aux_sample_size)
2120 return 0;
2121
2122 if (event->attr.aux_output &&
2123 !perf_aux_output_match(event, group_leader))
2124 return 0;
2125
2126 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
2127 return 0;
2128
2129 if (!atomic_long_inc_not_zero(&group_leader->refcount))
2130 return 0;
2131
2132 /*
2133 * Link aux_outputs to their aux event; this is undone in
2134 * perf_group_detach() by perf_put_aux_event(). When the
2135 * group is torn down, the aux_output events lose their
2136 * link to the aux_event and can't schedule any more.
2137 */
2138 event->aux_event = group_leader;
2139
2140 return 1;
2141 }
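/*
 * Userspace sketch (illustrative; PMU type lookup and error handling
 * omitted): an aux_output event is opened into the group of an
 * AUX-capable leader, e.g. Intel PT:
 *
 *	struct perf_event_attr pt = {
 *		.type = <dynamic type of the intel_pt PMU>,
 *		.size = sizeof(pt),
 *	};
 *	struct perf_event_attr sample = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.aux_output	= 1,
 *	};
 *	int leader = syscall(SYS_perf_event_open, &pt, 0, -1, -1, 0);
 *	int out = syscall(SYS_perf_event_open, &sample, 0, -1, leader, 0);
 *
 * perf_get_aux_event() then links 'out' to 'leader' and pins a
 * reference on the leader.
 */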
2142
2143 static inline struct list_head *get_event_list(struct perf_event *event)
2144 {
2145 return event->attr.pinned ? &event->pmu_ctx->pinned_active :
2146 &event->pmu_ctx->flexible_active;
2147 }
2148
2149 /*
2150 * Events that have PERF_EV_CAP_SIBLING require being part of a group and
2151 * cannot exist on their own; schedule them out and move them into the ERROR
2152 * state. Also see _perf_event_enable(), it will not be able to recover
2153 * this ERROR state.
2154 */
2155 static inline void perf_remove_sibling_event(struct perf_event *event)
2156 {
2157 event_sched_out(event, event->ctx);
2158 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2159 }
2160
2161 static void perf_group_detach(struct perf_event *event)
2162 {
2163 struct perf_event *leader = event->group_leader;
2164 struct perf_event *sibling, *tmp;
2165 struct perf_event_context *ctx = event->ctx;
2166
2167 lockdep_assert_held(&ctx->lock);
2168
2169 /*
2170 * We can have double detach due to exit/hot-unplug + close.
2171 */
2172 if (!(event->attach_state & PERF_ATTACH_GROUP))
2173 return;
2174
2175 event->attach_state &= ~PERF_ATTACH_GROUP;
2176
2177 perf_put_aux_event(event);
2178
2179 /*
2180 * If this is a sibling, remove it from its group.
2181 */
2182 if (leader != event) {
2183 list_del_init(&event->sibling_list);
2184 event->group_leader->nr_siblings--;
2185 event->group_leader->group_generation++;
2186 goto out;
2187 }
2188
2189 /*
2190 * If this was a group event with sibling events then
2191 * upgrade the siblings to singleton events by adding them
2192 * to whatever list we are on.
2193 */
2194 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
2195
2196 if (sibling->event_caps & PERF_EV_CAP_SIBLING)
2197 perf_remove_sibling_event(sibling);
2198
2199 sibling->group_leader = sibling;
2200 list_del_init(&sibling->sibling_list);
2201
2202 /* Inherit group flags from the previous leader */
2203 sibling->group_caps = event->group_caps;
2204
2205 if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
2206 add_event_to_groups(sibling, event->ctx);
2207
2208 if (sibling->state == PERF_EVENT_STATE_ACTIVE)
2209 list_add_tail(&sibling->active_list, get_event_list(sibling));
2210 }
2211
2212 WARN_ON_ONCE(sibling->ctx != event->ctx);
2213 }
2214
2215 out:
2216 for_each_sibling_event(tmp, leader)
2217 perf_event__header_size(tmp);
2218
2219 perf_event__header_size(leader);
2220 }
2221
2222 static void sync_child_event(struct perf_event *child_event);
2223
2224 static void perf_child_detach(struct perf_event *event)
2225 {
2226 struct perf_event *parent_event = event->parent;
2227
2228 if (!(event->attach_state & PERF_ATTACH_CHILD))
2229 return;
2230
2231 event->attach_state &= ~PERF_ATTACH_CHILD;
2232
2233 if (WARN_ON_ONCE(!parent_event))
2234 return;
2235
2236 lockdep_assert_held(&parent_event->child_mutex);
2237
2238 sync_child_event(event);
2239 list_del_init(&event->child_list);
2240 }
2241
2242 static bool is_orphaned_event(struct perf_event *event)
2243 {
2244 return event->state == PERF_EVENT_STATE_DEAD;
2245 }
2246
2247 static inline int
2248 event_filter_match(struct perf_event *event)
2249 {
2250 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
2251 perf_cgroup_match(event);
2252 }
2253
2254 static void
2255 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
2256 {
2257 struct perf_event_pmu_context *epc = event->pmu_ctx;
2258 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2259 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
2260
2261 // XXX cpc serialization, probably per-cpu IRQ disabled
2262
2263 WARN_ON_ONCE(event->ctx != ctx);
2264 lockdep_assert_held(&ctx->lock);
2265
2266 if (event->state != PERF_EVENT_STATE_ACTIVE)
2267 return;
2268
2269 /*
2270 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2271 * we can schedule events _OUT_ individually through things like
2272 * __perf_remove_from_context().
2273 */
2274 list_del_init(&event->active_list);
2275
2276 perf_pmu_disable(event->pmu);
2277
2278 event->pmu->del(event, 0);
2279 event->oncpu = -1;
2280
2281 if (event->pending_disable) {
2282 event->pending_disable = 0;
2283 perf_cgroup_event_disable(event, ctx);
2284 state = PERF_EVENT_STATE_OFF;
2285 }
2286
2287 if (event->pending_sigtrap) {
2288 event->pending_sigtrap = 0;
2289 if (state != PERF_EVENT_STATE_OFF &&
2290 !event->pending_work &&
2291 !task_work_add(current, &event->pending_task, TWA_RESUME)) {
2292 event->pending_work = 1;
2293 } else {
2294 local_dec(&event->ctx->nr_pending);
2295 }
2296 }
2297
2298 perf_event_set_state(event, state);
2299
2300 if (!is_software_event(event))
2301 cpc->active_oncpu--;
2302 if (event->attr.freq && event->attr.sample_freq)
2303 ctx->nr_freq--;
2304 if (event->attr.exclusive || !cpc->active_oncpu)
2305 cpc->exclusive = 0;
2306
2307 perf_pmu_enable(event->pmu);
2308 }
2309
2310 static void
2311 group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
2312 {
2313 struct perf_event *event;
2314
2315 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2316 return;
2317
2318 perf_assert_pmu_disabled(group_event->pmu_ctx->pmu);
2319
2320 event_sched_out(group_event, ctx);
2321
2322 /*
2323 * Schedule out siblings (if any):
2324 */
2325 for_each_sibling_event(event, group_event)
2326 event_sched_out(event, ctx);
2327 }
2328
2329 #define DETACH_GROUP 0x01UL
2330 #define DETACH_CHILD 0x02UL
2331 #define DETACH_DEAD 0x04UL
2332
2333 /*
2334 * Cross CPU call to remove a performance event
2335 *
2336 * We disable the event on the hardware level first. After that we
2337 * remove it from the context list.
2338 */
2339 static void
2340 __perf_remove_from_context(struct perf_event *event,
2341 struct perf_cpu_context *cpuctx,
2342 struct perf_event_context *ctx,
2343 void *info)
2344 {
2345 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
2346 unsigned long flags = (unsigned long)info;
2347
2348 if (ctx->is_active & EVENT_TIME) {
2349 update_context_time(ctx);
2350 update_cgrp_time_from_cpuctx(cpuctx, false);
2351 }
2352
2353 /*
2354 * Ensure event_sched_out() switches to OFF; at the very least
2355 * this avoids raising perf_pending_task() at this time.
2356 */
2357 if (flags & DETACH_DEAD)
2358 event->pending_disable = 1;
2359 event_sched_out(event, ctx);
2360 if (flags & DETACH_GROUP)
2361 perf_group_detach(event);
2362 if (flags & DETACH_CHILD)
2363 perf_child_detach(event);
2364 list_del_event(event, ctx);
2365 if (flags & DETACH_DEAD)
2366 event->state = PERF_EVENT_STATE_DEAD;
2367
2368 if (!pmu_ctx->nr_events) {
2369 pmu_ctx->rotate_necessary = 0;
2370
2371 if (ctx->task && ctx->is_active) {
2372 struct perf_cpu_pmu_context *cpc;
2373
2374 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
2375 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
2376 cpc->task_epc = NULL;
2377 }
2378 }
2379
2380 if (!ctx->nr_events && ctx->is_active) {
2381 if (ctx == &cpuctx->ctx)
2382 update_cgrp_time_from_cpuctx(cpuctx, true);
2383
2384 ctx->is_active = 0;
2385 if (ctx->task) {
2386 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2387 cpuctx->task_ctx = NULL;
2388 }
2389 }
2390 }
2391
2392 /*
2393 * Remove the event from a task's (or a CPU's) list of events.
2394 *
2395 * If event->ctx is a cloned context, callers must make sure that
2396 * every task struct that event->ctx->task could possibly point to
2397 * remains valid. This is OK when called from perf_release since
2398 * that only calls us on the top-level context, which can't be a clone.
2399 * When called from perf_event_exit_task, it's OK because the
2400 * context has been detached from its task.
2401 */
2402 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
2403 {
2404 struct perf_event_context *ctx = event->ctx;
2405
2406 lockdep_assert_held(&ctx->mutex);
2407
2408 /*
2409 * Because of perf_event_exit_task(), perf_remove_from_context() ought
2410 * to work in the face of TASK_TOMBSTONE, unlike every other
2411 * event_function_call() user.
2412 */
2413 raw_spin_lock_irq(&ctx->lock);
2414 if (!ctx->is_active) {
2415 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
2416 ctx, (void *)flags);
2417 raw_spin_unlock_irq(&ctx->lock);
2418 return;
2419 }
2420 raw_spin_unlock_irq(&ctx->lock);
2421
2422 event_function_call(event, __perf_remove_from_context, (void *)flags);
2423 }
2424
2425 /*
2426 * Cross CPU call to disable a performance event
2427 */
2428 static void __perf_event_disable(struct perf_event *event,
2429 struct perf_cpu_context *cpuctx,
2430 struct perf_event_context *ctx,
2431 void *info)
2432 {
2433 if (event->state < PERF_EVENT_STATE_INACTIVE)
2434 return;
2435
2436 if (ctx->is_active & EVENT_TIME) {
2437 update_context_time(ctx);
2438 update_cgrp_time_from_event(event);
2439 }
2440
2441 perf_pmu_disable(event->pmu_ctx->pmu);
2442
2443 if (event == event->group_leader)
2444 group_sched_out(event, ctx);
2445 else
2446 event_sched_out(event, ctx);
2447
2448 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2449 perf_cgroup_event_disable(event, ctx);
2450
2451 perf_pmu_enable(event->pmu_ctx->pmu);
2452 }
2453
2454 /*
2455 * Disable an event.
2456 *
2457 * If event->ctx is a cloned context, callers must make sure that
2458 * every task struct that event->ctx->task could possibly point to
2459 * remains valid. This condition is satisfied when called through
2460 * perf_event_for_each_child or perf_event_for_each because they
2461 * hold the top-level event's child_mutex, so any descendant that
2462 * goes to exit will block in perf_event_exit_event().
2463 *
2464 * When called from perf_pending_irq it's OK because event->ctx
2465 * is the current context on this CPU and preemption is disabled,
2466 * hence we can't get into perf_event_task_sched_out for this context.
2467 */
2468 static void _perf_event_disable(struct perf_event *event)
2469 {
2470 struct perf_event_context *ctx = event->ctx;
2471
2472 raw_spin_lock_irq(&ctx->lock);
2473 if (event->state <= PERF_EVENT_STATE_OFF) {
2474 raw_spin_unlock_irq(&ctx->lock);
2475 return;
2476 }
2477 raw_spin_unlock_irq(&ctx->lock);
2478
2479 event_function_call(event, __perf_event_disable, NULL);
2480 }
2481
2482 void perf_event_disable_local(struct perf_event *event)
2483 {
2484 event_function_local(event, __perf_event_disable, NULL);
2485 }
2486
2487 /*
2488 * Strictly speaking kernel users cannot create groups and therefore this
2489 * interface does not need the perf_event_ctx_lock() magic.
2490 */
2491 void perf_event_disable(struct perf_event *event)
2492 {
2493 struct perf_event_context *ctx;
2494
2495 ctx = perf_event_ctx_lock(event);
2496 _perf_event_disable(event);
2497 perf_event_ctx_unlock(event, ctx);
2498 }
2499 EXPORT_SYMBOL_GPL(perf_event_disable);
2500
2501 void perf_event_disable_inatomic(struct perf_event *event)
2502 {
2503 event->pending_disable = 1;
2504 irq_work_queue(&event->pending_irq);
2505 }
2506
2507 #define MAX_INTERRUPTS (~0ULL)
2508
2509 static void perf_log_throttle(struct perf_event *event, int enable);
2510 static void perf_log_itrace_start(struct perf_event *event);
2511
2512 static int
2513 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
2514 {
2515 struct perf_event_pmu_context *epc = event->pmu_ctx;
2516 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2517 int ret = 0;
2518
2519 WARN_ON_ONCE(event->ctx != ctx);
2520
2521 lockdep_assert_held(&ctx->lock);
2522
2523 if (event->state <= PERF_EVENT_STATE_OFF)
2524 return 0;
2525
2526 WRITE_ONCE(event->oncpu, smp_processor_id());
2527 /*
2528 * Order event::oncpu write to happen before the ACTIVE state is
2529 * visible. This allows perf_event_{stop,read}() to observe the correct
2530 * ->oncpu if it sees ACTIVE.
2531 */
2532 smp_wmb();
2533 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
2534
2535 /*
2536 * Unthrottle events; since we just got scheduled in, we might have missed
2537 * several ticks already, and for a heavily scheduling task there is little
2538 * guarantee it'll get a tick in a timely manner.
2539 */
2540 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2541 perf_log_throttle(event, 1);
2542 event->hw.interrupts = 0;
2543 }
2544
2545 perf_pmu_disable(event->pmu);
2546
2547 perf_log_itrace_start(event);
2548
2549 if (event->pmu->add(event, PERF_EF_START)) {
2550 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2551 event->oncpu = -1;
2552 ret = -EAGAIN;
2553 goto out;
2554 }
2555
2556 if (!is_software_event(event))
2557 cpc->active_oncpu++;
2558 if (event->attr.freq && event->attr.sample_freq)
2559 ctx->nr_freq++;
2560
2561 if (event->attr.exclusive)
2562 cpc->exclusive = 1;
2563
2564 out:
2565 perf_pmu_enable(event->pmu);
2566
2567 return ret;
2568 }
2569
2570 static int
2571 group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
2572 {
2573 struct perf_event *event, *partial_group = NULL;
2574 struct pmu *pmu = group_event->pmu_ctx->pmu;
2575
2576 if (group_event->state == PERF_EVENT_STATE_OFF)
2577 return 0;
2578
2579 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2580
2581 if (event_sched_in(group_event, ctx))
2582 goto error;
2583
2584 /*
2585 * Schedule in siblings as one group (if any):
2586 */
2587 for_each_sibling_event(event, group_event) {
2588 if (event_sched_in(event, ctx)) {
2589 partial_group = event;
2590 goto group_error;
2591 }
2592 }
2593
2594 if (!pmu->commit_txn(pmu))
2595 return 0;
2596
2597 group_error:
2598 /*
2599 * Groups can be scheduled in as one unit only, so undo any
2600 * partial group before returning:
2601 * The events up to the failed event are scheduled out normally.
2602 */
2603 for_each_sibling_event(event, group_event) {
2604 if (event == partial_group)
2605 break;
2606
2607 event_sched_out(event, ctx);
2608 }
2609 event_sched_out(group_event, ctx);
2610
2611 error:
2612 pmu->cancel_txn(pmu);
2613 return -EAGAIN;
2614 }
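/*
 * The start_txn()/commit_txn()/cancel_txn() dance above lets a PMU
 * driver defer the expensive schedulability check until the whole
 * group has been collected. A minimal driver-side sketch (hypothetical
 * PMU, not a real driver):
 *
 *	static void my_pmu_start_txn(struct pmu *pmu, unsigned int flags)
 *	{
 *		if (flags & PERF_PMU_TXN_ADD)
 *			perf_pmu_disable(pmu);	// batch subsequent ->add()s
 *	}
 *
 *	static int my_pmu_commit_txn(struct pmu *pmu)
 *	{
 *		// check all collected events fit the hardware at once;
 *		// returning -EAGAIN makes group_sched_in() unwind the group
 *		return my_pmu_fits(pmu) ? 0 : -EAGAIN;
 *	}
 *
 * my_pmu_fits() is a hypothetical constraint check.
 */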
2615
2616 /*
2617 * Work out whether we can put this event group on the CPU now.
2618 */
2619 static int group_can_go_on(struct perf_event *event, int can_add_hw)
2620 {
2621 struct perf_event_pmu_context *epc = event->pmu_ctx;
2622 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2623
2624 /*
2625 * Groups consisting entirely of software events can always go on.
2626 */
2627 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2628 return 1;
2629 /*
2630 * If an exclusive group is already on, no other hardware
2631 * events can go on.
2632 */
2633 if (cpc->exclusive)
2634 return 0;
2635 /*
2636 * If this group is exclusive and there are already
2637 * events on the CPU, it can't go on.
2638 */
2639 if (event->attr.exclusive && !list_empty(get_event_list(event)))
2640 return 0;
2641 /*
2642 * Otherwise, try to add it if all previous groups were able
2643 * to go on.
2644 */
2645 return can_add_hw;
2646 }
2647
2648 static void add_event_to_ctx(struct perf_event *event,
2649 struct perf_event_context *ctx)
2650 {
2651 list_add_event(event, ctx);
2652 perf_group_attach(event);
2653 }
2654
2655 static void task_ctx_sched_out(struct perf_event_context *ctx,
2656 enum event_type_t event_type)
2657 {
2658 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2659
2660 if (!cpuctx->task_ctx)
2661 return;
2662
2663 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2664 return;
2665
2666 ctx_sched_out(ctx, event_type);
2667 }
2668
2669 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2670 struct perf_event_context *ctx)
2671 {
2672 ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
2673 if (ctx)
2674 ctx_sched_in(ctx, EVENT_PINNED);
2675 ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
2676 if (ctx)
2677 ctx_sched_in(ctx, EVENT_FLEXIBLE);
2678 }
2679
2680 /*
2681 * We want to maintain the following priority of scheduling:
2682 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2683 * - task pinned (EVENT_PINNED)
2684 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2685 * - task flexible (EVENT_FLEXIBLE).
2686 *
2687 * In order to avoid unscheduling and scheduling back in everything every
2688 * time an event is added, only do it for the groups of equal priority and
2689 * below.
2690 *
2691 * This can be called after a batch operation on task events, in which case
2692 * event_type is a bit mask of the types of events involved. For CPU events,
2693 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2694 */
2695 /*
2696 * XXX: ctx_resched() reschedules the entire perf_event_context while adding a
2697 * new event to the context or enabling an existing event in the context. We
2698 * can probably optimize it by rescheduling only the affected pmu_ctx.
2699 */
2700 static void ctx_resched(struct perf_cpu_context *cpuctx,
2701 struct perf_event_context *task_ctx,
2702 enum event_type_t event_type)
2703 {
2704 bool cpu_event = !!(event_type & EVENT_CPU);
2705
2706 /*
2707 * If pinned groups are involved, flexible groups also need to be
2708 * scheduled out.
2709 */
2710 if (event_type & EVENT_PINNED)
2711 event_type |= EVENT_FLEXIBLE;
2712
2713 event_type &= EVENT_ALL;
2714
2715 perf_ctx_disable(&cpuctx->ctx, false);
2716 if (task_ctx) {
2717 perf_ctx_disable(task_ctx, false);
2718 task_ctx_sched_out(task_ctx, event_type);
2719 }
2720
2721 /*
2722 * Decide which cpu ctx groups to schedule out based on the types
2723 * of events that caused rescheduling:
2724 * - EVENT_CPU: schedule out corresponding groups;
2725 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2726 * - otherwise, do nothing more.
2727 */
2728 if (cpu_event)
2729 ctx_sched_out(&cpuctx->ctx, event_type);
2730 else if (event_type & EVENT_PINNED)
2731 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
2732
2733 perf_event_sched_in(cpuctx, task_ctx);
2734
2735 perf_ctx_enable(&cpuctx->ctx, false);
2736 if (task_ctx)
2737 perf_ctx_enable(task_ctx, false);
2738 }
2739
2740 void perf_pmu_resched(struct pmu *pmu)
2741 {
2742 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2743 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2744
2745 perf_ctx_lock(cpuctx, task_ctx);
2746 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
2747 perf_ctx_unlock(cpuctx, task_ctx);
2748 }
2749
2750 /*
2751 * Cross CPU call to install and enable a performance event
2752 *
2753 * Very similar to remote_function() + event_function() but cannot assume that
2754 * things like ctx->is_active and cpuctx->task_ctx are set.
2755 */
2756 static int __perf_install_in_context(void *info)
2757 {
2758 struct perf_event *event = info;
2759 struct perf_event_context *ctx = event->ctx;
2760 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2761 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2762 bool reprogram = true;
2763 int ret = 0;
2764
2765 raw_spin_lock(&cpuctx->ctx.lock);
2766 if (ctx->task) {
2767 raw_spin_lock(&ctx->lock);
2768 task_ctx = ctx;
2769
2770 reprogram = (ctx->task == current);
2771
2772 /*
2773 * If the task is running, it must be running on this CPU,
2774 * otherwise we cannot reprogram things.
2775 *
2776 * If it's not running, we don't care; ctx->lock will
2777 * serialize against it becoming runnable.
2778 */
2779 if (task_curr(ctx->task) && !reprogram) {
2780 ret = -ESRCH;
2781 goto unlock;
2782 }
2783
2784 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2785 } else if (task_ctx) {
2786 raw_spin_lock(&task_ctx->lock);
2787 }
2788
2789 #ifdef CONFIG_CGROUP_PERF
2790 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
2791 /*
2792 * If the current cgroup doesn't match the event's
2793 * cgroup, we should not try to schedule it.
2794 */
2795 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2796 reprogram = cgroup_is_descendant(cgrp->css.cgroup,
2797 event->cgrp->css.cgroup);
2798 }
2799 #endif
2800
2801 if (reprogram) {
2802 ctx_sched_out(ctx, EVENT_TIME);
2803 add_event_to_ctx(event, ctx);
2804 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2805 } else {
2806 add_event_to_ctx(event, ctx);
2807 }
2808
2809 unlock:
2810 perf_ctx_unlock(cpuctx, task_ctx);
2811
2812 return ret;
2813 }
2814
2815 static bool exclusive_event_installable(struct perf_event *event,
2816 struct perf_event_context *ctx);
2817
2818 /*
2819 * Attach a performance event to a context.
2820 *
2821 * Very similar to event_function_call, see comment there.
2822 */
2823 static void
2824 perf_install_in_context(struct perf_event_context *ctx,
2825 struct perf_event *event,
2826 int cpu)
2827 {
2828 struct task_struct *task = READ_ONCE(ctx->task);
2829
2830 lockdep_assert_held(&ctx->mutex);
2831
2832 WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
2833
2834 if (event->cpu != -1)
2835 WARN_ON_ONCE(event->cpu != cpu);
2836
2837 /*
2838 * Ensures that if we can observe event->ctx, both the event and ctx
2839 * will be 'complete'. See perf_iterate_sb_cpu().
2840 */
2841 smp_store_release(&event->ctx, ctx);
2842
2843 /*
2844 * perf_event_attr::disabled events will not run and can be initialized
2845 * without IPI. Except when this is the first event for the context, in
2846 * that case we need the magic of the IPI to set ctx->is_active.
2847 *
2848 * The IOC_ENABLE that is sure to follow the creation of a disabled
2849 * event will issue the IPI and reprogram the hardware.
2850 */
2851 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
2852 ctx->nr_events && !is_cgroup_event(event)) {
2853 raw_spin_lock_irq(&ctx->lock);
2854 if (ctx->task == TASK_TOMBSTONE) {
2855 raw_spin_unlock_irq(&ctx->lock);
2856 return;
2857 }
2858 add_event_to_ctx(event, ctx);
2859 raw_spin_unlock_irq(&ctx->lock);
2860 return;
2861 }
2862
2863 if (!task) {
2864 cpu_function_call(cpu, __perf_install_in_context, event);
2865 return;
2866 }
2867
2868 /*
2869 * Should not happen, we validate the ctx is still alive before calling.
2870 */
2871 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2872 return;
2873
2874 /*
2875 * Installing events is tricky because we cannot rely on ctx->is_active
2876 * to be set in case this is the nr_events 0 -> 1 transition.
2877 *
2878 * Instead we use task_curr(), which tells us if the task is running.
2879 * However, since we use task_curr() outside of rq::lock, we can race
2880 * against the actual state. This means the result can be wrong.
2881 *
2882 * If we get a false positive, we retry, this is harmless.
2883 *
2884 * If we get a false negative, things are complicated. If we are after
2885 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2886 * value must be correct. If we're before, it doesn't matter since
2887 * perf_event_context_sched_in() will program the counter.
2888 *
2889 * However, this hinges on the remote context switch having observed
2890 * our task->perf_event_ctxp[] store, such that it will in fact take
2891 * ctx::lock in perf_event_context_sched_in().
2892 *
2893 * We do this by task_function_call(), if the IPI fails to hit the task
2894 * we know any future context switch of task must see the
2895 * perf_event_ctxp[] store.
2896 */
2897
2898 /*
2899 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2900 * task_cpu() load, such that if the IPI then does not find the task
2901 * running, a future context switch of that task must observe the
2902 * store.
2903 */
2904 smp_mb();
2905 again:
2906 if (!task_function_call(task, __perf_install_in_context, event))
2907 return;
2908
2909 raw_spin_lock_irq(&ctx->lock);
2910 task = ctx->task;
2911 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2912 /*
2913 * Cannot happen because we already checked above (which also
2914 * cannot happen), and we hold ctx->mutex, which serializes us
2915 * against perf_event_exit_task_context().
2916 */
2917 raw_spin_unlock_irq(&ctx->lock);
2918 return;
2919 }
2920 /*
2921 * If the task is not running, ctx->lock will avoid it becoming so,
2922 * thus we can safely install the event.
2923 */
2924 if (task_curr(task)) {
2925 raw_spin_unlock_irq(&ctx->lock);
2926 goto again;
2927 }
2928 add_event_to_ctx(event, ctx);
2929 raw_spin_unlock_irq(&ctx->lock);
2930 }
2931
2932 /*
2933 * Cross CPU call to enable a performance event
2934 */
2935 static void __perf_event_enable(struct perf_event *event,
2936 struct perf_cpu_context *cpuctx,
2937 struct perf_event_context *ctx,
2938 void *info)
2939 {
2940 struct perf_event *leader = event->group_leader;
2941 struct perf_event_context *task_ctx;
2942
2943 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2944 event->state <= PERF_EVENT_STATE_ERROR)
2945 return;
2946
2947 if (ctx->is_active)
2948 ctx_sched_out(ctx, EVENT_TIME);
2949
2950 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2951 perf_cgroup_event_enable(event, ctx);
2952
2953 if (!ctx->is_active)
2954 return;
2955
2956 if (!event_filter_match(event)) {
2957 ctx_sched_in(ctx, EVENT_TIME);
2958 return;
2959 }
2960
2961 /*
2962 * If the event is in a group and isn't the group leader,
2963 * then don't put it on unless the group is on.
2964 */
2965 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2966 ctx_sched_in(ctx, EVENT_TIME);
2967 return;
2968 }
2969
2970 task_ctx = cpuctx->task_ctx;
2971 if (ctx->task)
2972 WARN_ON_ONCE(task_ctx != ctx);
2973
2974 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2975 }
2976
2977 /*
2978 * Enable an event.
2979 *
2980 * If event->ctx is a cloned context, callers must make sure that
2981 * every task struct that event->ctx->task could possibly point to
2982 * remains valid. This condition is satisfied when called through
2983 * perf_event_for_each_child or perf_event_for_each as described
2984 * for perf_event_disable.
2985 */
2986 static void _perf_event_enable(struct perf_event *event)
2987 {
2988 struct perf_event_context *ctx = event->ctx;
2989
2990 raw_spin_lock_irq(&ctx->lock);
2991 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2992 event->state < PERF_EVENT_STATE_ERROR) {
2993 out:
2994 raw_spin_unlock_irq(&ctx->lock);
2995 return;
2996 }
2997
2998 /*
2999 * If the event is in error state, clear that first.
3000 *
3001 * That way, if we see the event in error state below, we know that it
3002 * has gone back into error state, as distinct from the task having
3003 * been scheduled away before the cross-call arrived.
3004 */
3005 if (event->state == PERF_EVENT_STATE_ERROR) {
3006 /*
3007 * Detached SIBLING events cannot leave ERROR state.
3008 */
3009 if (event->event_caps & PERF_EV_CAP_SIBLING &&
3010 event->group_leader == event)
3011 goto out;
3012
3013 event->state = PERF_EVENT_STATE_OFF;
3014 }
3015 raw_spin_unlock_irq(&ctx->lock);
3016
3017 event_function_call(event, __perf_event_enable, NULL);
3018 }
3019
3020 /*
3021 * See perf_event_disable();
3022 */
3023 void perf_event_enable(struct perf_event *event)
3024 {
3025 struct perf_event_context *ctx;
3026
3027 ctx = perf_event_ctx_lock(event);
3028 _perf_event_enable(event);
3029 perf_event_ctx_unlock(event, ctx);
3030 }
3031 EXPORT_SYMBOL_GPL(perf_event_enable);
3032
3033 struct stop_event_data {
3034 struct perf_event *event;
3035 unsigned int restart;
3036 };
3037
3038 static int __perf_event_stop(void *info)
3039 {
3040 struct stop_event_data *sd = info;
3041 struct perf_event *event = sd->event;
3042
3043 /* if it's already INACTIVE, do nothing */
3044 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3045 return 0;
3046
3047 /* matches smp_wmb() in event_sched_in() */
3048 smp_rmb();
3049
3050 /*
3051 * There is a window with interrupts enabled before we get here,
3052 * so we need to check again lest we try to stop another CPU's event.
3053 */
3054 if (READ_ONCE(event->oncpu) != smp_processor_id())
3055 return -EAGAIN;
3056
3057 event->pmu->stop(event, PERF_EF_UPDATE);
3058
3059 /*
3060 * May race with the actual stop (through perf_pmu_output_stop()),
3061 * but it is only used for events with AUX ring buffer, and such
3062 * events will refuse to restart because of rb::aux_mmap_count==0,
3063 * see comments in perf_aux_output_begin().
3064 *
3065 * Since this is happening on an event-local CPU, no trace is lost
3066 * while restarting.
3067 */
3068 if (sd->restart)
3069 event->pmu->start(event, 0);
3070
3071 return 0;
3072 }
3073
3074 static int perf_event_stop(struct perf_event *event, int restart)
3075 {
3076 struct stop_event_data sd = {
3077 .event = event,
3078 .restart = restart,
3079 };
3080 int ret = 0;
3081
3082 do {
3083 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3084 return 0;
3085
3086 /* matches smp_wmb() in event_sched_in() */
3087 smp_rmb();
3088
3089 /*
3090 * We only want to restart ACTIVE events, so if the event goes
3091 * inactive here (event->oncpu==-1), there's nothing more to do;
3092 * fall through with ret==-ENXIO.
3093 */
3094 ret = cpu_function_call(READ_ONCE(event->oncpu),
3095 __perf_event_stop, &sd);
3096 } while (ret == -EAGAIN);
3097
3098 return ret;
3099 }
3100
3101 /*
3102 * In order to contain the racy and tricky bits of the address filter
3103 * configuration management, it is a two-part process:
3104 *
3105 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
3106 * we update the addresses of corresponding vmas in
3107 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3108 * (p2) when an event is scheduled in (pmu::add), it calls
3109 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
3110 * if the generation has changed since the previous call.
3111 *
3112 * If (p1) happens while the event is active, we restart it to force (p2).
3113 *
3114 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
3115 * pre-existing mappings, called once when new filters arrive via SET_FILTER
3116 * ioctl;
3117 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
3118 * registered mapping, called for every new mmap(), with mm::mmap_lock down
3119 * for reading;
3120 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
3121 * of exec.
3122 */
3123 void perf_event_addr_filters_sync(struct perf_event *event)
3124 {
3125 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
3126
3127 if (!has_addr_filter(event))
3128 return;
3129
3130 raw_spin_lock(&ifh->lock);
3131 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
3132 event->pmu->addr_filters_sync(event);
3133 event->hw.addr_filters_gen = event->addr_filters_gen;
3134 }
3135 raw_spin_unlock(&ifh->lock);
3136 }
3137 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
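/*
 * Userspace sketch (illustrative; path and offsets are made up):
 * address filters arrive as text through the SET_FILTER ioctl, e.g.
 * to restrict instruction tracing to one 4K range of a binary:
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
 *	      "filter 0x1000/0x1000@/usr/bin/example");
 *
 * That takes path (1) above; later mmap()s of the object take path (2).
 */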
3138
3139 static int _perf_event_refresh(struct perf_event *event, int refresh)
3140 {
3141 /*
3142 * not supported on inherited events
3143 */
3144 if (event->attr.inherit || !is_sampling_event(event))
3145 return -EINVAL;
3146
3147 atomic_add(refresh, &event->event_limit);
3148 _perf_event_enable(event);
3149
3150 return 0;
3151 }
3152
3153 /*
3154 * See perf_event_disable()
3155 */
3156 int perf_event_refresh(struct perf_event *event, int refresh)
3157 {
3158 struct perf_event_context *ctx;
3159 int ret;
3160
3161 ctx = perf_event_ctx_lock(event);
3162 ret = _perf_event_refresh(event, refresh);
3163 perf_event_ctx_unlock(event, ctx);
3164
3165 return ret;
3166 }
3167 EXPORT_SYMBOL_GPL(perf_event_refresh);
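/*
 * Usage sketch (illustrative; signal setup abridged): IOC_REFRESH arms
 * a sampling event for 'refresh' more overflows, after which the event
 * disables itself. Commonly paired with signal driven self-profiling:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, O_ASYNC);
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	// one more overflow, then OFF
 */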
3168
3169 static int perf_event_modify_breakpoint(struct perf_event *bp,
3170 struct perf_event_attr *attr)
3171 {
3172 int err;
3173
3174 _perf_event_disable(bp);
3175
3176 err = modify_user_hw_breakpoint_check(bp, attr, true);
3177
3178 if (!bp->attr.disabled)
3179 _perf_event_enable(bp);
3180
3181 return err;
3182 }
3183
3184 /*
3185 * Copy event-type-independent attributes that may be modified.
3186 */
3187 static void perf_event_modify_copy_attr(struct perf_event_attr *to,
3188 const struct perf_event_attr *from)
3189 {
3190 to->sig_data = from->sig_data;
3191 }
3192
3193 static int perf_event_modify_attr(struct perf_event *event,
3194 struct perf_event_attr *attr)
3195 {
3196 int (*func)(struct perf_event *, struct perf_event_attr *);
3197 struct perf_event *child;
3198 int err;
3199
3200 if (event->attr.type != attr->type)
3201 return -EINVAL;
3202
3203 switch (event->attr.type) {
3204 case PERF_TYPE_BREAKPOINT:
3205 func = perf_event_modify_breakpoint;
3206 break;
3207 default:
3208 /* Place holder for future additions. */
3209 return -EOPNOTSUPP;
3210 }
3211
3212 WARN_ON_ONCE(event->ctx->parent_ctx);
3213
3214 mutex_lock(&event->child_mutex);
3215 /*
3216 * Event-type-independent attributes must be copied before event-type
3217 * modification, which will validate that final attributes match the
3218 * source attributes after all relevant attributes have been copied.
3219 */
3220 perf_event_modify_copy_attr(&event->attr, attr);
3221 err = func(event, attr);
3222 if (err)
3223 goto out;
3224 list_for_each_entry(child, &event->child_list, child_list) {
3225 perf_event_modify_copy_attr(&child->attr, attr);
3226 err = func(child, attr);
3227 if (err)
3228 goto out;
3229 }
3230 out:
3231 mutex_unlock(&event->child_mutex);
3232 return err;
3233 }
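/*
 * Usage sketch (illustrative): moving an existing hardware breakpoint
 * without re-opening the event, via PERF_EVENT_IOC_MODIFY_ATTRIBUTES:
 *
 *	struct perf_event_attr attr = original_attr;	// same .type
 *	attr.bp_addr = new_addr;			// hypothetical address
 *	ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
 *
 * perf_event_modify_attr() applies the change to the event and all of
 * its inherited children under child_mutex.
 */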
3234
3235 static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
3236 enum event_type_t event_type)
3237 {
3238 struct perf_event_context *ctx = pmu_ctx->ctx;
3239 struct perf_event *event, *tmp;
3240 struct pmu *pmu = pmu_ctx->pmu;
3241
3242 if (ctx->task && !ctx->is_active) {
3243 struct perf_cpu_pmu_context *cpc;
3244
3245 cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3246 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3247 cpc->task_epc = NULL;
3248 }
3249
3250 if (!event_type)
3251 return;
3252
3253 perf_pmu_disable(pmu);
3254 if (event_type & EVENT_PINNED) {
3255 list_for_each_entry_safe(event, tmp,
3256 &pmu_ctx->pinned_active,
3257 active_list)
3258 group_sched_out(event, ctx);
3259 }
3260
3261 if (event_type & EVENT_FLEXIBLE) {
3262 list_for_each_entry_safe(event, tmp,
3263 &pmu_ctx->flexible_active,
3264 active_list)
3265 group_sched_out(event, ctx);
3266 /*
3267 * Since we cleared EVENT_FLEXIBLE, also clear
3268 * rotate_necessary; it will be reset by
3269 * ctx_flexible_sched_in() when needed.
3270 */
3271 pmu_ctx->rotate_necessary = 0;
3272 }
3273 perf_pmu_enable(pmu);
3274 }
3275
3276 static void
3277 ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
3278 {
3279 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3280 struct perf_event_pmu_context *pmu_ctx;
3281 int is_active = ctx->is_active;
3282 bool cgroup = event_type & EVENT_CGROUP;
3283
3284 event_type &= ~EVENT_CGROUP;
3285
3286 lockdep_assert_held(&ctx->lock);
3287
3288 if (likely(!ctx->nr_events)) {
3289 /*
3290 * See __perf_remove_from_context().
3291 */
3292 WARN_ON_ONCE(ctx->is_active);
3293 if (ctx->task)
3294 WARN_ON_ONCE(cpuctx->task_ctx);
3295 return;
3296 }
3297
3298 /*
3299 * Always update time if it was set; not only when it changes.
3300 * Otherwise we can 'forget' to update time for any but the last
3301 * context we sched out. For example:
3302 *
3303 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
3304 * ctx_sched_out(.event_type = EVENT_PINNED)
3305 *
3306 * would only update time for the pinned events.
3307 */
3308 if (is_active & EVENT_TIME) {
3309 /* update (and stop) ctx time */
3310 update_context_time(ctx);
3311 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
3312 /*
3313 * CPU-release for the below ->is_active store,
3314 * see __load_acquire() in perf_event_time_now()
3315 */
3316 barrier();
3317 }
3318
3319 ctx->is_active &= ~event_type;
3320 if (!(ctx->is_active & EVENT_ALL))
3321 ctx->is_active = 0;
3322
3323 if (ctx->task) {
3324 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3325 if (!ctx->is_active)
3326 cpuctx->task_ctx = NULL;
3327 }
3328
3329 is_active ^= ctx->is_active; /* changed bits */
3330
3331 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3332 if (cgroup && !pmu_ctx->nr_cgroups)
3333 continue;
3334 __pmu_ctx_sched_out(pmu_ctx, is_active);
3335 }
3336 }
3337
3338 /*
3339 * Test whether two contexts are equivalent, i.e. whether they have both been
3340 * cloned from the same version of the same context.
3341 *
3342 * Equivalence is measured using a generation number in the context that is
3343 * incremented on each modification to it; see unclone_ctx(), list_add_event()
3344 * and list_del_event().
3345 */
3346 static int context_equiv(struct perf_event_context *ctx1,
3347 struct perf_event_context *ctx2)
3348 {
3349 lockdep_assert_held(&ctx1->lock);
3350 lockdep_assert_held(&ctx2->lock);
3351
3352 /* Pinning disables the swap optimization */
3353 if (ctx1->pin_count || ctx2->pin_count)
3354 return 0;
3355
3356 /* If ctx1 is the parent of ctx2 */
3357 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
3358 return 1;
3359
3360 /* If ctx2 is the parent of ctx1 */
3361 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
3362 return 1;
3363
3364 /*
3365 * If ctx1 and ctx2 have the same parent; we flatten the parent
3366 * hierarchy, see perf_event_init_context().
3367 */
3368 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
3369 ctx1->parent_gen == ctx2->parent_gen)
3370 return 1;
3371
3372 /* Unmatched */
3373 return 0;
3374 }
3375
3376 static void __perf_event_sync_stat(struct perf_event *event,
3377 struct perf_event *next_event)
3378 {
3379 u64 value;
3380
3381 if (!event->attr.inherit_stat)
3382 return;
3383
3384 /*
3385 * Update the event value; we cannot use perf_event_read()
3386 * because we're in the middle of a context switch and have IRQs
3387 * disabled, which upsets smp_call_function_single(), however
3388 * we know the event must be on the current CPU, therefore we
3389 * don't need to use it.
3390 */
3391 if (event->state == PERF_EVENT_STATE_ACTIVE)
3392 event->pmu->read(event);
3393
3394 perf_event_update_time(event);
3395
3396 /*
3397 * In order to keep per-task stats reliable we need to flip the event
3398 * values when we flip the contexts.
3399 */
3400 value = local64_read(&next_event->count);
3401 value = local64_xchg(&event->count, value);
3402 local64_set(&next_event->count, value);
3403
3404 swap(event->total_time_enabled, next_event->total_time_enabled);
3405 swap(event->total_time_running, next_event->total_time_running);
3406
3407 /*
3408 * Since we swizzled the values, update the user visible data too.
3409 */
3410 perf_event_update_userpage(event);
3411 perf_event_update_userpage(next_event);
3412 }
3413
3414 static void perf_event_sync_stat(struct perf_event_context *ctx,
3415 struct perf_event_context *next_ctx)
3416 {
3417 struct perf_event *event, *next_event;
3418
3419 if (!ctx->nr_stat)
3420 return;
3421
3422 update_context_time(ctx);
3423
3424 event = list_first_entry(&ctx->event_list,
3425 struct perf_event, event_entry);
3426
3427 next_event = list_first_entry(&next_ctx->event_list,
3428 struct perf_event, event_entry);
3429
3430 while (&event->event_entry != &ctx->event_list &&
3431 &next_event->event_entry != &next_ctx->event_list) {
3432
3433 __perf_event_sync_stat(event, next_event);
3434
3435 event = list_next_entry(event, event_entry);
3436 next_event = list_next_entry(next_event, event_entry);
3437 }
3438 }
3439
3440 #define double_list_for_each_entry(pos1, pos2, head1, head2, member) \
3441 for (pos1 = list_first_entry(head1, typeof(*pos1), member), \
3442 pos2 = list_first_entry(head2, typeof(*pos2), member); \
3443 !list_entry_is_head(pos1, head1, member) && \
3444 !list_entry_is_head(pos2, head2, member); \
3445 pos1 = list_next_entry(pos1, member), \
3446 pos2 = list_next_entry(pos2, member))
3447
3448 static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
3449 struct perf_event_context *next_ctx)
3450 {
3451 struct perf_event_pmu_context *prev_epc, *next_epc;
3452
3453 if (!prev_ctx->nr_task_data)
3454 return;
3455
3456 double_list_for_each_entry(prev_epc, next_epc,
3457 &prev_ctx->pmu_ctx_list, &next_ctx->pmu_ctx_list,
3458 pmu_ctx_entry) {
3459
3460 if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu))
3461 continue;
3462
3463 /*
3464 * PMU specific parts of task perf context can require
3465 * additional synchronization. As an example of such
3466 * synchronization see implementation details of Intel
3467 * LBR call stack data profiling;
3468 */
3469 if (prev_epc->pmu->swap_task_ctx)
3470 prev_epc->pmu->swap_task_ctx(prev_epc, next_epc);
3471 else
3472 swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
3473 }
3474 }
3475
3476 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
3477 {
3478 struct perf_event_pmu_context *pmu_ctx;
3479 struct perf_cpu_pmu_context *cpc;
3480
3481 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3482 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3483
3484 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
3485 pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
3486 }
3487 }
3488
3489 static void
3490 perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
3491 {
3492 struct perf_event_context *ctx = task->perf_event_ctxp;
3493 struct perf_event_context *next_ctx;
3494 struct perf_event_context *parent, *next_parent;
3495 int do_switch = 1;
3496
3497 if (likely(!ctx))
3498 return;
3499
3500 rcu_read_lock();
3501 next_ctx = rcu_dereference(next->perf_event_ctxp);
3502 if (!next_ctx)
3503 goto unlock;
3504
3505 parent = rcu_dereference(ctx->parent_ctx);
3506 next_parent = rcu_dereference(next_ctx->parent_ctx);
3507
3508 /* If neither context has a parent context, they cannot be clones. */
3509 if (!parent && !next_parent)
3510 goto unlock;
3511
3512 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
3513 /*
3514 * Looks like the two contexts are clones, so we might be
3515 * able to optimize the context switch. We lock both
3516 * contexts and check that they are clones under the
3517 * lock (including re-checking that neither has been
3518 * uncloned in the meantime). It doesn't matter which
3519 * order we take the locks because no other cpu could
3520 * be trying to lock both of these tasks.
3521 */
3522 raw_spin_lock(&ctx->lock);
3523 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
3524 if (context_equiv(ctx, next_ctx)) {
3525
3526 perf_ctx_disable(ctx, false);
3527
3528 /* PMIs are disabled; ctx->nr_pending is stable. */
3529 if (local_read(&ctx->nr_pending) ||
3530 local_read(&next_ctx->nr_pending)) {
3531 /*
3532 * Must not swap out ctx when there's pending
3533 * events that rely on the ctx->task relation.
3534 */
3535 raw_spin_unlock(&next_ctx->lock);
3536 rcu_read_unlock();
3537 goto inside_switch;
3538 }
3539
3540 WRITE_ONCE(ctx->task, next);
3541 WRITE_ONCE(next_ctx->task, task);
3542
3543 perf_ctx_sched_task_cb(ctx, false);
3544 perf_event_swap_task_ctx_data(ctx, next_ctx);
3545
3546 perf_ctx_enable(ctx, false);
3547
3548 /*
3549 * RCU_INIT_POINTER here is safe because we've not
3550 * modified the ctx and the above modification of
3551 * ctx->task and ctx->task_ctx_data are immaterial
3552 * since those values are always verified under
3553 * ctx->lock which we're now holding.
3554 */
3555 RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
3556 RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
3557
3558 do_switch = 0;
3559
3560 perf_event_sync_stat(ctx, next_ctx);
3561 }
3562 raw_spin_unlock(&next_ctx->lock);
3563 raw_spin_unlock(&ctx->lock);
3564 }
3565 unlock:
3566 rcu_read_unlock();
3567
3568 if (do_switch) {
3569 raw_spin_lock(&ctx->lock);
3570 perf_ctx_disable(ctx, false);
3571
3572 inside_switch:
3573 perf_ctx_sched_task_cb(ctx, false);
3574 task_ctx_sched_out(ctx, EVENT_ALL);
3575
3576 perf_ctx_enable(ctx, false);
3577 raw_spin_unlock(&ctx->lock);
3578 }
3579 }
3580
3581 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3582 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
3583
3584 void perf_sched_cb_dec(struct pmu *pmu)
3585 {
3586 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3587
3588 this_cpu_dec(perf_sched_cb_usages);
3589 barrier();
3590
3591 if (!--cpc->sched_cb_usage)
3592 list_del(&cpc->sched_cb_entry);
3593 }
3594
3595
3596 void perf_sched_cb_inc(struct pmu *pmu)
3597 {
3598 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3599
3600 if (!cpc->sched_cb_usage++)
3601 list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3602
3603 barrier();
3604 this_cpu_inc(perf_sched_cb_usages);
3605 }
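/*
 * Hypothetical sketch, not part of this file: a PMU driver that needs the
 * context-switch callback would typically bump the refcount from its event
 * add/del path. The my_pmu_* names and the precise_ip condition are
 * illustrative assumptions, not a real driver.
 */
static __maybe_unused void my_pmu_add_sketch(struct perf_event *event)
{
	if (event->attr.precise_ip)	/* e.g. PEBS-style sampling */
		perf_sched_cb_inc(event->pmu);
}

static __maybe_unused void my_pmu_del_sketch(struct perf_event *event)
{
	if (event->attr.precise_ip)
		perf_sched_cb_dec(event->pmu);
}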
3606
3607 /*
3608 * This function provides the context switch callback to the lower code
3609 * layer. It is invoked ONLY when the context switch callback is enabled.
3610 *
3611 * This callback is relevant even to per-cpu events; for example, multi-event
3612 * PEBS requires this to provide PID/TID information. This requires that we flush
3613 * all queued PEBS records before we context switch to a new task.
3614 */
3615 static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
3616 {
3617 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3618 struct pmu *pmu;
3619
3620 pmu = cpc->epc.pmu;
3621
3622 /* software PMUs will not have sched_task */
3623 if (WARN_ON_ONCE(!pmu->sched_task))
3624 return;
3625
3626 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3627 perf_pmu_disable(pmu);
3628
3629 pmu->sched_task(cpc->task_epc, sched_in);
3630
3631 perf_pmu_enable(pmu);
3632 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3633 }
3634
3635 static void perf_pmu_sched_task(struct task_struct *prev,
3636 struct task_struct *next,
3637 bool sched_in)
3638 {
3639 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3640 struct perf_cpu_pmu_context *cpc;
3641
3642 /* cpuctx->task_ctx will be handled in perf_event_context_sched_in/out */
3643 if (prev == next || cpuctx->task_ctx)
3644 return;
3645
3646 list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
3647 __perf_pmu_sched_task(cpc, sched_in);
3648 }
3649
3650 static void perf_event_switch(struct task_struct *task,
3651 struct task_struct *next_prev, bool sched_in);
3652
3653 /*
3654 * Called from scheduler to remove the events of the current task,
3655 * with interrupts disabled.
3656 *
3657 * We stop each event and update the event value in event->count.
3658 *
3659 * This does not protect us against NMI, but disable()
3660 * sets the disabled bit in the control field of event _before_
3661 * accessing the event control register. If an NMI hits, then it will
3662 * not restart the event.
3663 */
3664 void __perf_event_task_sched_out(struct task_struct *task,
3665 struct task_struct *next)
3666 {
3667 if (__this_cpu_read(perf_sched_cb_usages))
3668 perf_pmu_sched_task(task, next, false);
3669
3670 if (atomic_read(&nr_switch_events))
3671 perf_event_switch(task, next, false);
3672
3673 perf_event_context_sched_out(task, next);
3674
3675 /*
3676 * If cgroup events exist on this CPU, then we need
3677 * to check if we have to switch out PMU state;
3678 * cgroup events are system-wide mode only.
3679 */
3680 perf_cgroup_switch(next);
3681 }
3682
3683 static bool perf_less_group_idx(const void *l, const void *r)
3684 {
3685 const struct perf_event *le = *(const struct perf_event **)l;
3686 const struct perf_event *re = *(const struct perf_event **)r;
3687
3688 return le->group_index < re->group_index;
3689 }
3690
3691 static void swap_ptr(void *l, void *r)
3692 {
3693 void **lp = l, **rp = r;
3694
3695 swap(*lp, *rp);
3696 }
3697
3698 static const struct min_heap_callbacks perf_min_heap = {
3699 .elem_size = sizeof(struct perf_event *),
3700 .less = perf_less_group_idx,
3701 .swp = swap_ptr,
3702 };
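/*
 * Illustrative sketch, an assumption rather than kernel code: the callbacks
 * above drive a k-way merge over iterators that are each sorted by
 * group_index, in miniature:
 */
static __maybe_unused void min_heap_merge_sketch(struct perf_event **itrs, int nr)
{
	struct min_heap heap = { .data = itrs, .nr = nr, .size = nr };

	min_heapify_all(&heap, &perf_min_heap);
	while (heap.nr) {
		/* itrs[0] is now the iterator with the smallest group_index */
		min_heap_pop(&heap, &perf_min_heap);
	}
}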
3703
3704 static void __heap_add(struct min_heap *heap, struct perf_event *event)
3705 {
3706 struct perf_event **itrs = heap->data;
3707
3708 if (event) {
3709 itrs[heap->nr] = event;
3710 heap->nr++;
3711 }
3712 }
3713
3714 static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
3715 {
3716 struct perf_cpu_pmu_context *cpc;
3717
3718 if (!pmu_ctx->ctx->task)
3719 return;
3720
3721 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3722 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3723 cpc->task_epc = pmu_ctx;
3724 }
3725
3726 static noinline int visit_groups_merge(struct perf_event_context *ctx,
3727 struct perf_event_groups *groups, int cpu,
3728 struct pmu *pmu,
3729 int (*func)(struct perf_event *, void *),
3730 void *data)
3731 {
3732 #ifdef CONFIG_CGROUP_PERF
3733 struct cgroup_subsys_state *css = NULL;
3734 #endif
3735 struct perf_cpu_context *cpuctx = NULL;
3736 /* Space for per CPU and/or any CPU event iterators. */
3737 struct perf_event *itrs[2];
3738 struct min_heap event_heap;
3739 struct perf_event **evt;
3740 int ret;
3741
3742 if (pmu->filter && pmu->filter(pmu, cpu))
3743 return 0;
3744
3745 if (!ctx->task) {
3746 cpuctx = this_cpu_ptr(&perf_cpu_context);
3747 event_heap = (struct min_heap){
3748 .data = cpuctx->heap,
3749 .nr = 0,
3750 .size = cpuctx->heap_size,
3751 };
3752
3753 lockdep_assert_held(&cpuctx->ctx.lock);
3754
3755 #ifdef CONFIG_CGROUP_PERF
3756 if (cpuctx->cgrp)
3757 css = &cpuctx->cgrp->css;
3758 #endif
3759 } else {
3760 event_heap = (struct min_heap){
3761 .data = itrs,
3762 .nr = 0,
3763 .size = ARRAY_SIZE(itrs),
3764 };
3765 /* Events not within a CPU context may be on any CPU. */
3766 __heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL));
3767 }
3768 evt = event_heap.data;
3769
3770 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
3771
3772 #ifdef CONFIG_CGROUP_PERF
3773 for (; css; css = css->parent)
3774 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
3775 #endif
3776
3777 if (event_heap.nr) {
3778 __link_epc((*evt)->pmu_ctx);
3779 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
3780 }
3781
3782 min_heapify_all(&event_heap, &perf_min_heap);
3783
3784 while (event_heap.nr) {
3785 ret = func(*evt, data);
3786 if (ret)
3787 return ret;
3788
3789 *evt = perf_event_groups_next(*evt, pmu);
3790 if (*evt)
3791 min_heapify(&event_heap, 0, &perf_min_heap);
3792 else
3793 min_heap_pop(&event_heap, &perf_min_heap);
3794 }
3795
3796 return 0;
3797 }
3798
3799 /*
3800 * Because the userpage is strictly per-event (there is no concept of context,
3801 * so there cannot be a context indirection), every userpage must be updated
3802 * when context time starts :-(
3803 *
3804 * IOW, we must not miss EVENT_TIME edges.
3805 */
3806 static inline bool event_update_userpage(struct perf_event *event)
3807 {
3808 if (likely(!atomic_read(&event->mmap_count)))
3809 return false;
3810
3811 perf_event_update_time(event);
3812 perf_event_update_userpage(event);
3813
3814 return true;
3815 }
3816
3817 static inline void group_update_userpage(struct perf_event *group_event)
3818 {
3819 struct perf_event *event;
3820
3821 if (!event_update_userpage(group_event))
3822 return;
3823
3824 for_each_sibling_event(event, group_event)
3825 event_update_userpage(event);
3826 }
3827
3828 static int merge_sched_in(struct perf_event *event, void *data)
3829 {
3830 struct perf_event_context *ctx = event->ctx;
3831 int *can_add_hw = data;
3832
3833 if (event->state <= PERF_EVENT_STATE_OFF)
3834 return 0;
3835
3836 if (!event_filter_match(event))
3837 return 0;
3838
3839 if (group_can_go_on(event, *can_add_hw)) {
3840 if (!group_sched_in(event, ctx))
3841 list_add_tail(&event->active_list, get_event_list(event));
3842 }
3843
3844 if (event->state == PERF_EVENT_STATE_INACTIVE) {
3845 *can_add_hw = 0;
3846 if (event->attr.pinned) {
3847 perf_cgroup_event_disable(event, ctx);
3848 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
3849 } else {
3850 struct perf_cpu_pmu_context *cpc;
3851
3852 event->pmu_ctx->rotate_necessary = 1;
3853 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
3854 perf_mux_hrtimer_restart(cpc);
3855 group_update_userpage(event);
3856 }
3857 }
3858
3859 return 0;
3860 }
3861
3862 static void pmu_groups_sched_in(struct perf_event_context *ctx,
3863 struct perf_event_groups *groups,
3864 struct pmu *pmu)
3865 {
3866 int can_add_hw = 1;
3867 visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
3868 merge_sched_in, &can_add_hw);
3869 }
3870
3871 static void ctx_groups_sched_in(struct perf_event_context *ctx,
3872 struct perf_event_groups *groups,
3873 bool cgroup)
3874 {
3875 struct perf_event_pmu_context *pmu_ctx;
3876
3877 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3878 if (cgroup && !pmu_ctx->nr_cgroups)
3879 continue;
3880 pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
3881 }
3882 }
3883
3884 static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
3885 struct pmu *pmu)
3886 {
3887 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
3888 }
3889
3890 static void
3891 ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
3892 {
3893 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3894 int is_active = ctx->is_active;
3895 bool cgroup = event_type & EVENT_CGROUP;
3896
3897 event_type &= ~EVENT_CGROUP;
3898
3899 lockdep_assert_held(&ctx->lock);
3900
3901 if (likely(!ctx->nr_events))
3902 return;
3903
3904 if (!(is_active & EVENT_TIME)) {
3905 /* start ctx time */
3906 __update_context_time(ctx, false);
3907 perf_cgroup_set_timestamp(cpuctx);
3908 /*
3909 * CPU-release for the below ->is_active store,
3910 * see __load_acquire() in perf_event_time_now()
3911 */
3912 barrier();
3913 }
3914
3915 ctx->is_active |= (event_type | EVENT_TIME);
3916 if (ctx->task) {
3917 if (!is_active)
3918 cpuctx->task_ctx = ctx;
3919 else
3920 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3921 }
3922
3923 is_active ^= ctx->is_active; /* changed bits */
3924
3925 /*
3926 * First go through the list and put on any pinned groups
3927 * in order to give them the best chance of going on.
3928 */
3929 if (is_active & EVENT_PINNED)
3930 ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
3931
3932 /* Then walk through the lower prio flexible groups */
3933 if (is_active & EVENT_FLEXIBLE)
3934 ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
3935 }
3936
3937 static void perf_event_context_sched_in(struct task_struct *task)
3938 {
3939 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3940 struct perf_event_context *ctx;
3941
3942 rcu_read_lock();
3943 ctx = rcu_dereference(task->perf_event_ctxp);
3944 if (!ctx)
3945 goto rcu_unlock;
3946
3947 if (cpuctx->task_ctx == ctx) {
3948 perf_ctx_lock(cpuctx, ctx);
3949 perf_ctx_disable(ctx, false);
3950
3951 perf_ctx_sched_task_cb(ctx, true);
3952
3953 perf_ctx_enable(ctx, false);
3954 perf_ctx_unlock(cpuctx, ctx);
3955 goto rcu_unlock;
3956 }
3957
3958 perf_ctx_lock(cpuctx, ctx);
3959 /*
3960 * We must check ctx->nr_events while holding ctx->lock, such
3961 * that we serialize against perf_install_in_context().
3962 */
3963 if (!ctx->nr_events)
3964 goto unlock;
3965
3966 perf_ctx_disable(ctx, false);
3967 /*
3968 * We want to keep the following priority order:
3969 * cpu pinned (that don't need to move), task pinned,
3970 * cpu flexible, task flexible.
3971 *
3972 * However, if the task's ctx is not carrying any pinned
3973 * events, no need to flip the cpuctx's events around.
3974 */
3975 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
3976 perf_ctx_disable(&cpuctx->ctx, false);
3977 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
3978 }
3979
3980 perf_event_sched_in(cpuctx, ctx);
3981
3982 perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
3983
3984 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
3985 perf_ctx_enable(&cpuctx->ctx, false);
3986
3987 perf_ctx_enable(ctx, false);
3988
3989 unlock:
3990 perf_ctx_unlock(cpuctx, ctx);
3991 rcu_unlock:
3992 rcu_read_unlock();
3993 }
3994
3995 /*
3996 * Called from scheduler to add the events of the current task
3997 * with interrupts disabled.
3998 *
3999 * We restore the event value and then enable it.
4000 *
4001 * This does not protect us against NMI, but enable()
4002 * sets the enabled bit in the control field of event _before_
4003 * accessing the event control register. If an NMI hits, then it will
4004 * keep the event running.
4005 */
4006 void __perf_event_task_sched_in(struct task_struct *prev,
4007 struct task_struct *task)
4008 {
4009 perf_event_context_sched_in(task);
4010
4011 if (atomic_read(&nr_switch_events))
4012 perf_event_switch(task, prev, true);
4013
4014 if (__this_cpu_read(perf_sched_cb_usages))
4015 perf_pmu_sched_task(prev, task, true);
4016 }
4017
4018 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
4019 {
4020 u64 frequency = event->attr.sample_freq;
4021 u64 sec = NSEC_PER_SEC;
4022 u64 divisor, dividend;
4023
4024 int count_fls, nsec_fls, frequency_fls, sec_fls;
4025
4026 count_fls = fls64(count);
4027 nsec_fls = fls64(nsec);
4028 frequency_fls = fls64(frequency);
4029 sec_fls = 30;
4030
4031 /*
4032 * We got @count in @nsec, with a target of sample_freq HZ
4033 * the target period becomes:
4034 *
4035 * @count * 10^9
4036 * period = -------------------
4037 * @nsec * sample_freq
4038 *
4039 */
4040
4041 /*
4042 * Reduce accuracy by one bit such that @a and @b converge
4043 * to a similar magnitude.
4044 */
4045 #define REDUCE_FLS(a, b) \
4046 do { \
4047 if (a##_fls > b##_fls) { \
4048 a >>= 1; \
4049 a##_fls--; \
4050 } else { \
4051 b >>= 1; \
4052 b##_fls--; \
4053 } \
4054 } while (0)
4055
4056 /*
4057 * Reduce accuracy until either term fits in a u64, then proceed with
4058 * the other, so that finally we can do a u64/u64 division.
4059 */
4060 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
4061 REDUCE_FLS(nsec, frequency);
4062 REDUCE_FLS(sec, count);
4063 }
4064
4065 if (count_fls + sec_fls > 64) {
4066 divisor = nsec * frequency;
4067
4068 while (count_fls + sec_fls > 64) {
4069 REDUCE_FLS(count, sec);
4070 divisor >>= 1;
4071 }
4072
4073 dividend = count * sec;
4074 } else {
4075 dividend = count * sec;
4076
4077 while (nsec_fls + frequency_fls > 64) {
4078 REDUCE_FLS(nsec, frequency);
4079 dividend >>= 1;
4080 }
4081
4082 divisor = nsec * frequency;
4083 }
4084
4085 if (!divisor)
4086 return dividend;
4087
4088 return div64_u64(dividend, divisor);
4089 }
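/*
 * Worked example of the formula above: @count = 1,000,000 events observed
 * over @nsec = 10,000,000 ns with sample_freq = 1000 Hz gives
 *
 *	period = (1e6 * 1e9) / (1e7 * 1e3) = 100,000
 *
 * i.e. sampling every 100,000 events yields roughly 1000 samples/sec.
 */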
4090
4091 static DEFINE_PER_CPU(int, perf_throttled_count);
4092 static DEFINE_PER_CPU(u64, perf_throttled_seq);
4093
4094 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
4095 {
4096 struct hw_perf_event *hwc = &event->hw;
4097 s64 period, sample_period;
4098 s64 delta;
4099
4100 period = perf_calculate_period(event, nsec, count);
4101
4102 delta = (s64)(period - hwc->sample_period);
4103 delta = (delta + 7) / 8; /* low pass filter */
4104
4105 sample_period = hwc->sample_period + delta;
4106
4107 if (!sample_period)
4108 sample_period = 1;
4109
4110 hwc->sample_period = sample_period;
4111
4112 if (local64_read(&hwc->period_left) > 8*sample_period) {
4113 if (disable)
4114 event->pmu->stop(event, PERF_EF_UPDATE);
4115
4116 local64_set(&hwc->period_left, 0);
4117
4118 if (disable)
4119 event->pmu->start(event, PERF_EF_RELOAD);
4120 }
4121 }
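/*
 * Example of the low-pass filter above: with hwc->sample_period = 100,000
 * and a freshly computed period of 180,000, delta is 80,000 and the period
 * only moves by 80,000/8 = 10,000 to 110,000, smoothing transient bursts
 * in the observed event rate.
 */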
4122
4123 /*
4124 * Combine freq adjustment with unthrottling to avoid two passes over the
4125 * events. At the same time, make sure that having freq events does not
4126 * change the rate of unthrottling, as that would introduce bias.
4127 */
4128 static void
4129 perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
4130 {
4131 struct perf_event *event;
4132 struct hw_perf_event *hwc;
4133 u64 now, period = TICK_NSEC;
4134 s64 delta;
4135
4136 /*
4137 * We only need to iterate over all events if:
4138 * - the context has events in frequency mode (needs freq adjust), or
4139 * - there are events to unthrottle on this cpu.
4140 */
4141 if (!(ctx->nr_freq || unthrottle))
4142 return;
4143
4144 raw_spin_lock(&ctx->lock);
4145
4146 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4147 if (event->state != PERF_EVENT_STATE_ACTIVE)
4148 continue;
4149
4150 // XXX use visit thingy to avoid the -1,cpu match
4151 if (!event_filter_match(event))
4152 continue;
4153
4154 perf_pmu_disable(event->pmu);
4155
4156 hwc = &event->hw;
4157
4158 if (hwc->interrupts == MAX_INTERRUPTS) {
4159 hwc->interrupts = 0;
4160 perf_log_throttle(event, 1);
4161 event->pmu->start(event, 0);
4162 }
4163
4164 if (!event->attr.freq || !event->attr.sample_freq)
4165 goto next;
4166
4167 /*
4168 * stop the event and update event->count
4169 */
4170 event->pmu->stop(event, PERF_EF_UPDATE);
4171
4172 now = local64_read(&event->count);
4173 delta = now - hwc->freq_count_stamp;
4174 hwc->freq_count_stamp = now;
4175
4176 /*
4177 * Restart the event; reload only if the value has changed.
4178 * We have stopped the event, so tell perf_adjust_period()
4179 * to avoid stopping it twice.
4182 */
4183 if (delta > 0)
4184 perf_adjust_period(event, period, delta, false);
4185
4186 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
4187 next:
4188 perf_pmu_enable(event->pmu);
4189 }
4190
4191 raw_spin_unlock(&ctx->lock);
4192 }
4193
4194 /*
4195 * Move @event to the tail of @ctx's eligible events.
4196 */
4197 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
4198 {
4199 /*
4200 * Rotate the first entry last of non-pinned groups. Rotation might be
4201 * disabled by the inheritance code.
4202 */
4203 if (ctx->rotate_disable)
4204 return;
4205
4206 perf_event_groups_delete(&ctx->flexible_groups, event);
4207 perf_event_groups_insert(&ctx->flexible_groups, event);
4208 }
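/*
 * For example, with flexible events {A, B, C} ordered by group_index,
 * rotating A re-inserts it with a fresh (larger) group_index, so the order
 * becomes {B, C, A} and B gets first claim on counters next time around.
 */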
4209
4210 /* pick an event from the flexible_groups to rotate */
4211 static inline struct perf_event *
4212 ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
4213 {
4214 struct perf_event *event;
4215 struct rb_node *node;
4216 struct rb_root *tree;
4217 struct __group_key key = {
4218 .pmu = pmu_ctx->pmu,
4219 };
4220
4221 /* pick the first active flexible event */
4222 event = list_first_entry_or_null(&pmu_ctx->flexible_active,
4223 struct perf_event, active_list);
4224 if (event)
4225 goto out;
4226
4227 /* if no active flexible event, pick the first event */
4228 tree = &pmu_ctx->ctx->flexible_groups.tree;
4229
4230 if (!pmu_ctx->ctx->task) {
4231 key.cpu = smp_processor_id();
4232
4233 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4234 if (node)
4235 event = __node_2_pe(node);
4236 goto out;
4237 }
4238
4239 key.cpu = -1;
4240 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4241 if (node) {
4242 event = __node_2_pe(node);
4243 goto out;
4244 }
4245
4246 key.cpu = smp_processor_id();
4247 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4248 if (node)
4249 event = __node_2_pe(node);
4250
4251 out:
4252 /*
4253 * Unconditionally clear rotate_necessary; if merge_sched_in()
4254 * finds there are unschedulable events, it will set it again.
4255 */
4256 pmu_ctx->rotate_necessary = 0;
4257
4258 return event;
4259 }
4260
4261 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
4262 {
4263 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4264 struct perf_event_pmu_context *cpu_epc, *task_epc = NULL;
4265 struct perf_event *cpu_event = NULL, *task_event = NULL;
4266 int cpu_rotate, task_rotate;
4267 struct pmu *pmu;
4268
4269 /*
4270 * Since we run this from IRQ context, nobody can install new
4271 * events, thus the event count values are stable.
4272 */
4273
4274 cpu_epc = &cpc->epc;
4275 pmu = cpu_epc->pmu;
4276 task_epc = cpc->task_epc;
4277
4278 cpu_rotate = cpu_epc->rotate_necessary;
4279 task_rotate = task_epc ? task_epc->rotate_necessary : 0;
4280
4281 if (!(cpu_rotate || task_rotate))
4282 return false;
4283
4284 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
4285 perf_pmu_disable(pmu);
4286
4287 if (task_rotate)
4288 task_event = ctx_event_to_rotate(task_epc);
4289 if (cpu_rotate)
4290 cpu_event = ctx_event_to_rotate(cpu_epc);
4291
4292 /*
4293 * As per the order given at ctx_resched() first 'pop' task flexible
4294 * and then, if needed, CPU flexible.
4295 */
4296 if (task_event || (task_epc && cpu_event)) {
4297 update_context_time(task_epc->ctx);
4298 __pmu_ctx_sched_out(task_epc, EVENT_FLEXIBLE);
4299 }
4300
4301 if (cpu_event) {
4302 update_context_time(&cpuctx->ctx);
4303 __pmu_ctx_sched_out(cpu_epc, EVENT_FLEXIBLE);
4304 rotate_ctx(&cpuctx->ctx, cpu_event);
4305 __pmu_ctx_sched_in(&cpuctx->ctx, pmu);
4306 }
4307
4308 if (task_event)
4309 rotate_ctx(task_epc->ctx, task_event);
4310
4311 if (task_event || (task_epc && cpu_event))
4312 __pmu_ctx_sched_in(task_epc->ctx, pmu);
4313
4314 perf_pmu_enable(pmu);
4315 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
4316
4317 return true;
4318 }
4319
4320 void perf_event_task_tick(void)
4321 {
4322 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4323 struct perf_event_context *ctx;
4324 int throttled;
4325
4326 lockdep_assert_irqs_disabled();
4327
4328 __this_cpu_inc(perf_throttled_seq);
4329 throttled = __this_cpu_xchg(perf_throttled_count, 0);
4330 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
4331
4332 perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
4333
4334 rcu_read_lock();
4335 ctx = rcu_dereference(current->perf_event_ctxp);
4336 if (ctx)
4337 perf_adjust_freq_unthr_context(ctx, !!throttled);
4338 rcu_read_unlock();
4339 }
4340
4341 static int event_enable_on_exec(struct perf_event *event,
4342 struct perf_event_context *ctx)
4343 {
4344 if (!event->attr.enable_on_exec)
4345 return 0;
4346
4347 event->attr.enable_on_exec = 0;
4348 if (event->state >= PERF_EVENT_STATE_INACTIVE)
4349 return 0;
4350
4351 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
4352
4353 return 1;
4354 }
4355
4356 /*
4357 * Enable all of a task's events that have been marked enable-on-exec.
4358 * This expects task == current.
4359 */
4360 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
4361 {
4362 struct perf_event_context *clone_ctx = NULL;
4363 enum event_type_t event_type = 0;
4364 struct perf_cpu_context *cpuctx;
4365 struct perf_event *event;
4366 unsigned long flags;
4367 int enabled = 0;
4368
4369 local_irq_save(flags);
4370 if (WARN_ON_ONCE(current->perf_event_ctxp != ctx))
4371 goto out;
4372
4373 if (!ctx->nr_events)
4374 goto out;
4375
4376 cpuctx = this_cpu_ptr(&perf_cpu_context);
4377 perf_ctx_lock(cpuctx, ctx);
4378 ctx_sched_out(ctx, EVENT_TIME);
4379
4380 list_for_each_entry(event, &ctx->event_list, event_entry) {
4381 enabled |= event_enable_on_exec(event, ctx);
4382 event_type |= get_event_type(event);
4383 }
4384
4385 /*
4386 * Unclone and reschedule this context if we enabled any event.
4387 */
4388 if (enabled) {
4389 clone_ctx = unclone_ctx(ctx);
4390 ctx_resched(cpuctx, ctx, event_type);
4391 } else {
4392 ctx_sched_in(ctx, EVENT_TIME);
4393 }
4394 perf_ctx_unlock(cpuctx, ctx);
4395
4396 out:
4397 local_irq_restore(flags);
4398
4399 if (clone_ctx)
4400 put_ctx(clone_ctx);
4401 }
4402
4403 static void perf_remove_from_owner(struct perf_event *event);
4404 static void perf_event_exit_event(struct perf_event *event,
4405 struct perf_event_context *ctx);
4406
4407 /*
4408 * Removes all events from the current task that have been marked
4409 * remove-on-exec, and feeds their values back to parent events.
4410 */
4411 static void perf_event_remove_on_exec(struct perf_event_context *ctx)
4412 {
4413 struct perf_event_context *clone_ctx = NULL;
4414 struct perf_event *event, *next;
4415 unsigned long flags;
4416 bool modified = false;
4417
4418 mutex_lock(&ctx->mutex);
4419
4420 if (WARN_ON_ONCE(ctx->task != current))
4421 goto unlock;
4422
4423 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
4424 if (!event->attr.remove_on_exec)
4425 continue;
4426
4427 if (!is_kernel_event(event))
4428 perf_remove_from_owner(event);
4429
4430 modified = true;
4431
4432 perf_event_exit_event(event, ctx);
4433 }
4434
4435 raw_spin_lock_irqsave(&ctx->lock, flags);
4436 if (modified)
4437 clone_ctx = unclone_ctx(ctx);
4438 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4439
4440 unlock:
4441 mutex_unlock(&ctx->mutex);
4442
4443 if (clone_ctx)
4444 put_ctx(clone_ctx);
4445 }
4446
4447 struct perf_read_data {
4448 struct perf_event *event;
4449 bool group;
4450 int ret;
4451 };
4452
4453 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
4454 {
4455 u16 local_pkg, event_pkg;
4456
4457 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
4458 int local_cpu = smp_processor_id();
4459
4460 event_pkg = topology_physical_package_id(event_cpu);
4461 local_pkg = topology_physical_package_id(local_cpu);
4462
4463 if (event_pkg == local_pkg)
4464 return local_cpu;
4465 }
4466
4467 return event_cpu;
4468 }
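/*
 * For example, with PERF_EV_CAP_READ_ACTIVE_PKG set, reading an uncore
 * event bound to CPU 17 from CPU 3 in the same physical package is
 * satisfied from CPU 3 itself instead of sending an IPI to CPU 17.
 */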
4469
4470 /*
4471 * Cross CPU call to read the hardware event
4472 */
4473 static void __perf_event_read(void *info)
4474 {
4475 struct perf_read_data *data = info;
4476 struct perf_event *sub, *event = data->event;
4477 struct perf_event_context *ctx = event->ctx;
4478 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4479 struct pmu *pmu = event->pmu;
4480
4481 /*
4482 * If this is a task context, we need to check whether it is
4483 * the current task context of this cpu. If not, it has been
4484 * scheduled out before the smp call arrived. In that case
4485 * event->count would have been updated to a recent sample
4486 * when the event was scheduled out.
4487 */
4488 if (ctx->task && cpuctx->task_ctx != ctx)
4489 return;
4490
4491 raw_spin_lock(&ctx->lock);
4492 if (ctx->is_active & EVENT_TIME) {
4493 update_context_time(ctx);
4494 update_cgrp_time_from_event(event);
4495 }
4496
4497 perf_event_update_time(event);
4498 if (data->group)
4499 perf_event_update_sibling_time(event);
4500
4501 if (event->state != PERF_EVENT_STATE_ACTIVE)
4502 goto unlock;
4503
4504 if (!data->group) {
4505 pmu->read(event);
4506 data->ret = 0;
4507 goto unlock;
4508 }
4509
4510 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
4511
4512 pmu->read(event);
4513
4514 for_each_sibling_event(sub, event) {
4515 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
4516 /*
4517 * Use sibling's PMU rather than @event's since
4518 * sibling could be on a different (e.g. software) PMU.
4519 */
4520 sub->pmu->read(sub);
4521 }
4522 }
4523
4524 data->ret = pmu->commit_txn(pmu);
4525
4526 unlock:
4527 raw_spin_unlock(&ctx->lock);
4528 }
4529
4530 static inline u64 perf_event_count(struct perf_event *event)
4531 {
4532 return local64_read(&event->count) + atomic64_read(&event->child_count);
4533 }
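/*
 * E.g. a parent event that itself counted 100, and whose exited children
 * folded 40 into ->child_count, reads as 140 here.
 */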
4534
4535 static void calc_timer_values(struct perf_event *event,
4536 u64 *now,
4537 u64 *enabled,
4538 u64 *running)
4539 {
4540 u64 ctx_time;
4541
4542 *now = perf_clock();
4543 ctx_time = perf_event_time_now(event, *now);
4544 __perf_update_times(event, ctx_time, enabled, running);
4545 }
4546
4547 /*
4548 * NMI-safe method to read a local event, that is an event that
4549 * is:
4550 * - either for the current task, or for this CPU
4551 * - does not have inherit set, for inherited task events
4552 * will not be local and we cannot read them atomically
4553 * - must not have a pmu::count method
4554 */
4555 int perf_event_read_local(struct perf_event *event, u64 *value,
4556 u64 *enabled, u64 *running)
4557 {
4558 unsigned long flags;
4559 int ret = 0;
4560
4561 /*
4562 * Disabling interrupts avoids all counter scheduling (context
4563 * switches, timer based rotation and IPIs).
4564 */
4565 local_irq_save(flags);
4566
4567 /*
4568 * It must not be an event with inherit set, we cannot read
4569 * all child counters from atomic context.
4570 */
4571 if (event->attr.inherit) {
4572 ret = -EOPNOTSUPP;
4573 goto out;
4574 }
4575
4576 /* If this is a per-task event, it must be for current */
4577 if ((event->attach_state & PERF_ATTACH_TASK) &&
4578 event->hw.target != current) {
4579 ret = -EINVAL;
4580 goto out;
4581 }
4582
4583 /* If this is a per-CPU event, it must be for this CPU */
4584 if (!(event->attach_state & PERF_ATTACH_TASK) &&
4585 event->cpu != smp_processor_id()) {
4586 ret = -EINVAL;
4587 goto out;
4588 }
4589
4590 /* If this is a pinned event it must be running on this CPU */
4591 if (event->attr.pinned && event->oncpu != smp_processor_id()) {
4592 ret = -EBUSY;
4593 goto out;
4594 }
4595
4596 /*
4597 * If the event is currently on this CPU, it's either a per-task event
4598 * or local to this CPU. Furthermore, it means it's ACTIVE (otherwise
4599 * oncpu == -1).
4600 */
4601 if (event->oncpu == smp_processor_id())
4602 event->pmu->read(event);
4603
4604 *value = local64_read(&event->count);
4605 if (enabled || running) {
4606 u64 __enabled, __running, __now;
4607
4608 calc_timer_values(event, &__now, &__enabled, &__running);
4609 if (enabled)
4610 *enabled = __enabled;
4611 if (running)
4612 *running = __running;
4613 }
4614 out:
4615 local_irq_restore(flags);
4616
4617 return ret;
4618 }
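/*
 * Hypothetical usage sketch (the caller and the pr_debug() message are
 * assumptions): an NMI-safe read of a local counter, e.g. from a BPF
 * helper or a tracing callback:
 */
static __maybe_unused void read_local_sketch(struct perf_event *event)
{
	u64 value, enabled, running;

	if (!perf_event_read_local(event, &value, &enabled, &running))
		pr_debug("count=%llu enabled=%llu running=%llu\n",
			 value, enabled, running);
}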
4619
4620 static int perf_event_read(struct perf_event *event, bool group)
4621 {
4622 enum perf_event_state state = READ_ONCE(event->state);
4623 int event_cpu, ret = 0;
4624
4625 /*
4626 * If event is enabled and currently active on a CPU, update the
4627 * value in the event structure:
4628 */
4629 again:
4630 if (state == PERF_EVENT_STATE_ACTIVE) {
4631 struct perf_read_data data;
4632
4633 /*
4634 * Orders the ->state and ->oncpu loads such that if we see
4635 * ACTIVE we must also see the right ->oncpu.
4636 *
4637 * Matches the smp_wmb() from event_sched_in().
4638 */
4639 smp_rmb();
4640
4641 event_cpu = READ_ONCE(event->oncpu);
4642 if ((unsigned)event_cpu >= nr_cpu_ids)
4643 return 0;
4644
4645 data = (struct perf_read_data){
4646 .event = event,
4647 .group = group,
4648 .ret = 0,
4649 };
4650
4651 preempt_disable();
4652 event_cpu = __perf_event_read_cpu(event, event_cpu);
4653
4654 /*
4655 * Purposely ignore the smp_call_function_single() return
4656 * value.
4657 *
4658 * If event_cpu isn't a valid CPU it means the event got
4659 * scheduled out and that will have updated the event count.
4660 *
4661 * Therefore, either way, we'll have an up-to-date event count
4662 * after this.
4663 */
4664 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
4665 preempt_enable();
4666 ret = data.ret;
4667
4668 } else if (state == PERF_EVENT_STATE_INACTIVE) {
4669 struct perf_event_context *ctx = event->ctx;
4670 unsigned long flags;
4671
4672 raw_spin_lock_irqsave(&ctx->lock, flags);
4673 state = event->state;
4674 if (state != PERF_EVENT_STATE_INACTIVE) {
4675 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4676 goto again;
4677 }
4678
4679 /*
4680 * We may read while the context is not active (e.g., the thread is
4681 * blocked); in that case we cannot update the context time.
4682 */
4683 if (ctx->is_active & EVENT_TIME) {
4684 update_context_time(ctx);
4685 update_cgrp_time_from_event(event);
4686 }
4687
4688 perf_event_update_time(event);
4689 if (group)
4690 perf_event_update_sibling_time(event);
4691 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4692 }
4693
4694 return ret;
4695 }
4696
4697 /*
4698 * Initialize the perf_event context in a task_struct:
4699 */
4700 static void __perf_event_init_context(struct perf_event_context *ctx)
4701 {
4702 raw_spin_lock_init(&ctx->lock);
4703 mutex_init(&ctx->mutex);
4704 INIT_LIST_HEAD(&ctx->pmu_ctx_list);
4705 perf_event_groups_init(&ctx->pinned_groups);
4706 perf_event_groups_init(&ctx->flexible_groups);
4707 INIT_LIST_HEAD(&ctx->event_list);
4708 refcount_set(&ctx->refcount, 1);
4709 }
4710
4711 static void
4712 __perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
4713 {
4714 epc->pmu = pmu;
4715 INIT_LIST_HEAD(&epc->pmu_ctx_entry);
4716 INIT_LIST_HEAD(&epc->pinned_active);
4717 INIT_LIST_HEAD(&epc->flexible_active);
4718 atomic_set(&epc->refcount, 1);
4719 }
4720
4721 static struct perf_event_context *
4722 alloc_perf_context(struct task_struct *task)
4723 {
4724 struct perf_event_context *ctx;
4725
4726 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4727 if (!ctx)
4728 return NULL;
4729
4730 __perf_event_init_context(ctx);
4731 if (task)
4732 ctx->task = get_task_struct(task);
4733
4734 return ctx;
4735 }
4736
4737 static struct task_struct *
4738 find_lively_task_by_vpid(pid_t vpid)
4739 {
4740 struct task_struct *task;
4741
4742 rcu_read_lock();
4743 if (!vpid)
4744 task = current;
4745 else
4746 task = find_task_by_vpid(vpid);
4747 if (task)
4748 get_task_struct(task);
4749 rcu_read_unlock();
4750
4751 if (!task)
4752 return ERR_PTR(-ESRCH);
4753
4754 return task;
4755 }
4756
4757 /*
4758 * Returns a matching context with refcount and pincount.
4759 */
4760 static struct perf_event_context *
4761 find_get_context(struct task_struct *task, struct perf_event *event)
4762 {
4763 struct perf_event_context *ctx, *clone_ctx = NULL;
4764 struct perf_cpu_context *cpuctx;
4765 unsigned long flags;
4766 int err;
4767
4768 if (!task) {
4769 /* Must be root to operate on a CPU event: */
4770 err = perf_allow_cpu(&event->attr);
4771 if (err)
4772 return ERR_PTR(err);
4773
4774 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
4775 ctx = &cpuctx->ctx;
4776 get_ctx(ctx);
4777 raw_spin_lock_irqsave(&ctx->lock, flags);
4778 ++ctx->pin_count;
4779 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4780
4781 return ctx;
4782 }
4783
4784 err = -EINVAL;
4785 retry:
4786 ctx = perf_lock_task_context(task, &flags);
4787 if (ctx) {
4788 clone_ctx = unclone_ctx(ctx);
4789 ++ctx->pin_count;
4790
4791 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4792
4793 if (clone_ctx)
4794 put_ctx(clone_ctx);
4795 } else {
4796 ctx = alloc_perf_context(task);
4797 err = -ENOMEM;
4798 if (!ctx)
4799 goto errout;
4800
4801 err = 0;
4802 mutex_lock(&task->perf_event_mutex);
4803 /*
4804 * If the task has already passed perf_event_exit_task(),
4805 * we must see PF_EXITING; it takes this mutex too.
4806 */
4807 if (task->flags & PF_EXITING)
4808 err = -ESRCH;
4809 else if (task->perf_event_ctxp)
4810 err = -EAGAIN;
4811 else {
4812 get_ctx(ctx);
4813 ++ctx->pin_count;
4814 rcu_assign_pointer(task->perf_event_ctxp, ctx);
4815 }
4816 mutex_unlock(&task->perf_event_mutex);
4817
4818 if (unlikely(err)) {
4819 put_ctx(ctx);
4820
4821 if (err == -EAGAIN)
4822 goto retry;
4823 goto errout;
4824 }
4825 }
4826
4827 return ctx;
4828
4829 errout:
4830 return ERR_PTR(err);
4831 }
4832
4833 static struct perf_event_pmu_context *
4834 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
4835 struct perf_event *event)
4836 {
4837 struct perf_event_pmu_context *new = NULL, *epc;
4838 void *task_ctx_data = NULL;
4839
4840 if (!ctx->task) {
4841 /*
4842 * perf_pmu_migrate_context() / __perf_pmu_install_event()
4843 * rely on the fact that find_get_pmu_context() cannot fail
4844 * for CPU contexts.
4845 */
4846 struct perf_cpu_pmu_context *cpc;
4847
4848 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
4849 epc = &cpc->epc;
4850 raw_spin_lock_irq(&ctx->lock);
4851 if (!epc->ctx) {
4852 atomic_set(&epc->refcount, 1);
4853 epc->embedded = 1;
4854 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
4855 epc->ctx = ctx;
4856 } else {
4857 WARN_ON_ONCE(epc->ctx != ctx);
4858 atomic_inc(&epc->refcount);
4859 }
4860 raw_spin_unlock_irq(&ctx->lock);
4861 return epc;
4862 }
4863
4864 new = kzalloc(sizeof(*epc), GFP_KERNEL);
4865 if (!new)
4866 return ERR_PTR(-ENOMEM);
4867
4868 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
4869 task_ctx_data = alloc_task_ctx_data(pmu);
4870 if (!task_ctx_data) {
4871 kfree(new);
4872 return ERR_PTR(-ENOMEM);
4873 }
4874 }
4875
4876 __perf_init_event_pmu_context(new, pmu);
4877
4878 /*
4879 * XXX
4880 *
4881 * lockdep_assert_held(&ctx->mutex);
4882 *
4883 * can't because perf_event_init_task() doesn't actually hold the
4884 * child_ctx->mutex.
4885 */
4886
4887 raw_spin_lock_irq(&ctx->lock);
4888 list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) {
4889 if (epc->pmu == pmu) {
4890 WARN_ON_ONCE(epc->ctx != ctx);
4891 atomic_inc(&epc->refcount);
4892 goto found_epc;
4893 }
4894 }
4895
4896 epc = new;
4897 new = NULL;
4898
4899 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
4900 epc->ctx = ctx;
4901
4902 found_epc:
4903 if (task_ctx_data && !epc->task_ctx_data) {
4904 epc->task_ctx_data = task_ctx_data;
4905 task_ctx_data = NULL;
4906 ctx->nr_task_data++;
4907 }
4908 raw_spin_unlock_irq(&ctx->lock);
4909
4910 free_task_ctx_data(pmu, task_ctx_data);
4911 kfree(new);
4912
4913 return epc;
4914 }
4915
4916 static void get_pmu_ctx(struct perf_event_pmu_context *epc)
4917 {
4918 WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
4919 }
4920
4921 static void free_epc_rcu(struct rcu_head *head)
4922 {
4923 struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
4924
4925 kfree(epc->task_ctx_data);
4926 kfree(epc);
4927 }
4928
4929 static void put_pmu_ctx(struct perf_event_pmu_context *epc)
4930 {
4931 struct perf_event_context *ctx = epc->ctx;
4932 unsigned long flags;
4933
4934 /*
4935 * XXX
4936 *
4937 * lockdep_assert_held(&ctx->mutex);
4938 *
4939 * can't because of the call-site in _free_event()/put_event()
4940 * which isn't always called under ctx->mutex.
4941 */
4942 if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
4943 return;
4944
4945 WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
4946
4947 list_del_init(&epc->pmu_ctx_entry);
4948 epc->ctx = NULL;
4949
4950 WARN_ON_ONCE(!list_empty(&epc->pinned_active));
4951 WARN_ON_ONCE(!list_empty(&epc->flexible_active));
4952
4953 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4954
4955 if (epc->embedded)
4956 return;
4957
4958 call_rcu(&epc->rcu_head, free_epc_rcu);
4959 }
4960
4961 static void perf_event_free_filter(struct perf_event *event);
4962
4963 static void free_event_rcu(struct rcu_head *head)
4964 {
4965 struct perf_event *event = container_of(head, typeof(*event), rcu_head);
4966
4967 if (event->ns)
4968 put_pid_ns(event->ns);
4969 perf_event_free_filter(event);
4970 kmem_cache_free(perf_event_cache, event);
4971 }
4972
4973 static void ring_buffer_attach(struct perf_event *event,
4974 struct perf_buffer *rb);
4975
4976 static void detach_sb_event(struct perf_event *event)
4977 {
4978 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
4979
4980 raw_spin_lock(&pel->lock);
4981 list_del_rcu(&event->sb_list);
4982 raw_spin_unlock(&pel->lock);
4983 }
4984
4985 static bool is_sb_event(struct perf_event *event)
4986 {
4987 struct perf_event_attr *attr = &event->attr;
4988
4989 if (event->parent)
4990 return false;
4991
4992 if (event->attach_state & PERF_ATTACH_TASK)
4993 return false;
4994
4995 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
4996 attr->comm || attr->comm_exec ||
4997 attr->task || attr->ksymbol ||
4998 attr->context_switch || attr->text_poke ||
4999 attr->bpf_event)
5000 return true;
5001 return false;
5002 }
5003
5004 static void unaccount_pmu_sb_event(struct perf_event *event)
5005 {
5006 if (is_sb_event(event))
5007 detach_sb_event(event);
5008 }
5009
5010 #ifdef CONFIG_NO_HZ_FULL
5011 static DEFINE_SPINLOCK(nr_freq_lock);
5012 #endif
5013
5014 static void unaccount_freq_event_nohz(void)
5015 {
5016 #ifdef CONFIG_NO_HZ_FULL
5017 spin_lock(&nr_freq_lock);
5018 if (atomic_dec_and_test(&nr_freq_events))
5019 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
5020 spin_unlock(&nr_freq_lock);
5021 #endif
5022 }
5023
5024 static void unaccount_freq_event(void)
5025 {
5026 if (tick_nohz_full_enabled())
5027 unaccount_freq_event_nohz();
5028 else
5029 atomic_dec(&nr_freq_events);
5030 }
5031
5032 static void unaccount_event(struct perf_event *event)
5033 {
5034 bool dec = false;
5035
5036 if (event->parent)
5037 return;
5038
5039 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
5040 dec = true;
5041 if (event->attr.mmap || event->attr.mmap_data)
5042 atomic_dec(&nr_mmap_events);
5043 if (event->attr.build_id)
5044 atomic_dec(&nr_build_id_events);
5045 if (event->attr.comm)
5046 atomic_dec(&nr_comm_events);
5047 if (event->attr.namespaces)
5048 atomic_dec(&nr_namespaces_events);
5049 if (event->attr.cgroup)
5050 atomic_dec(&nr_cgroup_events);
5051 if (event->attr.task)
5052 atomic_dec(&nr_task_events);
5053 if (event->attr.freq)
5054 unaccount_freq_event();
5055 if (event->attr.context_switch) {
5056 dec = true;
5057 atomic_dec(&nr_switch_events);
5058 }
5059 if (is_cgroup_event(event))
5060 dec = true;
5061 if (has_branch_stack(event))
5062 dec = true;
5063 if (event->attr.ksymbol)
5064 atomic_dec(&nr_ksymbol_events);
5065 if (event->attr.bpf_event)
5066 atomic_dec(&nr_bpf_events);
5067 if (event->attr.text_poke)
5068 atomic_dec(&nr_text_poke_events);
5069
5070 if (dec) {
5071 if (!atomic_add_unless(&perf_sched_count, -1, 1))
5072 schedule_delayed_work(&perf_sched_work, HZ);
5073 }
5074
5075 unaccount_pmu_sb_event(event);
5076 }
5077
5078 static void perf_sched_delayed(struct work_struct *work)
5079 {
5080 mutex_lock(&perf_sched_mutex);
5081 if (atomic_dec_and_test(&perf_sched_count))
5082 static_branch_disable(&perf_sched_events);
5083 mutex_unlock(&perf_sched_mutex);
5084 }
5085
5086 /*
5087 * The following implement mutual exclusion of events on "exclusive" pmus
5088 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5089 * at a time, so we disallow creating events that might conflict, namely:
5090 *
5091 * 1) cpu-wide events in the presence of per-task events,
5092 * 2) per-task events in the presence of cpu-wide events,
5093 * 3) two matching events on the same perf_event_context.
5094 *
5095 * The former two cases are handled in the allocation path (perf_event_alloc(),
5096 * _free_event()), the latter -- before the first perf_install_in_context().
5097 */
5098 static int exclusive_event_init(struct perf_event *event)
5099 {
5100 struct pmu *pmu = event->pmu;
5101
5102 if (!is_exclusive_pmu(pmu))
5103 return 0;
5104
5105 /*
5106 * Prevent co-existence of per-task and cpu-wide events on the
5107 * same exclusive pmu.
5108 *
5109 * Negative pmu::exclusive_cnt means there are cpu-wide
5110 * events on this "exclusive" pmu, positive means there are
5111 * per-task events.
5112 *
5113 * Since this is called in perf_event_alloc() path, event::ctx
5114 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
5115 * to mean "per-task event", because unlike other attach states it
5116 * never gets cleared.
5117 */
5118 if (event->attach_state & PERF_ATTACH_TASK) {
5119 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
5120 return -EBUSY;
5121 } else {
5122 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
5123 return -EBUSY;
5124 }
5125
5126 return 0;
5127 }
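/*
 * Concrete example: on an idle exclusive pmu (exclusive_cnt == 0), a
 * cpu-wide event decrements the count to -1; a per-task event arriving
 * next cannot increment the now-negative count and fails with -EBUSY,
 * and vice versa.
 */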
5128
5129 static void exclusive_event_destroy(struct perf_event *event)
5130 {
5131 struct pmu *pmu = event->pmu;
5132
5133 if (!is_exclusive_pmu(pmu))
5134 return;
5135
5136 /* see comment in exclusive_event_init() */
5137 if (event->attach_state & PERF_ATTACH_TASK)
5138 atomic_dec(&pmu->exclusive_cnt);
5139 else
5140 atomic_inc(&pmu->exclusive_cnt);
5141 }
5142
5143 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
5144 {
5145 if ((e1->pmu == e2->pmu) &&
5146 (e1->cpu == e2->cpu ||
5147 e1->cpu == -1 ||
5148 e2->cpu == -1))
5149 return true;
5150 return false;
5151 }
5152
5153 static bool exclusive_event_installable(struct perf_event *event,
5154 struct perf_event_context *ctx)
5155 {
5156 struct perf_event *iter_event;
5157 struct pmu *pmu = event->pmu;
5158
5159 lockdep_assert_held(&ctx->mutex);
5160
5161 if (!is_exclusive_pmu(pmu))
5162 return true;
5163
5164 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
5165 if (exclusive_event_match(iter_event, event))
5166 return false;
5167 }
5168
5169 return true;
5170 }
5171
5172 static void perf_addr_filters_splice(struct perf_event *event,
5173 struct list_head *head);
5174
5175 static void perf_pending_task_sync(struct perf_event *event)
5176 {
5177 struct callback_head *head = &event->pending_task;
5178
5179 if (!event->pending_work)
5180 return;
5181 /*
5182 * If the work is queued to the current task's own queue, we
5183 * obviously can't wait for it to complete. Simply cancel it.
5184 */
5185 if (task_work_cancel(current, head)) {
5186 event->pending_work = 0;
5187 local_dec(&event->ctx->nr_pending);
5188 return;
5189 }
5190
5191 /*
5192 * All accesses related to the event are within the same
5193 * non-preemptible section in perf_pending_task(). The RCU
5194 * grace period before the event is freed will make sure all
5195 * those accesses are complete by then.
5196 */
5197 rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
5198 }
5199
5200 static void _free_event(struct perf_event *event)
5201 {
5202 irq_work_sync(&event->pending_irq);
5203 perf_pending_task_sync(event);
5204
5205 unaccount_event(event);
5206
5207 security_perf_event_free(event);
5208
5209 if (event->rb) {
5210 /*
5211 * Can happen when we close an event with re-directed output.
5212 *
5213 * Since we have a 0 refcount, perf_mmap_close() will skip
5214 * over us; possibly making our ring_buffer_put() the last.
5215 */
5216 mutex_lock(&event->mmap_mutex);
5217 ring_buffer_attach(event, NULL);
5218 mutex_unlock(&event->mmap_mutex);
5219 }
5220
5221 if (is_cgroup_event(event))
5222 perf_detach_cgroup(event);
5223
5224 if (!event->parent) {
5225 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
5226 put_callchain_buffers();
5227 }
5228
5229 perf_event_free_bpf_prog(event);
5230 perf_addr_filters_splice(event, NULL);
5231 kfree(event->addr_filter_ranges);
5232
5233 if (event->destroy)
5234 event->destroy(event);
5235
5236 /*
5237 * Must be after ->destroy(), due to uprobe_perf_close() using
5238 * hw.target.
5239 */
5240 if (event->hw.target)
5241 put_task_struct(event->hw.target);
5242
5243 if (event->pmu_ctx)
5244 put_pmu_ctx(event->pmu_ctx);
5245
5246 /*
5247 * perf_event_free_task() relies on put_ctx() being 'last', in particular
5248 * all task references must be cleaned up.
5249 */
5250 if (event->ctx)
5251 put_ctx(event->ctx);
5252
5253 exclusive_event_destroy(event);
5254 module_put(event->pmu->module);
5255
5256 call_rcu(&event->rcu_head, free_event_rcu);
5257 }
5258
5259 /*
5260 * Used to free events with a known refcount of 1, such as error paths where
5261 * the event isn't exposed yet, and inherited events.
5262 */
5263 static void free_event(struct perf_event *event)
5264 {
5265 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
5266 "unexpected event refcount: %ld; ptr=%p\n",
5267 atomic_long_read(&event->refcount), event)) {
5268 /* leak to avoid use-after-free */
5269 return;
5270 }
5271
5272 _free_event(event);
5273 }
5274
5275 /*
5276 * Remove user event from the owner task.
5277 */
5278 static void perf_remove_from_owner(struct perf_event *event)
5279 {
5280 struct task_struct *owner;
5281
5282 rcu_read_lock();
5283 /*
5284 * Matches the smp_store_release() in perf_event_exit_task(). If we
5285 * observe !owner it means the list deletion is complete and we can
5286 * indeed free this event, otherwise we need to serialize on
5287 * owner->perf_event_mutex.
5288 */
5289 owner = READ_ONCE(event->owner);
5290 if (owner) {
5291 /*
5292 * Since delayed_put_task_struct() also drops the last
5293 * task reference we can safely take a new reference
5294 * while holding the rcu_read_lock().
5295 */
5296 get_task_struct(owner);
5297 }
5298 rcu_read_unlock();
5299
5300 if (owner) {
5301 /*
5302 * If we're here through perf_event_exit_task() we're already
5303 * holding ctx->mutex which would be an inversion wrt. the
5304 * normal lock order.
5305 *
5306 * However, we can safely take this lock because it's the child
5307 * ctx->mutex.
5308 */
5309 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
5310
5311 /*
5312 * We have to re-check the event->owner field, if it is cleared
5313 * we raced with perf_event_exit_task(), acquiring the mutex
5314 * ensured they're done, and we can proceed with freeing the
5315 * event.
5316 */
5317 if (event->owner) {
5318 list_del_init(&event->owner_entry);
5319 smp_store_release(&event->owner, NULL);
5320 }
5321 mutex_unlock(&owner->perf_event_mutex);
5322 put_task_struct(owner);
5323 }
5324 }
5325
5326 static void put_event(struct perf_event *event)
5327 {
5328 if (!atomic_long_dec_and_test(&event->refcount))
5329 return;
5330
5331 _free_event(event);
5332 }
5333
5334 /*
5335 * Kill an event dead; while event:refcount will preserve the event
5336 * object, it will not preserve its functionality. Once the last 'user'
5337 * gives up the object, we'll destroy the thing.
5338 */
5339 int perf_event_release_kernel(struct perf_event *event)
5340 {
5341 struct perf_event_context *ctx = event->ctx;
5342 struct perf_event *child, *tmp;
5343 LIST_HEAD(free_list);
5344
5345 /*
5346 * If we got here through err_alloc: free_event(event); we will not
5347 * have attached to a context yet.
5348 */
5349 if (!ctx) {
5350 WARN_ON_ONCE(event->attach_state &
5351 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
5352 goto no_ctx;
5353 }
5354
5355 if (!is_kernel_event(event))
5356 perf_remove_from_owner(event);
5357
5358 ctx = perf_event_ctx_lock(event);
5359 WARN_ON_ONCE(ctx->parent_ctx);
5360
5361 /*
5362 * Mark this event as STATE_DEAD, there is no external reference to it
5363 * anymore.
5364 *
5365 * Anybody acquiring event->child_mutex after the below loop _must_
5366 * also see this, most importantly inherit_event() which will avoid
5367 * placing more children on the list.
5368 *
5369 * Thus this guarantees that we will in fact observe and kill _ALL_
5370 * child events.
5371 */
5372 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
5373
5374 perf_event_ctx_unlock(event, ctx);
5375
5376 again:
5377 mutex_lock(&event->child_mutex);
5378 list_for_each_entry(child, &event->child_list, child_list) {
5379 void *var = NULL;
5380
5381 /*
5382 * Cannot change, child events are not migrated, see the
5383 * comment with perf_event_ctx_lock_nested().
5384 */
5385 ctx = READ_ONCE(child->ctx);
5386 /*
5387 * Since child_mutex nests inside ctx::mutex, we must jump
5388 * through hoops. We start by grabbing a reference on the ctx.
5389 *
5390 * Since the event cannot get freed while we hold the
5391 * child_mutex, the context must also exist and have a !0
5392 * reference count.
5393 */
5394 get_ctx(ctx);
5395
5396 /*
5397 * Now that we have a ctx ref, we can drop child_mutex, and
5398 * acquire ctx::mutex without fear of it going away. Then we
5399 * can re-acquire child_mutex.
5400 */
5401 mutex_unlock(&event->child_mutex);
5402 mutex_lock(&ctx->mutex);
5403 mutex_lock(&event->child_mutex);
5404
5405 /*
5406 * Now that we hold ctx::mutex and child_mutex, revalidate our
5407 * state, if child is still the first entry, it didn't get freed
5408 * and we can continue doing so.
5409 */
5410 tmp = list_first_entry_or_null(&event->child_list,
5411 struct perf_event, child_list);
5412 if (tmp == child) {
5413 perf_remove_from_context(child, DETACH_GROUP);
5414 list_move(&child->child_list, &free_list);
5415 /*
5416 * This matches the refcount bump in inherit_event();
5417 * this can't be the last reference.
5418 */
5419 put_event(event);
5420 } else {
5421 var = &ctx->refcount;
5422 }
5423
5424 mutex_unlock(&event->child_mutex);
5425 mutex_unlock(&ctx->mutex);
5426 put_ctx(ctx);
5427
5428 if (var) {
5429 /*
5430 * If perf_event_free_task() has deleted all events from the
5431 * ctx while the child_mutex got released above, make sure to
5432 * notify about the preceding put_ctx().
5433 */
5434 smp_mb(); /* pairs with wait_var_event() */
5435 wake_up_var(var);
5436 }
5437 goto again;
5438 }
5439 mutex_unlock(&event->child_mutex);
5440
5441 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
5442 void *var = &child->ctx->refcount;
5443
5444 list_del(&child->child_list);
5445 free_event(child);
5446
5447 /*
5448 * Wake any perf_event_free_task() waiting for this event to be
5449 * freed.
5450 */
5451 smp_mb(); /* pairs with wait_var_event() */
5452 wake_up_var(var);
5453 }
5454
5455 no_ctx:
5456 put_event(event); /* Must be the 'last' reference */
5457 return 0;
5458 }
5459 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
5460
5461 /*
5462 * Called when the last reference to the file is gone.
5463 */
5464 static int perf_release(struct inode *inode, struct file *file)
5465 {
5466 perf_event_release_kernel(file->private_data);
5467 return 0;
5468 }
5469
5470 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5471 {
5472 struct perf_event *child;
5473 u64 total = 0;
5474
5475 *enabled = 0;
5476 *running = 0;
5477
5478 mutex_lock(&event->child_mutex);
5479
5480 (void)perf_event_read(event, false);
5481 total += perf_event_count(event);
5482
5483 *enabled += event->total_time_enabled +
5484 atomic64_read(&event->child_total_time_enabled);
5485 *running += event->total_time_running +
5486 atomic64_read(&event->child_total_time_running);
5487
5488 list_for_each_entry(child, &event->child_list, child_list) {
5489 (void)perf_event_read(child, false);
5490 total += perf_event_count(child);
5491 *enabled += child->total_time_enabled;
5492 *running += child->total_time_running;
5493 }
5494 mutex_unlock(&event->child_mutex);
5495
5496 return total;
5497 }
5498
5499 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5500 {
5501 struct perf_event_context *ctx;
5502 u64 count;
5503
5504 ctx = perf_event_ctx_lock(event);
5505 count = __perf_event_read_value(event, enabled, running);
5506 perf_event_ctx_unlock(event, ctx);
5507
5508 return count;
5509 }
5510 EXPORT_SYMBOL_GPL(perf_event_read_value);
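/*
 * A minimal in-kernel usage sketch, assuming a counter created with
 * perf_event_create_kernel_counter(). When the event was multiplexed,
 * running < enabled and the raw count can be scaled up to estimate the
 * full-period value (beware of multiplication overflow for very large
 * counts; this is only a sketch):
 *
 *	u64 enabled, running, count, scaled;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	scaled = running ? div64_u64(count * enabled, running) : count;
 */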
5511
5512 static int __perf_read_group_add(struct perf_event *leader,
5513 u64 read_format, u64 *values)
5514 {
5515 struct perf_event_context *ctx = leader->ctx;
5516 struct perf_event *sub, *parent;
5517 unsigned long flags;
5518 int n = 1; /* skip @nr */
5519 int ret;
5520
5521 ret = perf_event_read(leader, true);
5522 if (ret)
5523 return ret;
5524
5525 raw_spin_lock_irqsave(&ctx->lock, flags);
5526 /*
5527 * Verify the grouping between the parent and child (inherited)
5528 * events is still intact.
5529 *
5530 * Specifically:
5531 * - leader->ctx->lock pins leader->sibling_list
5532 * - parent->child_mutex pins parent->child_list
5533 * - parent->ctx->mutex pins parent->sibling_list
5534 *
5535 * Because parent->ctx != leader->ctx (and child_list nests inside
5536 * ctx->mutex), group destruction is not atomic between children; see
5537 * also perf_event_release_kernel(). Additionally, the parent can grow the
5538 * group.
5539 *
5540 * Therefore it is possible to have parent and child groups in a
5541 * different configuration, and summing over such a beast makes no
5542 * sense whatsoever.
5543 *
5544 * Reject this.
5545 */
5546 parent = leader->parent;
5547 if (parent &&
5548 (parent->group_generation != leader->group_generation ||
5549 parent->nr_siblings != leader->nr_siblings)) {
5550 ret = -ECHILD;
5551 goto unlock;
5552 }
5553
5554 /*
5555 * Since we co-schedule groups, {enabled,running} times of siblings
5556 * will be identical to those of the leader, so we only publish one
5557 * set.
5558 */
5559 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5560 values[n++] += leader->total_time_enabled +
5561 atomic64_read(&leader->child_total_time_enabled);
5562 }
5563
5564 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5565 values[n++] += leader->total_time_running +
5566 atomic64_read(&leader->child_total_time_running);
5567 }
5568
5569 /*
5570 * Write {count,id} tuples for every sibling.
5571 */
5572 values[n++] += perf_event_count(leader);
5573 if (read_format & PERF_FORMAT_ID)
5574 values[n++] = primary_event_id(leader);
5575 if (read_format & PERF_FORMAT_LOST)
5576 values[n++] = atomic64_read(&leader->lost_samples);
5577
5578 for_each_sibling_event(sub, leader) {
5579 values[n++] += perf_event_count(sub);
5580 if (read_format & PERF_FORMAT_ID)
5581 values[n++] = primary_event_id(sub);
5582 if (read_format & PERF_FORMAT_LOST)
5583 values[n++] = atomic64_read(&sub->lost_samples);
5584 }
5585
5586 unlock:
5587 raw_spin_unlock_irqrestore(&ctx->lock, flags);
5588 return ret;
5589 }
5590
5591 static int perf_read_group(struct perf_event *event,
5592 u64 read_format, char __user *buf)
5593 {
5594 struct perf_event *leader = event->group_leader, *child;
5595 struct perf_event_context *ctx = leader->ctx;
5596 int ret;
5597 u64 *values;
5598
5599 lockdep_assert_held(&ctx->mutex);
5600
5601 values = kzalloc(event->read_size, GFP_KERNEL);
5602 if (!values)
5603 return -ENOMEM;
5604
5605 values[0] = 1 + leader->nr_siblings;
5606
5607 mutex_lock(&leader->child_mutex);
5608
5609 ret = __perf_read_group_add(leader, read_format, values);
5610 if (ret)
5611 goto unlock;
5612
5613 list_for_each_entry(child, &leader->child_list, child_list) {
5614 ret = __perf_read_group_add(child, read_format, values);
5615 if (ret)
5616 goto unlock;
5617 }
5618
5619 mutex_unlock(&leader->child_mutex);
5620
5621 ret = event->read_size;
5622 if (copy_to_user(buf, values, event->read_size))
5623 ret = -EFAULT;
5624 goto out;
5625
5626 unlock:
5627 mutex_unlock(&leader->child_mutex);
5628 out:
5629 kfree(values);
5630 return ret;
5631 }
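/*
 * For reference, the user-visible buffer filled in above for a
 * PERF_FORMAT_GROUP read has this layout (see perf_event_open(2); the
 * struct name is illustrative, the optional fields depend on
 * read_format):
 *
 *	struct read_format {
 *		u64 nr;			// number of events in the group
 *		u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;	// counter value
 *			u64 id;		// PERF_FORMAT_ID
 *			u64 lost;	// PERF_FORMAT_LOST
 *		} values[nr];
 *	};
 */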
5632
5633 static int perf_read_one(struct perf_event *event,
5634 u64 read_format, char __user *buf)
5635 {
5636 u64 enabled, running;
5637 u64 values[5];
5638 int n = 0;
5639
5640 values[n++] = __perf_event_read_value(event, &enabled, &running);
5641 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5642 values[n++] = enabled;
5643 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5644 values[n++] = running;
5645 if (read_format & PERF_FORMAT_ID)
5646 values[n++] = primary_event_id(event);
5647 if (read_format & PERF_FORMAT_LOST)
5648 values[n++] = atomic64_read(&event->lost_samples);
5649
5650 if (copy_to_user(buf, values, n * sizeof(u64)))
5651 return -EFAULT;
5652
5653 return n * sizeof(u64);
5654 }
5655
5656 static bool is_event_hup(struct perf_event *event)
5657 {
5658 bool no_children;
5659
5660 if (event->state > PERF_EVENT_STATE_EXIT)
5661 return false;
5662
5663 mutex_lock(&event->child_mutex);
5664 no_children = list_empty(&event->child_list);
5665 mutex_unlock(&event->child_mutex);
5666 return no_children;
5667 }
5668
5669 /*
5670 * Read the performance event - simple non blocking version for now
5671 */
5672 static ssize_t
5673 __perf_read(struct perf_event *event, char __user *buf, size_t count)
5674 {
5675 u64 read_format = event->attr.read_format;
5676 int ret;
5677
5678 /*
5679 * Return end-of-file for a read on an event that is in
5680 * error state (i.e. because it was pinned but it couldn't be
5681 * scheduled onto the CPU at some point).
5682 */
5683 if (event->state == PERF_EVENT_STATE_ERROR)
5684 return 0;
5685
5686 if (count < event->read_size)
5687 return -ENOSPC;
5688
5689 WARN_ON_ONCE(event->ctx->parent_ctx);
5690 if (read_format & PERF_FORMAT_GROUP)
5691 ret = perf_read_group(event, read_format, buf);
5692 else
5693 ret = perf_read_one(event, read_format, buf);
5694
5695 return ret;
5696 }
5697
5698 static ssize_t
5699 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
5700 {
5701 struct perf_event *event = file->private_data;
5702 struct perf_event_context *ctx;
5703 int ret;
5704
5705 ret = security_perf_event_read(event);
5706 if (ret)
5707 return ret;
5708
5709 ctx = perf_event_ctx_lock(event);
5710 ret = __perf_read(event, buf, count);
5711 perf_event_ctx_unlock(event, ctx);
5712
5713 return ret;
5714 }
5715
5716 static __poll_t perf_poll(struct file *file, poll_table *wait)
5717 {
5718 struct perf_event *event = file->private_data;
5719 struct perf_buffer *rb;
5720 __poll_t events = EPOLLHUP;
5721
5722 poll_wait(file, &event->waitq, wait);
5723
5724 if (is_event_hup(event))
5725 return events;
5726
5727 /*
5728 * Pin the event->rb by taking event->mmap_mutex; otherwise
5729 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
5730 */
5731 mutex_lock(&event->mmap_mutex);
5732 rb = event->rb;
5733 if (rb)
5734 events = atomic_xchg(&rb->poll, 0);
5735 mutex_unlock(&event->mmap_mutex);
5736 return events;
5737 }
5738
5739 static void _perf_event_reset(struct perf_event *event)
5740 {
5741 (void)perf_event_read(event, false);
5742 local64_set(&event->count, 0);
5743 perf_event_update_userpage(event);
5744 }
5745
5746 /* Assume it's not an event with inherit set. */
5747 u64 perf_event_pause(struct perf_event *event, bool reset)
5748 {
5749 struct perf_event_context *ctx;
5750 u64 count;
5751
5752 ctx = perf_event_ctx_lock(event);
5753 WARN_ON_ONCE(event->attr.inherit);
5754 _perf_event_disable(event);
5755 count = local64_read(&event->count);
5756 if (reset)
5757 local64_set(&event->count, 0);
5758 perf_event_ctx_unlock(event, ctx);
5759
5760 return count;
5761 }
5762 EXPORT_SYMBOL_GPL(perf_event_pause);
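/*
 * A hedged usage sketch for in-kernel users (e.g. counters created via
 * perf_event_create_kernel_counter()) that want an atomic
 * stop-read-reset:
 *
 *	u64 count = perf_event_pause(event, true);	// disable, read, zero
 *	// ... consume 'count' ...
 *	perf_event_enable(event);			// resume counting
 */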
5763
5764 /*
5765 * Holding the top-level event's child_mutex means that any
5766 * descendant process that has inherited this event will block
5767 * in perf_event_exit_event() if it goes to exit, thus satisfying the
5768 * task existence requirements of perf_event_enable/disable.
5769 */
5770 static void perf_event_for_each_child(struct perf_event *event,
5771 void (*func)(struct perf_event *))
5772 {
5773 struct perf_event *child;
5774
5775 WARN_ON_ONCE(event->ctx->parent_ctx);
5776
5777 mutex_lock(&event->child_mutex);
5778 func(event);
5779 list_for_each_entry(child, &event->child_list, child_list)
5780 func(child);
5781 mutex_unlock(&event->child_mutex);
5782 }
5783
5784 static void perf_event_for_each(struct perf_event *event,
5785 void (*func)(struct perf_event *))
5786 {
5787 struct perf_event_context *ctx = event->ctx;
5788 struct perf_event *sibling;
5789
5790 lockdep_assert_held(&ctx->mutex);
5791
5792 event = event->group_leader;
5793
5794 perf_event_for_each_child(event, func);
5795 for_each_sibling_event(sibling, event)
5796 perf_event_for_each_child(sibling, func);
5797 }
5798
5799 static void __perf_event_period(struct perf_event *event,
5800 struct perf_cpu_context *cpuctx,
5801 struct perf_event_context *ctx,
5802 void *info)
5803 {
5804 u64 value = *((u64 *)info);
5805 bool active;
5806
5807 if (event->attr.freq) {
5808 event->attr.sample_freq = value;
5809 } else {
5810 event->attr.sample_period = value;
5811 event->hw.sample_period = value;
5812 }
5813
5814 active = (event->state == PERF_EVENT_STATE_ACTIVE);
5815 if (active) {
5816 perf_pmu_disable(event->pmu);
5817 /*
5818 * We could be throttled; unthrottle now to avoid the tick
5819 * trying to unthrottle while we already re-started the event.
5820 */
5821 if (event->hw.interrupts == MAX_INTERRUPTS) {
5822 event->hw.interrupts = 0;
5823 perf_log_throttle(event, 1);
5824 }
5825 event->pmu->stop(event, PERF_EF_UPDATE);
5826 }
5827
5828 local64_set(&event->hw.period_left, 0);
5829
5830 if (active) {
5831 event->pmu->start(event, PERF_EF_RELOAD);
5832 perf_pmu_enable(event->pmu);
5833 }
5834 }
5835
5836 static int perf_event_check_period(struct perf_event *event, u64 value)
5837 {
5838 return event->pmu->check_period(event, value);
5839 }
5840
5841 static int _perf_event_period(struct perf_event *event, u64 value)
5842 {
5843 if (!is_sampling_event(event))
5844 return -EINVAL;
5845
5846 if (!value)
5847 return -EINVAL;
5848
5849 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
5850 return -EINVAL;
5851
5852 if (perf_event_check_period(event, value))
5853 return -EINVAL;
5854
5855 if (!event->attr.freq && (value & (1ULL << 63)))
5856 return -EINVAL;
5857
5858 event_function_call(event, __perf_event_period, &value);
5859
5860 return 0;
5861 }
5862
5863 int perf_event_period(struct perf_event *event, u64 value)
5864 {
5865 struct perf_event_context *ctx;
5866 int ret;
5867
5868 ctx = perf_event_ctx_lock(event);
5869 ret = _perf_event_period(event, value);
5870 perf_event_ctx_unlock(event, ctx);
5871
5872 return ret;
5873 }
5874 EXPORT_SYMBOL_GPL(perf_event_period);
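/*
 * Userspace reaches the same update through the perf fd; a short
 * sketch (the period value is illustrative):
 *
 *	u64 period = 100000;
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))	// takes a u64 *
 *		err(1, "PERF_EVENT_IOC_PERIOD");
 */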
5875
5876 static const struct file_operations perf_fops;
5877
5878 static inline int perf_fget_light(int fd, struct fd *p)
5879 {
5880 struct fd f = fdget(fd);
5881 if (!f.file)
5882 return -EBADF;
5883
5884 if (f.file->f_op != &perf_fops) {
5885 fdput(f);
5886 return -EBADF;
5887 }
5888 *p = f;
5889 return 0;
5890 }
5891
5892 static int perf_event_set_output(struct perf_event *event,
5893 struct perf_event *output_event);
5894 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5895 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5896 struct perf_event_attr *attr);
5897
5898 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
5899 {
5900 void (*func)(struct perf_event *);
5901 u32 flags = arg;
5902
5903 switch (cmd) {
5904 case PERF_EVENT_IOC_ENABLE:
5905 func = _perf_event_enable;
5906 break;
5907 case PERF_EVENT_IOC_DISABLE:
5908 func = _perf_event_disable;
5909 break;
5910 case PERF_EVENT_IOC_RESET:
5911 func = _perf_event_reset;
5912 break;
5913
5914 case PERF_EVENT_IOC_REFRESH:
5915 return _perf_event_refresh(event, arg);
5916
5917 case PERF_EVENT_IOC_PERIOD:
5918 {
5919 u64 value;
5920
5921 if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
5922 return -EFAULT;
5923
5924 return _perf_event_period(event, value);
5925 }
5926 case PERF_EVENT_IOC_ID:
5927 {
5928 u64 id = primary_event_id(event);
5929
5930 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
5931 return -EFAULT;
5932 return 0;
5933 }
5934
5935 case PERF_EVENT_IOC_SET_OUTPUT:
5936 {
5937 int ret;
5938 if (arg != -1) {
5939 struct perf_event *output_event;
5940 struct fd output;
5941 ret = perf_fget_light(arg, &output);
5942 if (ret)
5943 return ret;
5944 output_event = output.file->private_data;
5945 ret = perf_event_set_output(event, output_event);
5946 fdput(output);
5947 } else {
5948 ret = perf_event_set_output(event, NULL);
5949 }
5950 return ret;
5951 }
5952
5953 case PERF_EVENT_IOC_SET_FILTER:
5954 return perf_event_set_filter(event, (void __user *)arg);
5955
5956 case PERF_EVENT_IOC_SET_BPF:
5957 {
5958 struct bpf_prog *prog;
5959 int err;
5960
5961 prog = bpf_prog_get(arg);
5962 if (IS_ERR(prog))
5963 return PTR_ERR(prog);
5964
5965 err = perf_event_set_bpf_prog(event, prog, 0);
5966 if (err) {
5967 bpf_prog_put(prog);
5968 return err;
5969 }
5970
5971 return 0;
5972 }
5973
5974 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
5975 struct perf_buffer *rb;
5976
5977 rcu_read_lock();
5978 rb = rcu_dereference(event->rb);
5979 if (!rb || !rb->nr_pages) {
5980 rcu_read_unlock();
5981 return -EINVAL;
5982 }
5983 rb_toggle_paused(rb, !!arg);
5984 rcu_read_unlock();
5985 return 0;
5986 }
5987
5988 case PERF_EVENT_IOC_QUERY_BPF:
5989 return perf_event_query_prog_array(event, (void __user *)arg);
5990
5991 case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
5992 struct perf_event_attr new_attr;
5993 int err = perf_copy_attr((struct perf_event_attr __user *)arg,
5994 &new_attr);
5995
5996 if (err)
5997 return err;
5998
5999 return perf_event_modify_attr(event, &new_attr);
6000 }
6001 default:
6002 return -ENOTTY;
6003 }
6004
6005 if (flags & PERF_IOC_FLAG_GROUP)
6006 perf_event_for_each(event, func);
6007 else
6008 perf_event_for_each_child(event, func);
6009
6010 return 0;
6011 }
6012
6013 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6014 {
6015 struct perf_event *event = file->private_data;
6016 struct perf_event_context *ctx;
6017 long ret;
6018
6019 /* Treat ioctl like writes as it is likely a mutating operation. */
6020 ret = security_perf_event_write(event);
6021 if (ret)
6022 return ret;
6023
6024 ctx = perf_event_ctx_lock(event);
6025 ret = _perf_ioctl(event, cmd, arg);
6026 perf_event_ctx_unlock(event, ctx);
6027
6028 return ret;
6029 }
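/*
 * A short userspace sketch of the dispatch above: ENABLE, DISABLE and
 * RESET interpret their argument as flags, so PERF_IOC_FLAG_GROUP
 * applies the operation to the whole event group:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *	// ... run the workload under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */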
6030
6031 #ifdef CONFIG_COMPAT
6032 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
6033 unsigned long arg)
6034 {
6035 switch (_IOC_NR(cmd)) {
6036 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
6037 case _IOC_NR(PERF_EVENT_IOC_ID):
6038 case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
6039 case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
6040 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
6041 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
6042 cmd &= ~IOCSIZE_MASK;
6043 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
6044 }
6045 break;
6046 }
6047 return perf_ioctl(file, cmd, arg);
6048 }
6049 #else
6050 # define perf_compat_ioctl NULL
6051 #endif
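/*
 * Worked example of the fixup above: PERF_EVENT_IOC_SET_FILTER is
 * _IOW('$', 6, char *), so a 32-bit caller encodes _IOC_SIZE(cmd) == 4
 * (sizeof(compat_uptr_t)). Rewriting the size field to
 * sizeof(void *) == 8 makes the command match the 64-bit definition
 * that the switch in _perf_ioctl() compares against.
 */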
6052
6053 int perf_event_task_enable(void)
6054 {
6055 struct perf_event_context *ctx;
6056 struct perf_event *event;
6057
6058 mutex_lock(&current->perf_event_mutex);
6059 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6060 ctx = perf_event_ctx_lock(event);
6061 perf_event_for_each_child(event, _perf_event_enable);
6062 perf_event_ctx_unlock(event, ctx);
6063 }
6064 mutex_unlock(&current->perf_event_mutex);
6065
6066 return 0;
6067 }
6068
6069 int perf_event_task_disable(void)
6070 {
6071 struct perf_event_context *ctx;
6072 struct perf_event *event;
6073
6074 mutex_lock(&current->perf_event_mutex);
6075 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6076 ctx = perf_event_ctx_lock(event);
6077 perf_event_for_each_child(event, _perf_event_disable);
6078 perf_event_ctx_unlock(event, ctx);
6079 }
6080 mutex_unlock(&current->perf_event_mutex);
6081
6082 return 0;
6083 }
6084
6085 static int perf_event_index(struct perf_event *event)
6086 {
6087 if (event->hw.state & PERF_HES_STOPPED)
6088 return 0;
6089
6090 if (event->state != PERF_EVENT_STATE_ACTIVE)
6091 return 0;
6092
6093 return event->pmu->event_idx(event);
6094 }
6095
6096 static void perf_event_init_userpage(struct perf_event *event)
6097 {
6098 struct perf_event_mmap_page *userpg;
6099 struct perf_buffer *rb;
6100
6101 rcu_read_lock();
6102 rb = rcu_dereference(event->rb);
6103 if (!rb)
6104 goto unlock;
6105
6106 userpg = rb->user_page;
6107
6108 /* Allow new userspace to detect that bit 0 is deprecated */
6109 userpg->cap_bit0_is_deprecated = 1;
6110 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
6111 userpg->data_offset = PAGE_SIZE;
6112 userpg->data_size = perf_data_size(rb);
6113
6114 unlock:
6115 rcu_read_unlock();
6116 }
6117
6118 void __weak arch_perf_update_userpage(
6119 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
6120 {
6121 }
6122
6123 /*
6124 * Callers need to ensure there can be no nesting of this function, otherwise
6125 * the seqlock logic goes bad. We cannot serialize this because the arch
6126 * code calls this from NMI context.
6127 */
6128 void perf_event_update_userpage(struct perf_event *event)
6129 {
6130 struct perf_event_mmap_page *userpg;
6131 struct perf_buffer *rb;
6132 u64 enabled, running, now;
6133
6134 rcu_read_lock();
6135 rb = rcu_dereference(event->rb);
6136 if (!rb)
6137 goto unlock;
6138
6139 /*
6140 * compute total_time_enabled, total_time_running
6141 * based on snapshot values taken when the event
6142 * was last scheduled in.
6143 *
6144 * we cannot simply call update_context_time()
6145 * because of locking issues, as we can be called in
6146 * NMI context
6147 */
6148 calc_timer_values(event, &now, &enabled, &running);
6149
6150 userpg = rb->user_page;
6151 /*
6152 * Disable preemption to guarantee consistent time stamps are stored to
6153 * the user page.
6154 */
6155 preempt_disable();
6156 ++userpg->lock;
6157 barrier();
6158 userpg->index = perf_event_index(event);
6159 userpg->offset = perf_event_count(event);
6160 if (userpg->index)
6161 userpg->offset -= local64_read(&event->hw.prev_count);
6162
6163 userpg->time_enabled = enabled +
6164 atomic64_read(&event->child_total_time_enabled);
6165
6166 userpg->time_running = running +
6167 atomic64_read(&event->child_total_time_running);
6168
6169 arch_perf_update_userpage(event, userpg, now);
6170
6171 barrier();
6172 ++userpg->lock;
6173 preempt_enable();
6174 unlock:
6175 rcu_read_unlock();
6176 }
6177 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
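/*
 * The userspace half of this seqlock, condensed from the example in
 * include/uapi/linux/perf_event.h; 'pc' is the mmap()ed metadata page:
 *
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;	// add rdpmc(idx - 1) when idx != 0
 *		barrier();
 *	} while (pc->lock != seq);
 */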
6178
6179 static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
6180 {
6181 struct perf_event *event = vmf->vma->vm_file->private_data;
6182 struct perf_buffer *rb;
6183 vm_fault_t ret = VM_FAULT_SIGBUS;
6184
6185 if (vmf->flags & FAULT_FLAG_MKWRITE) {
6186 if (vmf->pgoff == 0)
6187 ret = 0;
6188 return ret;
6189 }
6190
6191 rcu_read_lock();
6192 rb = rcu_dereference(event->rb);
6193 if (!rb)
6194 goto unlock;
6195
6196 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
6197 goto unlock;
6198
6199 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
6200 if (!vmf->page)
6201 goto unlock;
6202
6203 get_page(vmf->page);
6204 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
6205 vmf->page->index = vmf->pgoff;
6206
6207 ret = 0;
6208 unlock:
6209 rcu_read_unlock();
6210
6211 return ret;
6212 }
6213
6214 static void ring_buffer_attach(struct perf_event *event,
6215 struct perf_buffer *rb)
6216 {
6217 struct perf_buffer *old_rb = NULL;
6218 unsigned long flags;
6219
6220 WARN_ON_ONCE(event->parent);
6221
6222 if (event->rb) {
6223 /*
6224 * Should be impossible, we set this when removing
6225 * event->rb_entry and wait/clear when adding event->rb_entry.
6226 */
6227 WARN_ON_ONCE(event->rcu_pending);
6228
6229 old_rb = event->rb;
6230 spin_lock_irqsave(&old_rb->event_lock, flags);
6231 list_del_rcu(&event->rb_entry);
6232 spin_unlock_irqrestore(&old_rb->event_lock, flags);
6233
6234 event->rcu_batches = get_state_synchronize_rcu();
6235 event->rcu_pending = 1;
6236 }
6237
6238 if (rb) {
6239 if (event->rcu_pending) {
6240 cond_synchronize_rcu(event->rcu_batches);
6241 event->rcu_pending = 0;
6242 }
6243
6244 spin_lock_irqsave(&rb->event_lock, flags);
6245 list_add_rcu(&event->rb_entry, &rb->event_list);
6246 spin_unlock_irqrestore(&rb->event_lock, flags);
6247 }
6248
6249 /*
6250 * Avoid racing with perf_mmap_close(AUX): stop the event
6251 * before swizzling the event::rb pointer; if it's getting
6252 * unmapped, its aux_mmap_count will be 0 and it won't
6253 * restart. See the comment in __perf_pmu_output_stop().
6254 *
6255 * Data will inevitably be lost when set_output is done in
6256 * mid-air, but then again, whoever does it like this is
6257 * not in for the data anyway.
6258 */
6259 if (has_aux(event))
6260 perf_event_stop(event, 0);
6261
6262 rcu_assign_pointer(event->rb, rb);
6263
6264 if (old_rb) {
6265 ring_buffer_put(old_rb);
6266 /*
6267 * Since we detached the old rb before attaching the new one,
6268 * we could have missed a wakeup.
6269 * Provide it now.
6270 */
6271 wake_up_all(&event->waitq);
6272 }
6273 }
6274
6275 static void ring_buffer_wakeup(struct perf_event *event)
6276 {
6277 struct perf_buffer *rb;
6278
6279 if (event->parent)
6280 event = event->parent;
6281
6282 rcu_read_lock();
6283 rb = rcu_dereference(event->rb);
6284 if (rb) {
6285 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
6286 wake_up_all(&event->waitq);
6287 }
6288 rcu_read_unlock();
6289 }
6290
6291 struct perf_buffer *ring_buffer_get(struct perf_event *event)
6292 {
6293 struct perf_buffer *rb;
6294
6295 if (event->parent)
6296 event = event->parent;
6297
6298 rcu_read_lock();
6299 rb = rcu_dereference(event->rb);
6300 if (rb) {
6301 if (!refcount_inc_not_zero(&rb->refcount))
6302 rb = NULL;
6303 }
6304 rcu_read_unlock();
6305
6306 return rb;
6307 }
6308
6309 void ring_buffer_put(struct perf_buffer *rb)
6310 {
6311 if (!refcount_dec_and_test(&rb->refcount))
6312 return;
6313
6314 WARN_ON_ONCE(!list_empty(&rb->event_list));
6315
6316 call_rcu(&rb->rcu_head, rb_free_rcu);
6317 }
6318
6319 static void perf_mmap_open(struct vm_area_struct *vma)
6320 {
6321 struct perf_event *event = vma->vm_file->private_data;
6322
6323 atomic_inc(&event->mmap_count);
6324 atomic_inc(&event->rb->mmap_count);
6325
6326 if (vma->vm_pgoff)
6327 atomic_inc(&event->rb->aux_mmap_count);
6328
6329 if (event->pmu->event_mapped)
6330 event->pmu->event_mapped(event, vma->vm_mm);
6331 }
6332
6333 static void perf_pmu_output_stop(struct perf_event *event);
6334
6335 /*
6336 * A buffer can be mmap()ed multiple times; either directly through the same
6337 * event, or through other events by use of perf_event_set_output().
6338 *
6339 * In order to undo the VM accounting done by perf_mmap() we need to destroy
6340 * the buffer here, where we still have a VM context. This means we need
6341 * to detach all events redirecting to us.
6342 */
6343 static void perf_mmap_close(struct vm_area_struct *vma)
6344 {
6345 struct perf_event *event = vma->vm_file->private_data;
6346 struct perf_buffer *rb = ring_buffer_get(event);
6347 struct user_struct *mmap_user = rb->mmap_user;
6348 int mmap_locked = rb->mmap_locked;
6349 unsigned long size = perf_data_size(rb);
6350 bool detach_rest = false;
6351
6352 if (event->pmu->event_unmapped)
6353 event->pmu->event_unmapped(event, vma->vm_mm);
6354
6355 /*
6356 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
6357 * to avoid complications.
6358 */
6359 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
6360 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
6361 /*
6362 * Stop all AUX events that are writing to this buffer,
6363 * so that we can free its AUX pages and corresponding PMU
6364 * data. Note that after rb::aux_mmap_count dropped to zero,
6365 * they won't start any more (see perf_aux_output_begin()).
6366 */
6367 perf_pmu_output_stop(event);
6368
6369 /* now it's safe to free the pages */
6370 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
6371 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
6372
6373 /* this has to be the last one */
6374 rb_free_aux(rb);
6375 WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
6376
6377 mutex_unlock(&rb->aux_mutex);
6378 }
6379
6380 if (atomic_dec_and_test(&rb->mmap_count))
6381 detach_rest = true;
6382
6383 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
6384 goto out_put;
6385
6386 ring_buffer_attach(event, NULL);
6387 mutex_unlock(&event->mmap_mutex);
6388
6389 /* If there's still other mmap()s of this buffer, we're done. */
6390 if (!detach_rest)
6391 goto out_put;
6392
6393 /*
6394 * No other mmap()s, detach from all other events that might redirect
6395 * into the now unreachable buffer. Somewhat complicated by the
6396 * fact that rb::event_lock otherwise nests inside mmap_mutex.
6397 */
6398 again:
6399 rcu_read_lock();
6400 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
6401 if (!atomic_long_inc_not_zero(&event->refcount)) {
6402 /*
6403 * This event is en-route to free_event() which will
6404 * detach it and remove it from the list.
6405 */
6406 continue;
6407 }
6408 rcu_read_unlock();
6409
6410 mutex_lock(&event->mmap_mutex);
6411 /*
6412 * Check we didn't race with perf_event_set_output() which can
6413 * swizzle the rb from under us while we were waiting to
6414 * acquire mmap_mutex.
6415 *
6416 * If we find a different rb, ignore this event; a later
6417 * iteration will no longer find it on the list. We still have
6418 * to restart the iteration to make sure we're not now
6419 * iterating the wrong list.
6420 */
6421 if (event->rb == rb)
6422 ring_buffer_attach(event, NULL);
6423
6424 mutex_unlock(&event->mmap_mutex);
6425 put_event(event);
6426
6427 /*
6428 * Restart the iteration; either we're on the wrong list or
6429 * we destroyed its integrity by doing a deletion.
6430 */
6431 goto again;
6432 }
6433 rcu_read_unlock();
6434
6435 /*
6436 * It could be that there are still a few 0-ref events on the list; they'll
6437 * get cleaned up by free_event() -- they'll also still have their
6438 * ref on the rb and will free it whenever they are done with it.
6439 *
6440 * Aside from that, this buffer is 'fully' detached and unmapped,
6441 * undo the VM accounting.
6442 */
6443
6444 atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
6445 &mmap_user->locked_vm);
6446 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
6447 free_uid(mmap_user);
6448
6449 out_put:
6450 ring_buffer_put(rb); /* could be last */
6451 }
6452
6453 static const struct vm_operations_struct perf_mmap_vmops = {
6454 .open = perf_mmap_open,
6455 .close = perf_mmap_close, /* non mergeable */
6456 .fault = perf_mmap_fault,
6457 .page_mkwrite = perf_mmap_fault,
6458 };
6459
6460 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
6461 {
6462 struct perf_event *event = file->private_data;
6463 unsigned long user_locked, user_lock_limit;
6464 struct user_struct *user = current_user();
6465 struct mutex *aux_mutex = NULL;
6466 struct perf_buffer *rb = NULL;
6467 unsigned long locked, lock_limit;
6468 unsigned long vma_size;
6469 unsigned long nr_pages;
6470 long user_extra = 0, extra = 0;
6471 int ret = 0, flags = 0;
6472
6473 /*
6474 * Don't allow mmap() of inherited per-task counters. This would
6475 * create a performance issue due to all children writing to the
6476 * same rb.
6477 */
6478 if (event->cpu == -1 && event->attr.inherit)
6479 return -EINVAL;
6480
6481 if (!(vma->vm_flags & VM_SHARED))
6482 return -EINVAL;
6483
6484 ret = security_perf_event_read(event);
6485 if (ret)
6486 return ret;
6487
6488 vma_size = vma->vm_end - vma->vm_start;
6489
6490 if (vma->vm_pgoff == 0) {
6491 nr_pages = (vma_size / PAGE_SIZE) - 1;
6492 } else {
6493 /*
6494 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
6495 * mapped; all subsequent mappings must have the same size
6496 * and offset, and must be above the normal perf buffer.
6497 */
6498 u64 aux_offset, aux_size;
6499
6500 if (!event->rb)
6501 return -EINVAL;
6502
6503 nr_pages = vma_size / PAGE_SIZE;
6504 if (nr_pages > INT_MAX)
6505 return -ENOMEM;
6506
6507 mutex_lock(&event->mmap_mutex);
6508 ret = -EINVAL;
6509
6510 rb = event->rb;
6511 if (!rb)
6512 goto aux_unlock;
6513
6514 aux_mutex = &rb->aux_mutex;
6515 mutex_lock(aux_mutex);
6516
6517 aux_offset = READ_ONCE(rb->user_page->aux_offset);
6518 aux_size = READ_ONCE(rb->user_page->aux_size);
6519
6520 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
6521 goto aux_unlock;
6522
6523 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
6524 goto aux_unlock;
6525
6526 /* already mapped with a different offset */
6527 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
6528 goto aux_unlock;
6529
6530 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
6531 goto aux_unlock;
6532
6533 /* already mapped with a different size */
6534 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
6535 goto aux_unlock;
6536
6537 if (!is_power_of_2(nr_pages))
6538 goto aux_unlock;
6539
6540 if (!atomic_inc_not_zero(&rb->mmap_count))
6541 goto aux_unlock;
6542
6543 if (rb_has_aux(rb)) {
6544 atomic_inc(&rb->aux_mmap_count);
6545 ret = 0;
6546 goto unlock;
6547 }
6548
6549 atomic_set(&rb->aux_mmap_count, 1);
6550 user_extra = nr_pages;
6551
6552 goto accounting;
6553 }
6554
6555 /*
6556 * If we have rb pages ensure they're a power-of-two number, so we
6557 * can do bitmasks instead of modulo.
6558 */
6559 if (nr_pages != 0 && !is_power_of_2(nr_pages))
6560 return -EINVAL;
6561
6562 if (vma_size != PAGE_SIZE * (1 + nr_pages))
6563 return -EINVAL;
6564
6565 WARN_ON_ONCE(event->ctx->parent_ctx);
6566 again:
6567 mutex_lock(&event->mmap_mutex);
6568 if (event->rb) {
6569 if (data_page_nr(event->rb) != nr_pages) {
6570 ret = -EINVAL;
6571 goto unlock;
6572 }
6573
6574 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
6575 /*
6576 * Raced against perf_mmap_close(); remove the
6577 * event and try again.
6578 */
6579 ring_buffer_attach(event, NULL);
6580 mutex_unlock(&event->mmap_mutex);
6581 goto again;
6582 }
6583
6584 goto unlock;
6585 }
6586
6587 user_extra = nr_pages + 1;
6588
6589 accounting:
6590 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
6591
6592 /*
6593 * Increase the limit linearly with more CPUs:
6594 */
6595 user_lock_limit *= num_online_cpus();
6596
6597 user_locked = atomic_long_read(&user->locked_vm);
6598
6599 /*
6600 * sysctl_perf_event_mlock may have changed, so that
6601 * user->locked_vm > user_lock_limit
6602 */
6603 if (user_locked > user_lock_limit)
6604 user_locked = user_lock_limit;
6605 user_locked += user_extra;
6606
6607 if (user_locked > user_lock_limit) {
6608 /*
6609 * charge locked_vm until it hits user_lock_limit;
6610 * charge the rest from pinned_vm
6611 */
6612 extra = user_locked - user_lock_limit;
6613 user_extra -= extra;
6614 }
6615
6616 lock_limit = rlimit(RLIMIT_MEMLOCK);
6617 lock_limit >>= PAGE_SHIFT;
6618 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
6619
6620 if ((locked > lock_limit) && perf_is_paranoid() &&
6621 !capable(CAP_IPC_LOCK)) {
6622 ret = -EPERM;
6623 goto unlock;
6624 }
6625
6626 WARN_ON(!rb && event->rb);
6627
6628 if (vma->vm_flags & VM_WRITE)
6629 flags |= RING_BUFFER_WRITABLE;
6630
6631 if (!rb) {
6632 rb = rb_alloc(nr_pages,
6633 event->attr.watermark ? event->attr.wakeup_watermark : 0,
6634 event->cpu, flags);
6635
6636 if (!rb) {
6637 ret = -ENOMEM;
6638 goto unlock;
6639 }
6640
6641 atomic_set(&rb->mmap_count, 1);
6642 rb->mmap_user = get_current_user();
6643 rb->mmap_locked = extra;
6644
6645 ring_buffer_attach(event, rb);
6646
6647 perf_event_update_time(event);
6648 perf_event_init_userpage(event);
6649 perf_event_update_userpage(event);
6650 } else {
6651 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
6652 event->attr.aux_watermark, flags);
6653 if (!ret)
6654 rb->aux_mmap_locked = extra;
6655 }
6656
6657 unlock:
6658 if (!ret) {
6659 atomic_long_add(user_extra, &user->locked_vm);
6660 atomic64_add(extra, &vma->vm_mm->pinned_vm);
6661
6662 atomic_inc(&event->mmap_count);
6663 } else if (rb) {
6664 atomic_dec(&rb->mmap_count);
6665 }
6666 aux_unlock:
6667 if (aux_mutex)
6668 mutex_unlock(aux_mutex);
6669 mutex_unlock(&event->mmap_mutex);
6670
6671 /*
6672 * Since pinned accounting is per vm we cannot allow fork() to copy our
6673 * vma.
6674 */
6675 vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
6676 vma->vm_ops = &perf_mmap_vmops;
6677
6678 if (event->pmu->event_mapped)
6679 event->pmu->event_mapped(event, vma->vm_mm);
6680
6681 return ret;
6682 }
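/*
 * A userspace sketch of the layout enforced above: one metadata page
 * plus a power-of-two number of data pages at offset 0, e.g. with 4KiB
 * pages:
 *
 *	struct perf_event_mmap_page *pc;
 *
 *	pc = mmap(NULL, (1 + 8) * 4096, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, fd, 0);	// 1 control page + 8 data pages
 *
 * An optional AUX mapping goes at the offset/size advertised in
 * pc->aux_offset / pc->aux_size. The locked-memory budget charged above
 * is sysctl_perf_event_mlock (in KiB) >> (PAGE_SHIFT - 10) pages,
 * scaled by num_online_cpus(); anything beyond that is charged to
 * pinned_vm and checked against RLIMIT_MEMLOCK.
 */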
6683
6684 static int perf_fasync(int fd, struct file *filp, int on)
6685 {
6686 struct inode *inode = file_inode(filp);
6687 struct perf_event *event = filp->private_data;
6688 int retval;
6689
6690 inode_lock(inode);
6691 retval = fasync_helper(fd, filp, on, &event->fasync);
6692 inode_unlock(inode);
6693
6694 if (retval < 0)
6695 return retval;
6696
6697 return 0;
6698 }
6699
6700 static const struct file_operations perf_fops = {
6701 .llseek = no_llseek,
6702 .release = perf_release,
6703 .read = perf_read,
6704 .poll = perf_poll,
6705 .unlocked_ioctl = perf_ioctl,
6706 .compat_ioctl = perf_compat_ioctl,
6707 .mmap = perf_mmap,
6708 .fasync = perf_fasync,
6709 };
6710
6711 /*
6712 * Perf event wakeup
6713 *
6714 * If there's data, ensure we set the poll() state and publish everything
6715 * to user-space before waking everybody up.
6716 */
6717
6718 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
6719 {
6720 /* only the parent has fasync state */
6721 if (event->parent)
6722 event = event->parent;
6723 return &event->fasync;
6724 }
6725
6726 void perf_event_wakeup(struct perf_event *event)
6727 {
6728 ring_buffer_wakeup(event);
6729
6730 if (event->pending_kill) {
6731 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
6732 event->pending_kill = 0;
6733 }
6734 }
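/*
 * The fasync state managed above enables the classic async I/O
 * notification pattern on a perf fd; a minimal userspace sketch:
 *
 *	fcntl(fd, F_SETOWN, getpid());	// route the signal to us
 *	fcntl(fd, F_SETFL, O_ASYNC);	// ends up in perf_fasync()
 *	// SIGIO now fires on overflow/wakeup via kill_fasync() above
 */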
6735
6736 static void perf_sigtrap(struct perf_event *event)
6737 {
6738 /*
6739 * We'd expect this to only occur if the irq_work is delayed and either
6740 * ctx->task or current has changed in the meantime. This can be the
6741 * case on architectures that do not implement arch_irq_work_raise().
6742 */
6743 if (WARN_ON_ONCE(event->ctx->task != current))
6744 return;
6745
6746 /*
6747 * Both perf_pending_task() and perf_pending_irq() can race with the
6748 * task exiting.
6749 */
6750 if (current->flags & PF_EXITING)
6751 return;
6752
6753 send_sig_perf((void __user *)event->pending_addr,
6754 event->orig_type, event->attr.sig_data);
6755 }
6756
6757 /*
6758 * Deliver the pending work in-event-context or follow the context.
6759 */
6760 static void __perf_pending_irq(struct perf_event *event)
6761 {
6762 int cpu = READ_ONCE(event->oncpu);
6763
6764 /*
6765 * If the event isn't running, we're done; event_sched_out() will have
6766 * taken care of things.
6767 */
6768 if (cpu < 0)
6769 return;
6770
6771 /*
6772 * Yay, we hit home and are in the context of the event.
6773 */
6774 if (cpu == smp_processor_id()) {
6775 if (event->pending_sigtrap) {
6776 event->pending_sigtrap = 0;
6777 perf_sigtrap(event);
6778 local_dec(&event->ctx->nr_pending);
6779 }
6780 if (event->pending_disable) {
6781 event->pending_disable = 0;
6782 perf_event_disable_local(event);
6783 }
6784 return;
6785 }
6786
6787 /*
6788 * CPU-A CPU-B
6789 *
6790 * perf_event_disable_inatomic()
6791 * @pending_disable = CPU-A;
6792 * irq_work_queue();
6793 *
6794 * sched-out
6795 * @pending_disable = -1;
6796 *
6797 * sched-in
6798 * perf_event_disable_inatomic()
6799 * @pending_disable = CPU-B;
6800 * irq_work_queue(); // FAILS
6801 *
6802 * irq_work_run()
6803 * perf_pending_irq()
6804 *
6805 * But the event runs on CPU-B and wants disabling there.
6806 */
6807 irq_work_queue_on(&event->pending_irq, cpu);
6808 }
6809
6810 static void perf_pending_irq(struct irq_work *entry)
6811 {
6812 struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
6813 int rctx;
6814
6815 /*
6816 * If we 'fail' here, that's OK, it means recursion is already disabled
6817 * and we won't recurse 'further'.
6818 */
6819 rctx = perf_swevent_get_recursion_context();
6820
6821 /*
6822 * The wakeup isn't bound to the context of the event -- it can happen
6823 * irrespective of where the event is.
6824 */
6825 if (event->pending_wakeup) {
6826 event->pending_wakeup = 0;
6827 perf_event_wakeup(event);
6828 }
6829
6830 __perf_pending_irq(event);
6831
6832 if (rctx >= 0)
6833 perf_swevent_put_recursion_context(rctx);
6834 }
6835
6836 static void perf_pending_task(struct callback_head *head)
6837 {
6838 struct perf_event *event = container_of(head, struct perf_event, pending_task);
6839 int rctx;
6840
6841 /*
6842 * All accesses to the event must belong to the same implicit RCU read-side
6843 * critical section as the ->pending_work reset. See comment in
6844 * perf_pending_task_sync().
6845 */
6846 preempt_disable_notrace();
6847 /*
6848 * If we 'fail' here, that's OK, it means recursion is already disabled
6849 * and we won't recurse 'further'.
6850 */
6851 rctx = perf_swevent_get_recursion_context();
6852
6853 if (event->pending_work) {
6854 event->pending_work = 0;
6855 perf_sigtrap(event);
6856 local_dec(&event->ctx->nr_pending);
6857 rcuwait_wake_up(&event->pending_work_wait);
6858 }
6859
6860 if (rctx >= 0)
6861 perf_swevent_put_recursion_context(rctx);
6862 preempt_enable_notrace();
6863 }
6864
6865 #ifdef CONFIG_GUEST_PERF_EVENTS
6866 struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
6867
6868 DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
6869 DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
6870 DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
6871
6872 void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
6873 {
6874 if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
6875 return;
6876
6877 rcu_assign_pointer(perf_guest_cbs, cbs);
6878 static_call_update(__perf_guest_state, cbs->state);
6879 static_call_update(__perf_guest_get_ip, cbs->get_ip);
6880
6881 /* Implementing ->handle_intel_pt_intr is optional. */
6882 if (cbs->handle_intel_pt_intr)
6883 static_call_update(__perf_guest_handle_intel_pt_intr,
6884 cbs->handle_intel_pt_intr);
6885 }
6886 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
6887
6888 void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
6889 {
6890 if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
6891 return;
6892
6893 rcu_assign_pointer(perf_guest_cbs, NULL);
6894 static_call_update(__perf_guest_state, (void *)&__static_call_return0);
6895 static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0);
6896 static_call_update(__perf_guest_handle_intel_pt_intr,
6897 (void *)&__static_call_return0);
6898 synchronize_rcu();
6899 }
6900 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
6901 #endif
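/*
 * A registration sketch, loosely modeled on what KVM does on x86; the
 * struct fields are real, the callback names are illustrative:
 *
 *	static struct perf_guest_info_callbacks my_cbs = {
 *		.state	= my_guest_state,	// returns PERF_GUEST_* bits
 *		.get_ip	= my_guest_get_ip,	// guest instruction pointer
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_cbs);
 */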
6902
6903 static void
6904 perf_output_sample_regs(struct perf_output_handle *handle,
6905 struct pt_regs *regs, u64 mask)
6906 {
6907 int bit;
6908 DECLARE_BITMAP(_mask, 64);
6909
6910 bitmap_from_u64(_mask, mask);
6911 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
6912 u64 val;
6913
6914 val = perf_reg_value(regs, bit);
6915 perf_output_put(handle, val);
6916 }
6917 }
6918
6919 static void perf_sample_regs_user(struct perf_regs *regs_user,
6920 struct pt_regs *regs)
6921 {
6922 if (user_mode(regs)) {
6923 regs_user->abi = perf_reg_abi(current);
6924 regs_user->regs = regs;
6925 } else if (!(current->flags & PF_KTHREAD)) {
6926 perf_get_regs_user(regs_user, regs);
6927 } else {
6928 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
6929 regs_user->regs = NULL;
6930 }
6931 }
6932
6933 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
6934 struct pt_regs *regs)
6935 {
6936 regs_intr->regs = regs;
6937 regs_intr->abi = perf_reg_abi(current);
6938 }
6939
6940
6941 /*
6942 * Get remaining task size from user stack pointer.
6943 *
6944 * It'd be better to take the stack VMA into account and limit this
6945 * more precisely, but there's no way to get it safely in interrupt
6946 * context, so we use TASK_SIZE as the limit.
6947 */
6948 static u64 perf_ustack_task_size(struct pt_regs *regs)
6949 {
6950 unsigned long addr = perf_user_stack_pointer(regs);
6951
6952 if (!addr || addr >= TASK_SIZE)
6953 return 0;
6954
6955 return TASK_SIZE - addr;
6956 }
6957
6958 static u16
6959 perf_sample_ustack_size(u16 stack_size, u16 header_size,
6960 struct pt_regs *regs)
6961 {
6962 u64 task_size;
6963
6964 /* No regs, no stack pointer, no dump. */
6965 if (!regs)
6966 return 0;
6967
6968 /*
6969 * Check whether the requested stack size fits into:
6970 * - TASK_SIZE
6971 * If it doesn't, we limit the size to TASK_SIZE.
6972 *
6973 * - the remaining sample size
6974 * If it doesn't, we shrink the stack size to fit
6975 * into the remaining sample size.
6976 */
6977
6978 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
6979 stack_size = min(stack_size, (u16) task_size);
6980
6981 /* Current header size plus static size and dynamic size. */
6982 header_size += 2 * sizeof(u64);
6983
6984 /* Does header size plus stack dump size still fit in the u16 record size? */
6985 if ((u16) (header_size + stack_size) < header_size) {
6986 /*
6987 * If we overflow the maximum size for the sample,
6988 * we customize the stack dump size to fit in.
6989 */
6990 stack_size = USHRT_MAX - header_size - sizeof(u64);
6991 stack_size = round_up(stack_size, sizeof(u64));
6992 }
6993
6994 return stack_size;
6995 }
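/*
 * Worked example of the clamping above, assuming a 65528-byte dump is
 * requested while header_size is 64: header_size grows to 80 for the
 * static and dynamic size fields, 80 + 65528 wraps a u16, so the dump
 * is clamped to round_up(65535 - 80 - 8, 8) = 65448 bytes.
 */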
6996
6997 static void
6998 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
6999 struct pt_regs *regs)
7000 {
7001 /* Case of a kernel thread, nothing to dump */
7002 if (!regs) {
7003 u64 size = 0;
7004 perf_output_put(handle, size);
7005 } else {
7006 unsigned long sp;
7007 unsigned int rem;
7008 u64 dyn_size;
7009
7010 /*
7011 * We dump:
7012 * static size
7013 * - the size requested by the user, or the best one we can
7014 * fit into the sample max size
7015 * data
7016 * - user stack dump data
7017 * dynamic size
7018 * - the actual dumped size
7019 */
7020
7021 /* Static size. */
7022 perf_output_put(handle, dump_size);
7023
7024 /* Data. */
7025 sp = perf_user_stack_pointer(regs);
7026 rem = __output_copy_user(handle, (void *) sp, dump_size);
7027 dyn_size = dump_size - rem;
7028
7029 perf_output_skip(handle, rem);
7030
7031 /* Dynamic size. */
7032 perf_output_put(handle, dyn_size);
7033 }
7034 }
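/*
 * The resulting PERF_SAMPLE_STACK_USER payload, as seen by userspace
 * (field names per perf_event_open(2)):
 *
 *	u64 size;	// static size (0 for kernel threads)
 *	char data[size];// raw stack bytes, zero-padded past dyn_size
 *	u64 dyn_size;	// bytes actually copied; omitted when size == 0
 */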
7035
7036 static unsigned long perf_prepare_sample_aux(struct perf_event *event,
7037 struct perf_sample_data *data,
7038 size_t size)
7039 {
7040 struct perf_event *sampler = event->aux_event;
7041 struct perf_buffer *rb;
7042
7043 data->aux_size = 0;
7044
7045 if (!sampler)
7046 goto out;
7047
7048 if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
7049 goto out;
7050
7051 if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
7052 goto out;
7053
7054 rb = ring_buffer_get(sampler);
7055 if (!rb)
7056 goto out;
7057
7058 /*
7059 * If this is an NMI hit inside sampling code, don't take
7060 * the sample. See also perf_aux_sample_output().
7061 */
7062 if (READ_ONCE(rb->aux_in_sampling)) {
7063 data->aux_size = 0;
7064 } else {
7065 size = min_t(size_t, size, perf_aux_size(rb));
7066 data->aux_size = ALIGN(size, sizeof(u64));
7067 }
7068 ring_buffer_put(rb);
7069
7070 out:
7071 return data->aux_size;
7072 }
7073
7074 static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
7075 struct perf_event *event,
7076 struct perf_output_handle *handle,
7077 unsigned long size)
7078 {
7079 unsigned long flags;
7080 long ret;
7081
7082 /*
7083 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler
7084 * paths. If we start calling them in NMI context, they may race with
7085 * the IRQ ones, that is, for example, re-starting an event that's just
7086 * been stopped, which is why we're using a separate callback that
7087 * doesn't change the event state.
7088 *
7089 * IRQs need to be disabled to prevent IPIs from racing with us.
7090 */
7091 local_irq_save(flags);
7092 /*
7093 * Guard against NMI hits inside the critical section;
7094 * see also perf_prepare_sample_aux().
7095 */
7096 WRITE_ONCE(rb->aux_in_sampling, 1);
7097 barrier();
7098
7099 ret = event->pmu->snapshot_aux(event, handle, size);
7100
7101 barrier();
7102 WRITE_ONCE(rb->aux_in_sampling, 0);
7103 local_irq_restore(flags);
7104
7105 return ret;
7106 }
7107
7108 static void perf_aux_sample_output(struct perf_event *event,
7109 struct perf_output_handle *handle,
7110 struct perf_sample_data *data)
7111 {
7112 struct perf_event *sampler = event->aux_event;
7113 struct perf_buffer *rb;
7114 unsigned long pad;
7115 long size;
7116
7117 if (WARN_ON_ONCE(!sampler || !data->aux_size))
7118 return;
7119
7120 rb = ring_buffer_get(sampler);
7121 if (!rb)
7122 return;
7123
7124 size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
7125
7126 /*
7127 * An error here means that perf_output_copy() failed (returned a
7128 * non-zero surplus that it didn't copy), which in its current
7129 * enlightened implementation is not possible. If that changes, we'd
7130 * like to know.
7131 */
7132 if (WARN_ON_ONCE(size < 0))
7133 goto out_put;
7134
7135 /*
7136 * The pad comes from ALIGN()ing data->aux_size up to u64 in
7137 * perf_prepare_sample_aux(), so should not be more than that.
7138 */
7139 pad = data->aux_size - size;
7140 if (WARN_ON_ONCE(pad >= sizeof(u64)))
7141 pad = 8;
7142
7143 if (pad) {
7144 u64 zero = 0;
7145 perf_output_copy(handle, &zero, pad);
7146 }
7147
7148 out_put:
7149 ring_buffer_put(rb);
7150 }
7151
7152 /*
7153 * A set of common sample data types saved even for non-sample records
7154 * when event->attr.sample_id_all is set.
7155 */
7156 #define PERF_SAMPLE_ID_ALL (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
7157 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
7158 PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
7159
7160 static void __perf_event_header__init_id(struct perf_sample_data *data,
7161 struct perf_event *event,
7162 u64 sample_type)
7163 {
7164 data->type = event->attr.sample_type;
7165 data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL;
7166
7167 if (sample_type & PERF_SAMPLE_TID) {
7168 /* namespace issues */
7169 data->tid_entry.pid = perf_event_pid(event, current);
7170 data->tid_entry.tid = perf_event_tid(event, current);
7171 }
7172
7173 if (sample_type & PERF_SAMPLE_TIME)
7174 data->time = perf_event_clock(event);
7175
7176 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
7177 data->id = primary_event_id(event);
7178
7179 if (sample_type & PERF_SAMPLE_STREAM_ID)
7180 data->stream_id = event->id;
7181
7182 if (sample_type & PERF_SAMPLE_CPU) {
7183 data->cpu_entry.cpu = raw_smp_processor_id();
7184 data->cpu_entry.reserved = 0;
7185 }
7186 }
7187
7188 void perf_event_header__init_id(struct perf_event_header *header,
7189 struct perf_sample_data *data,
7190 struct perf_event *event)
7191 {
7192 if (event->attr.sample_id_all) {
7193 header->size += event->id_header_size;
7194 __perf_event_header__init_id(data, event, event->attr.sample_type);
7195 }
7196 }
7197
7198 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
7199 struct perf_sample_data *data)
7200 {
7201 u64 sample_type = data->type;
7202
7203 if (sample_type & PERF_SAMPLE_TID)
7204 perf_output_put(handle, data->tid_entry);
7205
7206 if (sample_type & PERF_SAMPLE_TIME)
7207 perf_output_put(handle, data->time);
7208
7209 if (sample_type & PERF_SAMPLE_ID)
7210 perf_output_put(handle, data->id);
7211
7212 if (sample_type & PERF_SAMPLE_STREAM_ID)
7213 perf_output_put(handle, data->stream_id);
7214
7215 if (sample_type & PERF_SAMPLE_CPU)
7216 perf_output_put(handle, data->cpu_entry);
7217
7218 if (sample_type & PERF_SAMPLE_IDENTIFIER)
7219 perf_output_put(handle, data->id);
7220 }
7221
7222 void perf_event__output_id_sample(struct perf_event *event,
7223 struct perf_output_handle *handle,
7224 struct perf_sample_data *sample)
7225 {
7226 if (event->attr.sample_id_all)
7227 __perf_event__output_id_sample(handle, sample);
7228 }
7229
7230 static void perf_output_read_one(struct perf_output_handle *handle,
7231 struct perf_event *event,
7232 u64 enabled, u64 running)
7233 {
7234 u64 read_format = event->attr.read_format;
7235 u64 values[5];
7236 int n = 0;
7237
7238 values[n++] = perf_event_count(event);
7239 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
7240 values[n++] = enabled +
7241 atomic64_read(&event->child_total_time_enabled);
7242 }
7243 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
7244 values[n++] = running +
7245 atomic64_read(&event->child_total_time_running);
7246 }
7247 if (read_format & PERF_FORMAT_ID)
7248 values[n++] = primary_event_id(event);
7249 if (read_format & PERF_FORMAT_LOST)
7250 values[n++] = atomic64_read(&event->lost_samples);
7251
7252 __output_copy(handle, values, n * sizeof(u64));
7253 }
7254
7255 static void perf_output_read_group(struct perf_output_handle *handle,
7256 struct perf_event *event,
7257 u64 enabled, u64 running)
7258 {
7259 struct perf_event *leader = event->group_leader, *sub;
7260 u64 read_format = event->attr.read_format;
7261 unsigned long flags;
7262 u64 values[6];
7263 int n = 0;
7264
7265 /*
7266 * Disabling interrupts avoids all counter scheduling
7267 * (context switches, timer based rotation and IPIs).
7268 */
7269 local_irq_save(flags);
7270
7271 values[n++] = 1 + leader->nr_siblings;
7272
7273 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
7274 values[n++] = enabled;
7275
7276 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
7277 values[n++] = running;
7278
7279 if ((leader != event) &&
7280 (leader->state == PERF_EVENT_STATE_ACTIVE))
7281 leader->pmu->read(leader);
7282
7283 values[n++] = perf_event_count(leader);
7284 if (read_format & PERF_FORMAT_ID)
7285 values[n++] = primary_event_id(leader);
7286 if (read_format & PERF_FORMAT_LOST)
7287 values[n++] = atomic64_read(&leader->lost_samples);
7288
7289 __output_copy(handle, values, n * sizeof(u64));
7290
7291 for_each_sibling_event(sub, leader) {
7292 n = 0;
7293
7294 if ((sub != event) &&
7295 (sub->state == PERF_EVENT_STATE_ACTIVE))
7296 sub->pmu->read(sub);
7297
7298 values[n++] = perf_event_count(sub);
7299 if (read_format & PERF_FORMAT_ID)
7300 values[n++] = primary_event_id(sub);
7301 if (read_format & PERF_FORMAT_LOST)
7302 values[n++] = atomic64_read(&sub->lost_samples);
7303
7304 __output_copy(handle, values, n * sizeof(u64));
7305 }
7306
7307 local_irq_restore(flags);
7308 }
7309
7310 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
7311 PERF_FORMAT_TOTAL_TIME_RUNNING)
7312
7313 /*
7314 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
7315 *
7316 * The problem is that it's both hard and excessively expensive to iterate the
7317 * child list, not to mention that it's impossible to IPI the children running
7318 * on another CPU, from interrupt/NMI context.
7319 */
7320 static void perf_output_read(struct perf_output_handle *handle,
7321 struct perf_event *event)
7322 {
7323 u64 enabled = 0, running = 0, now;
7324 u64 read_format = event->attr.read_format;
7325
7326 /*
7327 * compute total_time_enabled, total_time_running
7328 * based on snapshot values taken when the event
7329 * was last scheduled in.
7330 *
7331 * we cannot simply call update_context_time()
7332 * because of locking issues, as we are called in
7333 * NMI context
7334 */
7335 if (read_format & PERF_FORMAT_TOTAL_TIMES)
7336 calc_timer_values(event, &now, &enabled, &running);
7337
7338 if (event->attr.read_format & PERF_FORMAT_GROUP)
7339 perf_output_read_group(handle, event, enabled, running);
7340 else
7341 perf_output_read_one(handle, event, enabled, running);
7342 }
7343
7344 void perf_output_sample(struct perf_output_handle *handle,
7345 struct perf_event_header *header,
7346 struct perf_sample_data *data,
7347 struct perf_event *event)
7348 {
7349 u64 sample_type = data->type;
7350
7351 perf_output_put(handle, *header);
7352
7353 if (sample_type & PERF_SAMPLE_IDENTIFIER)
7354 perf_output_put(handle, data->id);
7355
7356 if (sample_type & PERF_SAMPLE_IP)
7357 perf_output_put(handle, data->ip);
7358
7359 if (sample_type & PERF_SAMPLE_TID)
7360 perf_output_put(handle, data->tid_entry);
7361
7362 if (sample_type & PERF_SAMPLE_TIME)
7363 perf_output_put(handle, data->time);
7364
7365 if (sample_type & PERF_SAMPLE_ADDR)
7366 perf_output_put(handle, data->addr);
7367
7368 if (sample_type & PERF_SAMPLE_ID)
7369 perf_output_put(handle, data->id);
7370
7371 if (sample_type & PERF_SAMPLE_STREAM_ID)
7372 perf_output_put(handle, data->stream_id);
7373
7374 if (sample_type & PERF_SAMPLE_CPU)
7375 perf_output_put(handle, data->cpu_entry);
7376
7377 if (sample_type & PERF_SAMPLE_PERIOD)
7378 perf_output_put(handle, data->period);
7379
7380 if (sample_type & PERF_SAMPLE_READ)
7381 perf_output_read(handle, event);
7382
7383 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
7384 int size = 1;
7385
7386 size += data->callchain->nr;
7387 size *= sizeof(u64);
7388 __output_copy(handle, data->callchain, size);
7389 }
7390
7391 if (sample_type & PERF_SAMPLE_RAW) {
7392 struct perf_raw_record *raw = data->raw;
7393
7394 if (raw) {
7395 struct perf_raw_frag *frag = &raw->frag;
7396
7397 perf_output_put(handle, raw->size);
7398 do {
7399 if (frag->copy) {
7400 __output_custom(handle, frag->copy,
7401 frag->data, frag->size);
7402 } else {
7403 __output_copy(handle, frag->data,
7404 frag->size);
7405 }
7406 if (perf_raw_frag_last(frag))
7407 break;
7408 frag = frag->next;
7409 } while (1);
7410 if (frag->pad)
7411 __output_skip(handle, NULL, frag->pad);
7412 } else {
7413 struct {
7414 u32 size;
7415 u32 data;
7416 } raw = {
7417 .size = sizeof(u32),
7418 .data = 0,
7419 };
7420 perf_output_put(handle, raw);
7421 }
7422 }
7423
7424 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
7425 if (data->br_stack) {
7426 size_t size;
7427
7428 size = data->br_stack->nr
7429 * sizeof(struct perf_branch_entry);
7430
7431 perf_output_put(handle, data->br_stack->nr);
7432 if (branch_sample_hw_index(event))
7433 perf_output_put(handle, data->br_stack->hw_idx);
7434 perf_output_copy(handle, data->br_stack->entries, size);
7435 } else {
7436 /*
7437 * we always store at least the value of nr
7438 */
7439 u64 nr = 0;
7440 perf_output_put(handle, nr);
7441 }
7442 }
7443
7444 if (sample_type & PERF_SAMPLE_REGS_USER) {
7445 u64 abi = data->regs_user.abi;
7446
7447 /*
7448 * If there are no regs to dump, signal it via the
7449 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
7450 */
7451 perf_output_put(handle, abi);
7452
7453 if (abi) {
7454 u64 mask = event->attr.sample_regs_user;
7455 perf_output_sample_regs(handle,
7456 data->regs_user.regs,
7457 mask);
7458 }
7459 }
7460
7461 if (sample_type & PERF_SAMPLE_STACK_USER) {
7462 perf_output_sample_ustack(handle,
7463 data->stack_user_size,
7464 data->regs_user.regs);
7465 }
7466
7467 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
7468 perf_output_put(handle, data->weight.full);
7469
7470 if (sample_type & PERF_SAMPLE_DATA_SRC)
7471 perf_output_put(handle, data->data_src.val);
7472
7473 if (sample_type & PERF_SAMPLE_TRANSACTION)
7474 perf_output_put(handle, data->txn);
7475
7476 if (sample_type & PERF_SAMPLE_REGS_INTR) {
7477 u64 abi = data->regs_intr.abi;
7478 /*
7479 * If there are no regs to dump, signal it via the
7480 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
7481 */
7482 perf_output_put(handle, abi);
7483
7484 if (abi) {
7485 u64 mask = event->attr.sample_regs_intr;
7486
7487 perf_output_sample_regs(handle,
7488 data->regs_intr.regs,
7489 mask);
7490 }
7491 }
7492
7493 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
7494 perf_output_put(handle, data->phys_addr);
7495
7496 if (sample_type & PERF_SAMPLE_CGROUP)
7497 perf_output_put(handle, data->cgroup);
7498
7499 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
7500 perf_output_put(handle, data->data_page_size);
7501
7502 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
7503 perf_output_put(handle, data->code_page_size);
7504
7505 if (sample_type & PERF_SAMPLE_AUX) {
7506 perf_output_put(handle, data->aux_size);
7507
7508 if (data->aux_size)
7509 perf_aux_sample_output(event, handle, data);
7510 }
7511
7512 if (!event->attr.watermark) {
7513 int wakeup_events = event->attr.wakeup_events;
7514
7515 if (wakeup_events) {
7516 struct perf_buffer *rb = handle->rb;
7517 int events = local_inc_return(&rb->events);
7518
7519 if (events >= wakeup_events) {
7520 local_sub(wakeup_events, &rb->events);
7521 local_inc(&rb->wakeup);
7522 }
7523 }
7524 }
7525 }
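
/*
 * Note that the PERF_SAMPLE_* fields above are emitted in this fixed,
 * ABI-defined order; a field is present iff its bit was requested in
 * attr.sample_type, so consumers must walk the record in the same order.
 */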
7526
7527 static u64 perf_virt_to_phys(u64 virt)
7528 {
7529 u64 phys_addr = 0;
7530
7531 if (!virt)
7532 return 0;
7533
7534 if (virt >= TASK_SIZE) {
7535 /* If it's vmalloc()d memory, leave phys_addr as 0 */
7536 if (virt_addr_valid((void *)(uintptr_t)virt) &&
7537 !(virt >= VMALLOC_START && virt < VMALLOC_END))
7538 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
7539 } else {
7540 /*
7541 * Walking the page tables for a user address.
7542 * Interrupts are disabled, which prevents any tear down
7543 * of the page tables.
7544 * Try the IRQ-safe get_user_page_fast_only() first.
7545 * If that fails, leave phys_addr as 0.
7546 */
7547 if (current->mm != NULL) {
7548 struct page *p;
7549
7550 pagefault_disable();
7551 if (get_user_page_fast_only(virt, 0, &p)) {
7552 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
7553 put_page(p);
7554 }
7555 pagefault_enable();
7556 }
7557 }
7558
7559 return phys_addr;
7560 }
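
/*
 * E.g. with 4KiB pages, user address 0x7f0000001234 yields the physical
 * base of its backing page plus the in-page offset (virt % PAGE_SIZE ==
 * 0x234); the page reference is dropped right away since only the
 * address is needed, not the page contents.
 */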
7561
7562 /*
7563 * Return the pagetable size of a given virtual address.
7564 */
7565 static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
7566 {
7567 u64 size = 0;
7568
7569 #ifdef CONFIG_HAVE_FAST_GUP
7570 pgd_t *pgdp, pgd;
7571 p4d_t *p4dp, p4d;
7572 pud_t *pudp, pud;
7573 pmd_t *pmdp, pmd;
7574 pte_t *ptep, pte;
7575
7576 pgdp = pgd_offset(mm, addr);
7577 pgd = READ_ONCE(*pgdp);
7578 if (pgd_none(pgd))
7579 return 0;
7580
7581 if (pgd_leaf(pgd))
7582 return pgd_leaf_size(pgd);
7583
7584 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
7585 p4d = READ_ONCE(*p4dp);
7586 if (!p4d_present(p4d))
7587 return 0;
7588
7589 if (p4d_leaf(p4d))
7590 return p4d_leaf_size(p4d);
7591
7592 pudp = pud_offset_lockless(p4dp, p4d, addr);
7593 pud = READ_ONCE(*pudp);
7594 if (!pud_present(pud))
7595 return 0;
7596
7597 if (pud_leaf(pud))
7598 return pud_leaf_size(pud);
7599
7600 pmdp = pmd_offset_lockless(pudp, pud, addr);
7601 again:
7602 pmd = pmdp_get_lockless(pmdp);
7603 if (!pmd_present(pmd))
7604 return 0;
7605
7606 if (pmd_leaf(pmd))
7607 return pmd_leaf_size(pmd);
7608
7609 ptep = pte_offset_map(&pmd, addr);
7610 if (!ptep)
7611 goto again;
7612
7613 pte = ptep_get_lockless(ptep);
7614 if (pte_present(pte))
7615 size = pte_leaf_size(pte);
7616 pte_unmap(ptep);
7617 #endif /* CONFIG_HAVE_FAST_GUP */
7618
7619 return size;
7620 }
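
/*
 * On x86-64, for instance, this walk reports 4KiB for a PTE-mapped page,
 * 2MiB for a PMD leaf and 1GiB for a PUD leaf; 0 means the address was
 * not mapped at any level.
 */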
7621
7622 static u64 perf_get_page_size(unsigned long addr)
7623 {
7624 struct mm_struct *mm;
7625 unsigned long flags;
7626 u64 size;
7627
7628 if (!addr)
7629 return 0;
7630
7631 /*
7632 * Software page-table walkers must disable IRQs,
7633 * which prevents any tear down of the page tables.
7634 */
7635 local_irq_save(flags);
7636
7637 mm = current->mm;
7638 if (!mm) {
7639 /*
7640 * For kernel threads and the like, use init_mm so that
7641 * we can find kernel memory.
7642 */
7643 mm = &init_mm;
7644 }
7645
7646 size = perf_get_pgtable_size(mm, addr);
7647
7648 local_irq_restore(flags);
7649
7650 return size;
7651 }
7652
7653 static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
7654
7655 struct perf_callchain_entry *
7656 perf_callchain(struct perf_event *event, struct pt_regs *regs)
7657 {
7658 bool kernel = !event->attr.exclude_callchain_kernel;
7659 bool user = !event->attr.exclude_callchain_user;
7660 /* Disallow cross-task user callchains. */
7661 bool crosstask = event->ctx->task && event->ctx->task != current;
7662 const u32 max_stack = event->attr.sample_max_stack;
7663 struct perf_callchain_entry *callchain;
7664
7665 if (!kernel && !user)
7666 return &__empty_callchain;
7667
7668 callchain = get_perf_callchain(regs, 0, kernel, user,
7669 max_stack, crosstask, true);
7670 return callchain ?: &__empty_callchain;
7671 }
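
/*
 * Returning &__empty_callchain instead of NULL lets callers emit a
 * zero-length callchain (nr == 0) without special-casing failure.
 */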
7672
7673 static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
7674 {
7675 return d * !!(flags & s);
7676 }
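
/*
 * Example: __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
 * PERF_SAMPLE_REGS_USER) evaluates to PERF_SAMPLE_REGS_USER when
 * PERF_SAMPLE_STACK_USER is set in sample_type and to 0 otherwise,
 * i.e. requesting a user stack dump implies needing the user regs.
 */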
7677
7678 void perf_prepare_sample(struct perf_sample_data *data,
7679 struct perf_event *event,
7680 struct pt_regs *regs)
7681 {
7682 u64 sample_type = event->attr.sample_type;
7683 u64 filtered_sample_type;
7684
7685 /*
7686 * Add the sample flags that are dependent on others, and clear the
7687 * sample flags that have already been filled in by the PMU driver.
7688 */
7689 filtered_sample_type = sample_type;
7690 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_CODE_PAGE_SIZE,
7691 PERF_SAMPLE_IP);
7692 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_DATA_PAGE_SIZE |
7693 PERF_SAMPLE_PHYS_ADDR, PERF_SAMPLE_ADDR);
7694 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
7695 PERF_SAMPLE_REGS_USER);
7696 filtered_sample_type &= ~data->sample_flags;
7697
7698 if (filtered_sample_type == 0) {
7699 /* Make sure it has the correct data->type for output */
7700 data->type = event->attr.sample_type;
7701 return;
7702 }
7703
7704 __perf_event_header__init_id(data, event, filtered_sample_type);
7705
7706 if (filtered_sample_type & PERF_SAMPLE_IP) {
7707 data->ip = perf_instruction_pointer(regs);
7708 data->sample_flags |= PERF_SAMPLE_IP;
7709 }
7710
7711 if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
7712 perf_sample_save_callchain(data, event, regs);
7713
7714 if (filtered_sample_type & PERF_SAMPLE_RAW) {
7715 data->raw = NULL;
7716 data->dyn_size += sizeof(u64);
7717 data->sample_flags |= PERF_SAMPLE_RAW;
7718 }
7719
7720 if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
7721 data->br_stack = NULL;
7722 data->dyn_size += sizeof(u64);
7723 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
7724 }
7725
7726 if (filtered_sample_type & PERF_SAMPLE_REGS_USER)
7727 perf_sample_regs_user(&data->regs_user, regs);
7728
7729 /*
7730 * We cannot use filtered_sample_type here as REGS_USER can be set
7731 * by STACK_USER (using __cond_set() above) and we don't want to update
7732 * the dyn_size if it's not requested by users.
7733 */
7734 if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) {
7735 /* regs dump ABI info */
7736 int size = sizeof(u64);
7737
7738 if (data->regs_user.regs) {
7739 u64 mask = event->attr.sample_regs_user;
7740 size += hweight64(mask) * sizeof(u64);
7741 }
7742
7743 data->dyn_size += size;
7744 data->sample_flags |= PERF_SAMPLE_REGS_USER;
7745 }
7746
7747 if (filtered_sample_type & PERF_SAMPLE_STACK_USER) {
7748 /*
7749 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
7750 * processed last, or an additional check must be added when a
7751 * new sample type is introduced, because it could otherwise eat
7752 * up the rest of the sample size.
7753 */
7754 u16 stack_size = event->attr.sample_stack_user;
7755 u16 header_size = perf_sample_data_size(data, event);
7756 u16 size = sizeof(u64);
7757
7758 stack_size = perf_sample_ustack_size(stack_size, header_size,
7759 data->regs_user.regs);
7760
7761 /*
7762 * If there is something to dump, add space for the dump
7763 * itself and for the field that tells the dynamic size,
7764 * which is how many have been actually dumped.
7765 */
7766 if (stack_size)
7767 size += sizeof(u64) + stack_size;
7768
7769 data->stack_user_size = stack_size;
7770 data->dyn_size += size;
7771 data->sample_flags |= PERF_SAMPLE_STACK_USER;
7772 }
7773
7774 if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
7775 data->weight.full = 0;
7776 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
7777 }
7778
7779 if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) {
7780 data->data_src.val = PERF_MEM_NA;
7781 data->sample_flags |= PERF_SAMPLE_DATA_SRC;
7782 }
7783
7784 if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) {
7785 data->txn = 0;
7786 data->sample_flags |= PERF_SAMPLE_TRANSACTION;
7787 }
7788
7789 if (filtered_sample_type & PERF_SAMPLE_ADDR) {
7790 data->addr = 0;
7791 data->sample_flags |= PERF_SAMPLE_ADDR;
7792 }
7793
7794 if (filtered_sample_type & PERF_SAMPLE_REGS_INTR) {
7795 /* regs dump ABI info */
7796 int size = sizeof(u64);
7797
7798 perf_sample_regs_intr(&data->regs_intr, regs);
7799
7800 if (data->regs_intr.regs) {
7801 u64 mask = event->attr.sample_regs_intr;
7802
7803 size += hweight64(mask) * sizeof(u64);
7804 }
7805
7806 data->dyn_size += size;
7807 data->sample_flags |= PERF_SAMPLE_REGS_INTR;
7808 }
7809
7810 if (filtered_sample_type & PERF_SAMPLE_PHYS_ADDR) {
7811 data->phys_addr = perf_virt_to_phys(data->addr);
7812 data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
7813 }
7814
7815 #ifdef CONFIG_CGROUP_PERF
7816 if (filtered_sample_type & PERF_SAMPLE_CGROUP) {
7817 struct cgroup *cgrp;
7818
7819 /* protected by RCU */
7820 cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
7821 data->cgroup = cgroup_id(cgrp);
7822 data->sample_flags |= PERF_SAMPLE_CGROUP;
7823 }
7824 #endif
7825
7826 /*
7827 * PERF_SAMPLE_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user
7828 * doesn't request PERF_SAMPLE_ADDR, the kernel implicitly retrieves
7829 * data->addr, but the value will not be dumped to userspace.
7830 */
7831 if (filtered_sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) {
7832 data->data_page_size = perf_get_page_size(data->addr);
7833 data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE;
7834 }
7835
7836 if (filtered_sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) {
7837 data->code_page_size = perf_get_page_size(data->ip);
7838 data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE;
7839 }
7840
7841 if (filtered_sample_type & PERF_SAMPLE_AUX) {
7842 u64 size;
7843 u16 header_size = perf_sample_data_size(data, event);
7844
7845 header_size += sizeof(u64); /* size */
7846
7847 /*
7848 * Given the 16bit nature of header::size, an AUX sample can
7849 * easily overflow it, what with all the preceding sample bits.
7850 * Make sure this doesn't happen by using up to U16_MAX bytes
7851 * per sample in total (rounded down to 8 byte boundary).
7852 */
7853 size = min_t(size_t, U16_MAX - header_size,
7854 event->attr.aux_sample_size);
7855 size = rounddown(size, 8);
7856 size = perf_prepare_sample_aux(event, data, size);
7857
7858 WARN_ON_ONCE(size + header_size > U16_MAX);
7859 data->dyn_size += size + sizeof(u64); /* size above */
7860 data->sample_flags |= PERF_SAMPLE_AUX;
7861 }
7862 }
7863
7864 void perf_prepare_header(struct perf_event_header *header,
7865 struct perf_sample_data *data,
7866 struct perf_event *event,
7867 struct pt_regs *regs)
7868 {
7869 header->type = PERF_RECORD_SAMPLE;
7870 header->size = perf_sample_data_size(data, event);
7871 header->misc = perf_misc_flags(regs);
7872
7873 /*
7874 * If you're adding more sample types here, you likely need to do
7875 * something about the overflowing header::size, like repurpose the
7876 * lowest 3 bits of size, which should be always zero at the moment.
7877 * This raises a more important question: do we really need 512k-sized
7878 * samples, and why? Good argumentation is in order for whatever you
7879 * do here next.
7880 */
7881 WARN_ON_ONCE(header->size & 7);
7882 }
7883
7884 static __always_inline int
7885 __perf_event_output(struct perf_event *event,
7886 struct perf_sample_data *data,
7887 struct pt_regs *regs,
7888 int (*output_begin)(struct perf_output_handle *,
7889 struct perf_sample_data *,
7890 struct perf_event *,
7891 unsigned int))
7892 {
7893 struct perf_output_handle handle;
7894 struct perf_event_header header;
7895 int err;
7896
7897 /* protect the callchain buffers */
7898 rcu_read_lock();
7899
7900 perf_prepare_sample(data, event, regs);
7901 perf_prepare_header(&header, data, event, regs);
7902
7903 err = output_begin(&handle, data, event, header.size);
7904 if (err)
7905 goto exit;
7906
7907 perf_output_sample(&handle, &header, data, event);
7908
7909 perf_output_end(&handle);
7910
7911 exit:
7912 rcu_read_unlock();
7913 return err;
7914 }
7915
7916 void
7917 perf_event_output_forward(struct perf_event *event,
7918 struct perf_sample_data *data,
7919 struct pt_regs *regs)
7920 {
7921 __perf_event_output(event, data, regs, perf_output_begin_forward);
7922 }
7923
7924 void
7925 perf_event_output_backward(struct perf_event *event,
7926 struct perf_sample_data *data,
7927 struct pt_regs *regs)
7928 {
7929 __perf_event_output(event, data, regs, perf_output_begin_backward);
7930 }
7931
7932 int
7933 perf_event_output(struct perf_event *event,
7934 struct perf_sample_data *data,
7935 struct pt_regs *regs)
7936 {
7937 return __perf_event_output(event, data, regs, perf_output_begin);
7938 }
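
/*
 * perf_event_output() picks the forward or backward direction inside
 * perf_output_begin(), based on event->attr.write_backward; the two
 * explicit variants above exist so overflow handlers can be wired to
 * the right direction once, at event creation time.
 */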
7939
7940 /*
7941 * read event_id
7942 */
7943
7944 struct perf_read_event {
7945 struct perf_event_header header;
7946
7947 u32 pid;
7948 u32 tid;
7949 };
7950
7951 static void
7952 perf_event_read_event(struct perf_event *event,
7953 struct task_struct *task)
7954 {
7955 struct perf_output_handle handle;
7956 struct perf_sample_data sample;
7957 struct perf_read_event read_event = {
7958 .header = {
7959 .type = PERF_RECORD_READ,
7960 .misc = 0,
7961 .size = sizeof(read_event) + event->read_size,
7962 },
7963 .pid = perf_event_pid(event, task),
7964 .tid = perf_event_tid(event, task),
7965 };
7966 int ret;
7967
7968 perf_event_header__init_id(&read_event.header, &sample, event);
7969 ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
7970 if (ret)
7971 return;
7972
7973 perf_output_put(&handle, read_event);
7974 perf_output_read(&handle, event);
7975 perf_event__output_id_sample(event, &handle, &sample);
7976
7977 perf_output_end(&handle);
7978 }
7979
7980 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7981
7982 static void
7983 perf_iterate_ctx(struct perf_event_context *ctx,
7984 perf_iterate_f output,
7985 void *data, bool all)
7986 {
7987 struct perf_event *event;
7988
7989 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7990 if (!all) {
7991 if (event->state < PERF_EVENT_STATE_INACTIVE)
7992 continue;
7993 if (!event_filter_match(event))
7994 continue;
7995 }
7996
7997 output(event, data);
7998 }
7999 }
8000
8001 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
8002 {
8003 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
8004 struct perf_event *event;
8005
8006 list_for_each_entry_rcu(event, &pel->list, sb_list) {
8007 /*
8008 * Skip events that are not fully formed yet; ensure that
8009 * if we observe event->ctx, both event and ctx will be
8010 * complete enough. See perf_install_in_context().
8011 */
8012 if (!smp_load_acquire(&event->ctx))
8013 continue;
8014
8015 if (event->state < PERF_EVENT_STATE_INACTIVE)
8016 continue;
8017 if (!event_filter_match(event))
8018 continue;
8019 output(event, data);
8020 }
8021 }
8022
8023 /*
8024 * Iterate all events that need to receive side-band events.
8025 *
8026 * For new callers: ensure that account_pmu_sb_event() includes
8027 * your event, otherwise it might not get delivered.
8028 */
8029 static void
8030 perf_iterate_sb(perf_iterate_f output, void *data,
8031 struct perf_event_context *task_ctx)
8032 {
8033 struct perf_event_context *ctx;
8034
8035 rcu_read_lock();
8036 preempt_disable();
8037
8038 /*
8039 * If we have task_ctx != NULL we only notify the task context itself.
8040 * The task_ctx is set only for EXIT events before releasing task
8041 * context.
8042 */
8043 if (task_ctx) {
8044 perf_iterate_ctx(task_ctx, output, data, false);
8045 goto done;
8046 }
8047
8048 perf_iterate_sb_cpu(output, data);
8049
8050 ctx = rcu_dereference(current->perf_event_ctxp);
8051 if (ctx)
8052 perf_iterate_ctx(ctx, output, data, false);
8053 done:
8054 preempt_enable();
8055 rcu_read_unlock();
8056 }
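
/*
 * All side-band record types below (task, comm, namespaces, cgroup,
 * mmap, switch, ksymbol, bpf, text_poke) are delivered through
 * perf_iterate_sb() so that every interested event observes them.
 */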
8057
8058 /*
8059 * Clear all file-based filters at exec, they'll have to be
8060 * re-instated when/if these objects are mmapped again.
8061 */
8062 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
8063 {
8064 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8065 struct perf_addr_filter *filter;
8066 unsigned int restart = 0, count = 0;
8067 unsigned long flags;
8068
8069 if (!has_addr_filter(event))
8070 return;
8071
8072 raw_spin_lock_irqsave(&ifh->lock, flags);
8073 list_for_each_entry(filter, &ifh->list, entry) {
8074 if (filter->path.dentry) {
8075 event->addr_filter_ranges[count].start = 0;
8076 event->addr_filter_ranges[count].size = 0;
8077 restart++;
8078 }
8079
8080 count++;
8081 }
8082
8083 if (restart)
8084 event->addr_filters_gen++;
8085 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8086
8087 if (restart)
8088 perf_event_stop(event, 1);
8089 }
8090
8091 void perf_event_exec(void)
8092 {
8093 struct perf_event_context *ctx;
8094
8095 ctx = perf_pin_task_context(current);
8096 if (!ctx)
8097 return;
8098
8099 perf_event_enable_on_exec(ctx);
8100 perf_event_remove_on_exec(ctx);
8101 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
8102
8103 perf_unpin_context(ctx);
8104 put_ctx(ctx);
8105 }
8106
8107 struct remote_output {
8108 struct perf_buffer *rb;
8109 int err;
8110 };
8111
8112 static void __perf_event_output_stop(struct perf_event *event, void *data)
8113 {
8114 struct perf_event *parent = event->parent;
8115 struct remote_output *ro = data;
8116 struct perf_buffer *rb = ro->rb;
8117 struct stop_event_data sd = {
8118 .event = event,
8119 };
8120
8121 if (!has_aux(event))
8122 return;
8123
8124 if (!parent)
8125 parent = event;
8126
8127 /*
8128 * In case of inheritance, it will be the parent that links to the
8129 * ring-buffer, but it will be the child that's actually using it.
8130 *
8131 * We are using event::rb to determine if the event should be stopped,
8132 * however this may race with ring_buffer_attach() (through set_output),
8133 * which will make us skip the event that actually needs to be stopped.
8134 * So ring_buffer_attach() has to stop an aux event before re-assigning
8135 * its rb pointer.
8136 */
8137 if (rcu_dereference(parent->rb) == rb)
8138 ro->err = __perf_event_stop(&sd);
8139 }
8140
8141 static int __perf_pmu_output_stop(void *info)
8142 {
8143 struct perf_event *event = info;
8144 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
8145 struct remote_output ro = {
8146 .rb = event->rb,
8147 };
8148
8149 rcu_read_lock();
8150 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
8151 if (cpuctx->task_ctx)
8152 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
8153 &ro, false);
8154 rcu_read_unlock();
8155
8156 return ro.err;
8157 }
8158
8159 static void perf_pmu_output_stop(struct perf_event *event)
8160 {
8161 struct perf_event *iter;
8162 int err, cpu;
8163
8164 restart:
8165 rcu_read_lock();
8166 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
8167 /*
8168 * For per-CPU events, we need to make sure that neither they
8169 * nor their children are running; for cpu==-1 events it's
8170 * sufficient to stop the event itself if it's active, since
8171 * it can't have children.
8172 */
8173 cpu = iter->cpu;
8174 if (cpu == -1)
8175 cpu = READ_ONCE(iter->oncpu);
8176
8177 if (cpu == -1)
8178 continue;
8179
8180 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
8181 if (err == -EAGAIN) {
8182 rcu_read_unlock();
8183 goto restart;
8184 }
8185 }
8186 rcu_read_unlock();
8187 }
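
/*
 * __perf_event_stop() returns -EAGAIN when the event migrated off the
 * target CPU before it could be stopped, in which case the whole walk
 * is restarted so no still-running event is missed.
 */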
8188
8189 /*
8190 * task tracking -- fork/exit
8191 *
8192 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
8193 */
8194
8195 struct perf_task_event {
8196 struct task_struct *task;
8197 struct perf_event_context *task_ctx;
8198
8199 struct {
8200 struct perf_event_header header;
8201
8202 u32 pid;
8203 u32 ppid;
8204 u32 tid;
8205 u32 ptid;
8206 u64 time;
8207 } event_id;
8208 };
8209
8210 static int perf_event_task_match(struct perf_event *event)
8211 {
8212 return event->attr.comm || event->attr.mmap ||
8213 event->attr.mmap2 || event->attr.mmap_data ||
8214 event->attr.task;
8215 }
8216
8217 static void perf_event_task_output(struct perf_event *event,
8218 void *data)
8219 {
8220 struct perf_task_event *task_event = data;
8221 struct perf_output_handle handle;
8222 struct perf_sample_data sample;
8223 struct task_struct *task = task_event->task;
8224 int ret, size = task_event->event_id.header.size;
8225
8226 if (!perf_event_task_match(event))
8227 return;
8228
8229 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
8230
8231 ret = perf_output_begin(&handle, &sample, event,
8232 task_event->event_id.header.size);
8233 if (ret)
8234 goto out;
8235
8236 task_event->event_id.pid = perf_event_pid(event, task);
8237 task_event->event_id.tid = perf_event_tid(event, task);
8238
8239 if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
8240 task_event->event_id.ppid = perf_event_pid(event,
8241 task->real_parent);
8242 task_event->event_id.ptid = perf_event_pid(event,
8243 task->real_parent);
8244 } else { /* PERF_RECORD_FORK */
8245 task_event->event_id.ppid = perf_event_pid(event, current);
8246 task_event->event_id.ptid = perf_event_tid(event, current);
8247 }
8248
8249 task_event->event_id.time = perf_event_clock(event);
8250
8251 perf_output_put(&handle, task_event->event_id);
8252
8253 perf_event__output_id_sample(event, &handle, &sample);
8254
8255 perf_output_end(&handle);
8256 out:
8257 task_event->event_id.header.size = size;
8258 }
8259
8260 static void perf_event_task(struct task_struct *task,
8261 struct perf_event_context *task_ctx,
8262 int new)
8263 {
8264 struct perf_task_event task_event;
8265
8266 if (!atomic_read(&nr_comm_events) &&
8267 !atomic_read(&nr_mmap_events) &&
8268 !atomic_read(&nr_task_events))
8269 return;
8270
8271 task_event = (struct perf_task_event){
8272 .task = task,
8273 .task_ctx = task_ctx,
8274 .event_id = {
8275 .header = {
8276 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
8277 .misc = 0,
8278 .size = sizeof(task_event.event_id),
8279 },
8280 /* .pid */
8281 /* .ppid */
8282 /* .tid */
8283 /* .ptid */
8284 /* .time */
8285 },
8286 };
8287
8288 perf_iterate_sb(perf_event_task_output,
8289 &task_event,
8290 task_ctx);
8291 }
8292
8293 void perf_event_fork(struct task_struct *task)
8294 {
8295 perf_event_task(task, NULL, 1);
8296 perf_event_namespaces(task);
8297 }
8298
8299 /*
8300 * comm tracking
8301 */
8302
8303 struct perf_comm_event {
8304 struct task_struct *task;
8305 char *comm;
8306 int comm_size;
8307
8308 struct {
8309 struct perf_event_header header;
8310
8311 u32 pid;
8312 u32 tid;
8313 } event_id;
8314 };
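
/*
 * For reference, the PERF_RECORD_COMM wire format, mirroring the
 * documentation in include/uapi/linux/perf_event.h:
 *
 *	struct {
 *		struct perf_event_header	header;
 *		u32				pid, tid;
 *		char				comm[];	// NUL-padded to u64
 *		struct sample_id		sample_id;
 *	};
 */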
8315
8316 static int perf_event_comm_match(struct perf_event *event)
8317 {
8318 return event->attr.comm;
8319 }
8320
8321 static void perf_event_comm_output(struct perf_event *event,
8322 void *data)
8323 {
8324 struct perf_comm_event *comm_event = data;
8325 struct perf_output_handle handle;
8326 struct perf_sample_data sample;
8327 int size = comm_event->event_id.header.size;
8328 int ret;
8329
8330 if (!perf_event_comm_match(event))
8331 return;
8332
8333 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
8334 ret = perf_output_begin(&handle, &sample, event,
8335 comm_event->event_id.header.size);
8336
8337 if (ret)
8338 goto out;
8339
8340 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
8341 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
8342
8343 perf_output_put(&handle, comm_event->event_id);
8344 __output_copy(&handle, comm_event->comm,
8345 comm_event->comm_size);
8346
8347 perf_event__output_id_sample(event, &handle, &sample);
8348
8349 perf_output_end(&handle);
8350 out:
8351 comm_event->event_id.header.size = size;
8352 }
8353
8354 static void perf_event_comm_event(struct perf_comm_event *comm_event)
8355 {
8356 char comm[TASK_COMM_LEN];
8357 unsigned int size;
8358
8359 memset(comm, 0, sizeof(comm));
8360 strscpy(comm, comm_event->task->comm, sizeof(comm));
8361 size = ALIGN(strlen(comm)+1, sizeof(u64));
8362
8363 comm_event->comm = comm;
8364 comm_event->comm_size = size;
8365
8366 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8367
8368 perf_iterate_sb(perf_event_comm_output,
8369 comm_event,
8370 NULL);
8371 }
8372
8373 void perf_event_comm(struct task_struct *task, bool exec)
8374 {
8375 struct perf_comm_event comm_event;
8376
8377 if (!atomic_read(&nr_comm_events))
8378 return;
8379
8380 comm_event = (struct perf_comm_event){
8381 .task = task,
8382 /* .comm */
8383 /* .comm_size */
8384 .event_id = {
8385 .header = {
8386 .type = PERF_RECORD_COMM,
8387 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
8388 /* .size */
8389 },
8390 /* .pid */
8391 /* .tid */
8392 },
8393 };
8394
8395 perf_event_comm_event(&comm_event);
8396 }
8397
8398 /*
8399 * namespaces tracking
8400 */
8401
8402 struct perf_namespaces_event {
8403 struct task_struct *task;
8404
8405 struct {
8406 struct perf_event_header header;
8407
8408 u32 pid;
8409 u32 tid;
8410 u64 nr_namespaces;
8411 struct perf_ns_link_info link_info[NR_NAMESPACES];
8412 } event_id;
8413 };
8414
8415 static int perf_event_namespaces_match(struct perf_event *event)
8416 {
8417 return event->attr.namespaces;
8418 }
8419
8420 static void perf_event_namespaces_output(struct perf_event *event,
8421 void *data)
8422 {
8423 struct perf_namespaces_event *namespaces_event = data;
8424 struct perf_output_handle handle;
8425 struct perf_sample_data sample;
8426 u16 header_size = namespaces_event->event_id.header.size;
8427 int ret;
8428
8429 if (!perf_event_namespaces_match(event))
8430 return;
8431
8432 perf_event_header__init_id(&namespaces_event->event_id.header,
8433 &sample, event);
8434 ret = perf_output_begin(&handle, &sample, event,
8435 namespaces_event->event_id.header.size);
8436 if (ret)
8437 goto out;
8438
8439 namespaces_event->event_id.pid = perf_event_pid(event,
8440 namespaces_event->task);
8441 namespaces_event->event_id.tid = perf_event_tid(event,
8442 namespaces_event->task);
8443
8444 perf_output_put(&handle, namespaces_event->event_id);
8445
8446 perf_event__output_id_sample(event, &handle, &sample);
8447
8448 perf_output_end(&handle);
8449 out:
8450 namespaces_event->event_id.header.size = header_size;
8451 }
8452
8453 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
8454 struct task_struct *task,
8455 const struct proc_ns_operations *ns_ops)
8456 {
8457 struct path ns_path;
8458 struct inode *ns_inode;
8459 int error;
8460
8461 error = ns_get_path(&ns_path, task, ns_ops);
8462 if (!error) {
8463 ns_inode = ns_path.dentry->d_inode;
8464 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
8465 ns_link_info->ino = ns_inode->i_ino;
8466 path_put(&ns_path);
8467 }
8468 }
8469
8470 void perf_event_namespaces(struct task_struct *task)
8471 {
8472 struct perf_namespaces_event namespaces_event;
8473 struct perf_ns_link_info *ns_link_info;
8474
8475 if (!atomic_read(&nr_namespaces_events))
8476 return;
8477
8478 namespaces_event = (struct perf_namespaces_event){
8479 .task = task,
8480 .event_id = {
8481 .header = {
8482 .type = PERF_RECORD_NAMESPACES,
8483 .misc = 0,
8484 .size = sizeof(namespaces_event.event_id),
8485 },
8486 /* .pid */
8487 /* .tid */
8488 .nr_namespaces = NR_NAMESPACES,
8489 /* .link_info[NR_NAMESPACES] */
8490 },
8491 };
8492
8493 ns_link_info = namespaces_event.event_id.link_info;
8494
8495 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
8496 task, &mntns_operations);
8497
8498 #ifdef CONFIG_USER_NS
8499 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
8500 task, &userns_operations);
8501 #endif
8502 #ifdef CONFIG_NET_NS
8503 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
8504 task, &netns_operations);
8505 #endif
8506 #ifdef CONFIG_UTS_NS
8507 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
8508 task, &utsns_operations);
8509 #endif
8510 #ifdef CONFIG_IPC_NS
8511 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
8512 task, &ipcns_operations);
8513 #endif
8514 #ifdef CONFIG_PID_NS
8515 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
8516 task, &pidns_operations);
8517 #endif
8518 #ifdef CONFIG_CGROUPS
8519 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
8520 task, &cgroupns_operations);
8521 #endif
8522
8523 perf_iterate_sb(perf_event_namespaces_output,
8524 &namespaces_event,
8525 NULL);
8526 }
8527
8528 /*
8529 * cgroup tracking
8530 */
8531 #ifdef CONFIG_CGROUP_PERF
8532
8533 struct perf_cgroup_event {
8534 char *path;
8535 int path_size;
8536 struct {
8537 struct perf_event_header header;
8538 u64 id;
8539 char path[];
8540 } event_id;
8541 };
8542
8543 static int perf_event_cgroup_match(struct perf_event *event)
8544 {
8545 return event->attr.cgroup;
8546 }
8547
8548 static void perf_event_cgroup_output(struct perf_event *event, void *data)
8549 {
8550 struct perf_cgroup_event *cgroup_event = data;
8551 struct perf_output_handle handle;
8552 struct perf_sample_data sample;
8553 u16 header_size = cgroup_event->event_id.header.size;
8554 int ret;
8555
8556 if (!perf_event_cgroup_match(event))
8557 return;
8558
8559 perf_event_header__init_id(&cgroup_event->event_id.header,
8560 &sample, event);
8561 ret = perf_output_begin(&handle, &sample, event,
8562 cgroup_event->event_id.header.size);
8563 if (ret)
8564 goto out;
8565
8566 perf_output_put(&handle, cgroup_event->event_id);
8567 __output_copy(&handle, cgroup_event->path, cgroup_event->path_size);
8568
8569 perf_event__output_id_sample(event, &handle, &sample);
8570
8571 perf_output_end(&handle);
8572 out:
8573 cgroup_event->event_id.header.size = header_size;
8574 }
8575
8576 static void perf_event_cgroup(struct cgroup *cgrp)
8577 {
8578 struct perf_cgroup_event cgroup_event;
8579 char path_enomem[16] = "//enomem";
8580 char *pathname;
8581 size_t size;
8582
8583 if (!atomic_read(&nr_cgroup_events))
8584 return;
8585
8586 cgroup_event = (struct perf_cgroup_event){
8587 .event_id = {
8588 .header = {
8589 .type = PERF_RECORD_CGROUP,
8590 .misc = 0,
8591 .size = sizeof(cgroup_event.event_id),
8592 },
8593 .id = cgroup_id(cgrp),
8594 },
8595 };
8596
8597 pathname = kmalloc(PATH_MAX, GFP_KERNEL);
8598 if (pathname == NULL) {
8599 cgroup_event.path = path_enomem;
8600 } else {
8601 /* leave room for the u64 alignment padding added below */
8602 cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64));
8603 cgroup_event.path = pathname;
8604 }
8605
8606 /*
8607 * Since our buffer works in 8 byte units we need to align our string
8608 * size to a multiple of 8. However, we must guarantee the tail end is
8609 * zero'd out to avoid leaking random bits to userspace.
8610 */
8611 size = strlen(cgroup_event.path) + 1;
8612 while (!IS_ALIGNED(size, sizeof(u64)))
8613 cgroup_event.path[size++] = '\0';
8614
8615 cgroup_event.event_id.header.size += size;
8616 cgroup_event.path_size = size;
8617
8618 perf_iterate_sb(perf_event_cgroup_output,
8619 &cgroup_event,
8620 NULL);
8621
8622 kfree(pathname);
8623 }
8624
8625 #endif
8626
8627 /*
8628 * mmap tracking
8629 */
8630
8631 struct perf_mmap_event {
8632 struct vm_area_struct *vma;
8633
8634 const char *file_name;
8635 int file_size;
8636 int maj, min;
8637 u64 ino;
8638 u64 ino_generation;
8639 u32 prot, flags;
8640 u8 build_id[BUILD_ID_SIZE_MAX];
8641 u32 build_id_size;
8642
8643 struct {
8644 struct perf_event_header header;
8645
8646 u32 pid;
8647 u32 tid;
8648 u64 start;
8649 u64 len;
8650 u64 pgoff;
8651 } event_id;
8652 };
8653
8654 static int perf_event_mmap_match(struct perf_event *event,
8655 void *data)
8656 {
8657 struct perf_mmap_event *mmap_event = data;
8658 struct vm_area_struct *vma = mmap_event->vma;
8659 int executable = vma->vm_flags & VM_EXEC;
8660
8661 return (!executable && event->attr.mmap_data) ||
8662 (executable && (event->attr.mmap || event->attr.mmap2));
8663 }
8664
8665 static void perf_event_mmap_output(struct perf_event *event,
8666 void *data)
8667 {
8668 struct perf_mmap_event *mmap_event = data;
8669 struct perf_output_handle handle;
8670 struct perf_sample_data sample;
8671 int size = mmap_event->event_id.header.size;
8672 u32 type = mmap_event->event_id.header.type;
8673 bool use_build_id;
8674 int ret;
8675
8676 if (!perf_event_mmap_match(event, data))
8677 return;
8678
8679 if (event->attr.mmap2) {
8680 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
8681 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
8682 mmap_event->event_id.header.size += sizeof(mmap_event->min);
8683 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
8684 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
8685 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
8686 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
8687 }
8688
8689 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
8690 ret = perf_output_begin(&handle, &sample, event,
8691 mmap_event->event_id.header.size);
8692 if (ret)
8693 goto out;
8694
8695 mmap_event->event_id.pid = perf_event_pid(event, current);
8696 mmap_event->event_id.tid = perf_event_tid(event, current);
8697
8698 use_build_id = event->attr.build_id && mmap_event->build_id_size;
8699
8700 if (event->attr.mmap2 && use_build_id)
8701 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
8702
8703 perf_output_put(&handle, mmap_event->event_id);
8704
8705 if (event->attr.mmap2) {
8706 if (use_build_id) {
8707 u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 };
8708
8709 __output_copy(&handle, size, 4);
8710 __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX);
8711 } else {
8712 perf_output_put(&handle, mmap_event->maj);
8713 perf_output_put(&handle, mmap_event->min);
8714 perf_output_put(&handle, mmap_event->ino);
8715 perf_output_put(&handle, mmap_event->ino_generation);
8716 }
8717 perf_output_put(&handle, mmap_event->prot);
8718 perf_output_put(&handle, mmap_event->flags);
8719 }
8720
8721 __output_copy(&handle, mmap_event->file_name,
8722 mmap_event->file_size);
8723
8724 perf_event__output_id_sample(event, &handle, &sample);
8725
8726 perf_output_end(&handle);
8727 out:
8728 mmap_event->event_id.header.size = size;
8729 mmap_event->event_id.header.type = type;
8730 }
8731
8732 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
8733 {
8734 struct vm_area_struct *vma = mmap_event->vma;
8735 struct file *file = vma->vm_file;
8736 int maj = 0, min = 0;
8737 u64 ino = 0, gen = 0;
8738 u32 prot = 0, flags = 0;
8739 unsigned int size;
8740 char tmp[16];
8741 char *buf = NULL;
8742 char *name = NULL;
8743
8744 if (vma->vm_flags & VM_READ)
8745 prot |= PROT_READ;
8746 if (vma->vm_flags & VM_WRITE)
8747 prot |= PROT_WRITE;
8748 if (vma->vm_flags & VM_EXEC)
8749 prot |= PROT_EXEC;
8750
8751 if (vma->vm_flags & VM_MAYSHARE)
8752 flags = MAP_SHARED;
8753 else
8754 flags = MAP_PRIVATE;
8755
8756 if (vma->vm_flags & VM_LOCKED)
8757 flags |= MAP_LOCKED;
8758 if (is_vm_hugetlb_page(vma))
8759 flags |= MAP_HUGETLB;
8760
8761 if (file) {
8762 struct inode *inode;
8763 dev_t dev;
8764
8765 buf = kmalloc(PATH_MAX, GFP_KERNEL);
8766 if (!buf) {
8767 name = "//enomem";
8768 goto cpy_name;
8769 }
8770 /*
8771 * d_path() works from the end of the buffer backwards, so we
8772 * need to add enough zero bytes after the string to handle
8773 * the 64bit alignment we do later.
8774 */
8775 name = file_path(file, buf, PATH_MAX - sizeof(u64));
8776 if (IS_ERR(name)) {
8777 name = "//toolong";
8778 goto cpy_name;
8779 }
8780 inode = file_inode(vma->vm_file);
8781 dev = inode->i_sb->s_dev;
8782 ino = inode->i_ino;
8783 gen = inode->i_generation;
8784 maj = MAJOR(dev);
8785 min = MINOR(dev);
8786
8787 goto got_name;
8788 } else {
8789 if (vma->vm_ops && vma->vm_ops->name)
8790 name = (char *) vma->vm_ops->name(vma);
8791 if (!name)
8792 name = (char *)arch_vma_name(vma);
8793 if (!name) {
8794 if (vma_is_initial_heap(vma))
8795 name = "[heap]";
8796 else if (vma_is_initial_stack(vma))
8797 name = "[stack]";
8798 else
8799 name = "//anon";
8800 }
8801 }
8802
8803 cpy_name:
8804 strscpy(tmp, name, sizeof(tmp));
8805 name = tmp;
8806 got_name:
8807 /*
8808 * Since our buffer works in 8 byte units we need to align our string
8809 * size to a multiple of 8. However, we must guarantee the tail end is
8810 * zero'd out to avoid leaking random bits to userspace.
8811 */
8812 size = strlen(name)+1;
8813 while (!IS_ALIGNED(size, sizeof(u64)))
8814 name[size++] = '\0';
8815
8816 mmap_event->file_name = name;
8817 mmap_event->file_size = size;
8818 mmap_event->maj = maj;
8819 mmap_event->min = min;
8820 mmap_event->ino = ino;
8821 mmap_event->ino_generation = gen;
8822 mmap_event->prot = prot;
8823 mmap_event->flags = flags;
8824
8825 if (!(vma->vm_flags & VM_EXEC))
8826 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
8827
8828 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
8829
8830 if (atomic_read(&nr_build_id_events))
8831 build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
8832
8833 perf_iterate_sb(perf_event_mmap_output,
8834 mmap_event,
8835 NULL);
8836
8837 kfree(buf);
8838 }
8839
8840 /*
8841 * Check whether inode and address range match filter criteria.
8842 */
8843 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
8844 struct file *file, unsigned long offset,
8845 unsigned long size)
8846 {
8847 /* d_inode(NULL) won't be equal to any mapped user-space file */
8848 if (!filter->path.dentry)
8849 return false;
8850
8851 if (d_inode(filter->path.dentry) != file_inode(file))
8852 return false;
8853
8854 if (filter->offset > offset + size)
8855 return false;
8856
8857 if (filter->offset + filter->size < offset)
8858 return false;
8859
8860 return true;
8861 }
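
/*
 * The two range checks above are a plain interval-overlap test: a
 * filter spanning file offsets [filter->offset, filter->offset +
 * filter->size] matches a mapping of [offset, offset + size] unless
 * one lies entirely before the other. E.g. a filter at offset 0x1000
 * of size 0x1000 matches a mapping at offset 0x1800, but not one at
 * offset 0x3000.
 */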
8862
8863 static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
8864 struct vm_area_struct *vma,
8865 struct perf_addr_filter_range *fr)
8866 {
8867 unsigned long vma_size = vma->vm_end - vma->vm_start;
8868 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8869 struct file *file = vma->vm_file;
8870
8871 if (!perf_addr_filter_match(filter, file, off, vma_size))
8872 return false;
8873
8874 if (filter->offset < off) {
8875 fr->start = vma->vm_start;
8876 fr->size = min(vma_size, filter->size - (off - filter->offset));
8877 } else {
8878 fr->start = vma->vm_start + filter->offset - off;
8879 fr->size = min(vma->vm_end - fr->start, filter->size);
8880 }
8881
8882 return true;
8883 }
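
/*
 * Worked example: a filter at file offset 0x1000, size 0x100, against
 * a VMA that maps file offset 0 at vm_start 0x400000, gives fr->start =
 * 0x400000 + 0x1000 - 0 = 0x401000 and fr->size = 0x100 (clamped to the
 * VMA end), i.e. the filter is rebased from file offsets to virtual
 * addresses.
 */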
8884
8885 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
8886 {
8887 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8888 struct vm_area_struct *vma = data;
8889 struct perf_addr_filter *filter;
8890 unsigned int restart = 0, count = 0;
8891 unsigned long flags;
8892
8893 if (!has_addr_filter(event))
8894 return;
8895
8896 if (!vma->vm_file)
8897 return;
8898
8899 raw_spin_lock_irqsave(&ifh->lock, flags);
8900 list_for_each_entry(filter, &ifh->list, entry) {
8901 if (perf_addr_filter_vma_adjust(filter, vma,
8902 &event->addr_filter_ranges[count]))
8903 restart++;
8904
8905 count++;
8906 }
8907
8908 if (restart)
8909 event->addr_filters_gen++;
8910 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8911
8912 if (restart)
8913 perf_event_stop(event, 1);
8914 }
8915
8916 /*
8917 * Adjust all task's events' filters to the new vma
8918 */
8919 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
8920 {
8921 struct perf_event_context *ctx;
8922
8923 /*
8924 * Data tracing isn't supported yet and as such there is no need
8925 * to keep track of anything that isn't related to executable code:
8926 */
8927 if (!(vma->vm_flags & VM_EXEC))
8928 return;
8929
8930 rcu_read_lock();
8931 ctx = rcu_dereference(current->perf_event_ctxp);
8932 if (ctx)
8933 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
8934 rcu_read_unlock();
8935 }
8936
8937 void perf_event_mmap(struct vm_area_struct *vma)
8938 {
8939 struct perf_mmap_event mmap_event;
8940
8941 if (!atomic_read(&nr_mmap_events))
8942 return;
8943
8944 mmap_event = (struct perf_mmap_event){
8945 .vma = vma,
8946 /* .file_name */
8947 /* .file_size */
8948 .event_id = {
8949 .header = {
8950 .type = PERF_RECORD_MMAP,
8951 .misc = PERF_RECORD_MISC_USER,
8952 /* .size */
8953 },
8954 /* .pid */
8955 /* .tid */
8956 .start = vma->vm_start,
8957 .len = vma->vm_end - vma->vm_start,
8958 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
8959 },
8960 /* .maj (attr_mmap2 only) */
8961 /* .min (attr_mmap2 only) */
8962 /* .ino (attr_mmap2 only) */
8963 /* .ino_generation (attr_mmap2 only) */
8964 /* .prot (attr_mmap2 only) */
8965 /* .flags (attr_mmap2 only) */
8966 };
8967
8968 perf_addr_filters_adjust(vma);
8969 perf_event_mmap_event(&mmap_event);
8970 }
8971
8972 void perf_event_aux_event(struct perf_event *event, unsigned long head,
8973 unsigned long size, u64 flags)
8974 {
8975 struct perf_output_handle handle;
8976 struct perf_sample_data sample;
8977 struct perf_aux_event {
8978 struct perf_event_header header;
8979 u64 offset;
8980 u64 size;
8981 u64 flags;
8982 } rec = {
8983 .header = {
8984 .type = PERF_RECORD_AUX,
8985 .misc = 0,
8986 .size = sizeof(rec),
8987 },
8988 .offset = head,
8989 .size = size,
8990 .flags = flags,
8991 };
8992 int ret;
8993
8994 perf_event_header__init_id(&rec.header, &sample, event);
8995 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
8996
8997 if (ret)
8998 return;
8999
9000 perf_output_put(&handle, rec);
9001 perf_event__output_id_sample(event, &handle, &sample);
9002
9003 perf_output_end(&handle);
9004 }
9005
9006 /*
9007 * Lost/dropped samples logging
9008 */
9009 void perf_log_lost_samples(struct perf_event *event, u64 lost)
9010 {
9011 struct perf_output_handle handle;
9012 struct perf_sample_data sample;
9013 int ret;
9014
9015 struct {
9016 struct perf_event_header header;
9017 u64 lost;
9018 } lost_samples_event = {
9019 .header = {
9020 .type = PERF_RECORD_LOST_SAMPLES,
9021 .misc = 0,
9022 .size = sizeof(lost_samples_event),
9023 },
9024 .lost = lost,
9025 };
9026
9027 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
9028
9029 ret = perf_output_begin(&handle, &sample, event,
9030 lost_samples_event.header.size);
9031 if (ret)
9032 return;
9033
9034 perf_output_put(&handle, lost_samples_event);
9035 perf_event__output_id_sample(event, &handle, &sample);
9036 perf_output_end(&handle);
9037 }
9038
9039 /*
9040 * context_switch tracking
9041 */
9042
9043 struct perf_switch_event {
9044 struct task_struct *task;
9045 struct task_struct *next_prev;
9046
9047 struct {
9048 struct perf_event_header header;
9049 u32 next_prev_pid;
9050 u32 next_prev_tid;
9051 } event_id;
9052 };
9053
9054 static int perf_event_switch_match(struct perf_event *event)
9055 {
9056 return event->attr.context_switch;
9057 }
9058
9059 static void perf_event_switch_output(struct perf_event *event, void *data)
9060 {
9061 struct perf_switch_event *se = data;
9062 struct perf_output_handle handle;
9063 struct perf_sample_data sample;
9064 int ret;
9065
9066 if (!perf_event_switch_match(event))
9067 return;
9068
9069 /* Only CPU-wide events are allowed to see next/prev pid/tid */
9070 if (event->ctx->task) {
9071 se->event_id.header.type = PERF_RECORD_SWITCH;
9072 se->event_id.header.size = sizeof(se->event_id.header);
9073 } else {
9074 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
9075 se->event_id.header.size = sizeof(se->event_id);
9076 se->event_id.next_prev_pid =
9077 perf_event_pid(event, se->next_prev);
9078 se->event_id.next_prev_tid =
9079 perf_event_tid(event, se->next_prev);
9080 }
9081
9082 perf_event_header__init_id(&se->event_id.header, &sample, event);
9083
9084 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
9085 if (ret)
9086 return;
9087
9088 if (event->ctx->task)
9089 perf_output_put(&handle, se->event_id.header);
9090 else
9091 perf_output_put(&handle, se->event_id);
9092
9093 perf_event__output_id_sample(event, &handle, &sample);
9094
9095 perf_output_end(&handle);
9096 }
9097
9098 static void perf_event_switch(struct task_struct *task,
9099 struct task_struct *next_prev, bool sched_in)
9100 {
9101 struct perf_switch_event switch_event;
9102
9103 /* N.B. caller checks nr_switch_events != 0 */
9104
9105 switch_event = (struct perf_switch_event){
9106 .task = task,
9107 .next_prev = next_prev,
9108 .event_id = {
9109 .header = {
9110 /* .type */
9111 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
9112 /* .size */
9113 },
9114 /* .next_prev_pid */
9115 /* .next_prev_tid */
9116 },
9117 };
9118
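	/*
	 * Still being on the runqueue at sched-out time means the task was
	 * preempted rather than blocked; let userspace know.
	 */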
9119 if (!sched_in && task->on_rq) {
9120 switch_event.event_id.header.misc |=
9121 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
9122 }
9123
9124 perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
9125 }
9126
9127 /*
9128 * IRQ throttle logging
9129 */
9130
9131 static void perf_log_throttle(struct perf_event *event, int enable)
9132 {
9133 struct perf_output_handle handle;
9134 struct perf_sample_data sample;
9135 int ret;
9136
9137 struct {
9138 struct perf_event_header header;
9139 u64 time;
9140 u64 id;
9141 u64 stream_id;
9142 } throttle_event = {
9143 .header = {
9144 .type = PERF_RECORD_THROTTLE,
9145 .misc = 0,
9146 .size = sizeof(throttle_event),
9147 },
9148 .time = perf_event_clock(event),
9149 .id = primary_event_id(event),
9150 .stream_id = event->id,
9151 };
9152
9153 if (enable)
9154 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
9155
9156 perf_event_header__init_id(&throttle_event.header, &sample, event);
9157
9158 ret = perf_output_begin(&handle, &sample, event,
9159 throttle_event.header.size);
9160 if (ret)
9161 return;
9162
9163 perf_output_put(&handle, throttle_event);
9164 perf_event__output_id_sample(event, &handle, &sample);
9165 perf_output_end(&handle);
9166 }
9167
9168 /*
9169 * ksymbol register/unregister tracking
9170 */
9171
9172 struct perf_ksymbol_event {
9173 const char *name;
9174 int name_len;
9175 struct {
9176 struct perf_event_header header;
9177 u64 addr;
9178 u32 len;
9179 u16 ksym_type;
9180 u16 flags;
9181 } event_id;
9182 };
9183
9184 static int perf_event_ksymbol_match(struct perf_event *event)
9185 {
9186 return event->attr.ksymbol;
9187 }
9188
9189 static void perf_event_ksymbol_output(struct perf_event *event, void *data)
9190 {
9191 struct perf_ksymbol_event *ksymbol_event = data;
9192 struct perf_output_handle handle;
9193 struct perf_sample_data sample;
9194 int ret;
9195
9196 if (!perf_event_ksymbol_match(event))
9197 return;
9198
9199 perf_event_header__init_id(&ksymbol_event->event_id.header,
9200 &sample, event);
9201 ret = perf_output_begin(&handle, &sample, event,
9202 ksymbol_event->event_id.header.size);
9203 if (ret)
9204 return;
9205
9206 perf_output_put(&handle, ksymbol_event->event_id);
9207 __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
9208 perf_event__output_id_sample(event, &handle, &sample);
9209
9210 perf_output_end(&handle);
9211 }
9212
9213 void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
9214 const char *sym)
9215 {
9216 struct perf_ksymbol_event ksymbol_event;
9217 char name[KSYM_NAME_LEN];
9218 u16 flags = 0;
9219 int name_len;
9220
9221 if (!atomic_read(&nr_ksymbol_events))
9222 return;
9223
9224 if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
9225 ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
9226 goto err;
9227
9228 strscpy(name, sym, KSYM_NAME_LEN);
9229 name_len = strlen(name) + 1;
9230 while (!IS_ALIGNED(name_len, sizeof(u64)))
9231 name[name_len++] = '\0';
9232 BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
9233
9234 if (unregister)
9235 flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
9236
9237 ksymbol_event = (struct perf_ksymbol_event){
9238 .name = name,
9239 .name_len = name_len,
9240 .event_id = {
9241 .header = {
9242 .type = PERF_RECORD_KSYMBOL,
9243 .size = sizeof(ksymbol_event.event_id) +
9244 name_len,
9245 },
9246 .addr = addr,
9247 .len = len,
9248 .ksym_type = ksym_type,
9249 .flags = flags,
9250 },
9251 };
9252
9253 perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
9254 return;
9255 err:
9256 WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
9257 }
9258
9259 /*
9260 * bpf program load/unload tracking
9261 */
9262
9263 struct perf_bpf_event {
9264 struct bpf_prog *prog;
9265 struct {
9266 struct perf_event_header header;
9267 u16 type;
9268 u16 flags;
9269 u32 id;
9270 u8 tag[BPF_TAG_SIZE];
9271 } event_id;
9272 };
9273
9274 static int perf_event_bpf_match(struct perf_event *event)
9275 {
9276 return event->attr.bpf_event;
9277 }
9278
9279 static void perf_event_bpf_output(struct perf_event *event, void *data)
9280 {
9281 struct perf_bpf_event *bpf_event = data;
9282 struct perf_output_handle handle;
9283 struct perf_sample_data sample;
9284 int ret;
9285
9286 if (!perf_event_bpf_match(event))
9287 return;
9288
9289 perf_event_header__init_id(&bpf_event->event_id.header,
9290 &sample, event);
9291 ret = perf_output_begin(&handle, &sample, event,
9292 bpf_event->event_id.header.size);
9293 if (ret)
9294 return;
9295
9296 perf_output_put(&handle, bpf_event->event_id);
9297 perf_event__output_id_sample(event, &handle, &sample);
9298
9299 perf_output_end(&handle);
9300 }
9301
9302 static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
9303 enum perf_bpf_event_type type)
9304 {
9305 bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
9306 int i;
9307
9308 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
9309 (u64)(unsigned long)prog->bpf_func,
9310 prog->jited_len, unregister,
9311 prog->aux->ksym.name);
9312
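	/* func[0] is the main program; its ksymbol was just reported above */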
9313 for (i = 1; i < prog->aux->func_cnt; i++) {
9314 struct bpf_prog *subprog = prog->aux->func[i];
9315
9316 perf_event_ksymbol(
9317 PERF_RECORD_KSYMBOL_TYPE_BPF,
9318 (u64)(unsigned long)subprog->bpf_func,
9319 subprog->jited_len, unregister,
9320 subprog->aux->ksym.name);
9321 }
9322 }
9323
9324 void perf_event_bpf_event(struct bpf_prog *prog,
9325 enum perf_bpf_event_type type,
9326 u16 flags)
9327 {
9328 struct perf_bpf_event bpf_event;
9329
9330 if (type <= PERF_BPF_EVENT_UNKNOWN ||
9331 type >= PERF_BPF_EVENT_MAX)
9332 return;
9333
9334 switch (type) {
9335 case PERF_BPF_EVENT_PROG_LOAD:
9336 case PERF_BPF_EVENT_PROG_UNLOAD:
9337 if (atomic_read(&nr_ksymbol_events))
9338 perf_event_bpf_emit_ksymbols(prog, type);
9339 break;
9340 default:
9341 break;
9342 }
9343
9344 if (!atomic_read(&nr_bpf_events))
9345 return;
9346
9347 bpf_event = (struct perf_bpf_event){
9348 .prog = prog,
9349 .event_id = {
9350 .header = {
9351 .type = PERF_RECORD_BPF_EVENT,
9352 .size = sizeof(bpf_event.event_id),
9353 },
9354 .type = type,
9355 .flags = flags,
9356 .id = prog->aux->id,
9357 },
9358 };
9359
9360 BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
9361
9362 memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
9363 perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
9364 }
9365
9366 struct perf_text_poke_event {
9367 const void *old_bytes;
9368 const void *new_bytes;
9369 size_t pad;
9370 u16 old_len;
9371 u16 new_len;
9372
9373 struct {
9374 struct perf_event_header header;
9375
9376 u64 addr;
9377 } event_id;
9378 };
9379
9380 static int perf_event_text_poke_match(struct perf_event *event)
9381 {
9382 return event->attr.text_poke;
9383 }
9384
9385 static void perf_event_text_poke_output(struct perf_event *event, void *data)
9386 {
9387 struct perf_text_poke_event *text_poke_event = data;
9388 struct perf_output_handle handle;
9389 struct perf_sample_data sample;
9390 u64 padding = 0;
9391 int ret;
9392
9393 if (!perf_event_text_poke_match(event))
9394 return;
9395
9396 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
9397
9398 ret = perf_output_begin(&handle, &sample, event,
9399 text_poke_event->event_id.header.size);
9400 if (ret)
9401 return;
9402
9403 perf_output_put(&handle, text_poke_event->event_id);
9404 perf_output_put(&handle, text_poke_event->old_len);
9405 perf_output_put(&handle, text_poke_event->new_len);
9406
9407 __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len);
9408 __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len);
9409
9410 if (text_poke_event->pad)
9411 __output_copy(&handle, &padding, text_poke_event->pad);
9412
9413 perf_event__output_id_sample(event, &handle, &sample);
9414
9415 perf_output_end(&handle);
9416 }
9417
9418 void perf_event_text_poke(const void *addr, const void *old_bytes,
9419 size_t old_len, const void *new_bytes, size_t new_len)
9420 {
9421 struct perf_text_poke_event text_poke_event;
9422 size_t tot, pad;
9423
9424 if (!atomic_read(&nr_text_poke_events))
9425 return;
9426
9427 tot = sizeof(text_poke_event.old_len) + old_len;
9428 tot += sizeof(text_poke_event.new_len) + new_len;
9429 pad = ALIGN(tot, sizeof(u64)) - tot;
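/*
 * Worked example (illustrative): old_len == 5 and new_len == 5 give
 * tot == 2 + 5 + 2 + 5 == 14, so pad == ALIGN(14, 8) - 14 == 2,
 * keeping the variable-sized payload 8-byte aligned.
 */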
9430
9431 text_poke_event = (struct perf_text_poke_event){
9432 .old_bytes = old_bytes,
9433 .new_bytes = new_bytes,
9434 .pad = pad,
9435 .old_len = old_len,
9436 .new_len = new_len,
9437 .event_id = {
9438 .header = {
9439 .type = PERF_RECORD_TEXT_POKE,
9440 .misc = PERF_RECORD_MISC_KERNEL,
9441 .size = sizeof(text_poke_event.event_id) + tot + pad,
9442 },
9443 .addr = (unsigned long)addr,
9444 },
9445 };
9446
9447 perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL);
9448 }
9449
9450 void perf_event_itrace_started(struct perf_event *event)
9451 {
9452 event->attach_state |= PERF_ATTACH_ITRACE;
9453 }
9454
9455 static void perf_log_itrace_start(struct perf_event *event)
9456 {
9457 struct perf_output_handle handle;
9458 struct perf_sample_data sample;
9459 struct perf_aux_event {
9460 struct perf_event_header header;
9461 u32 pid;
9462 u32 tid;
9463 } rec;
9464 int ret;
9465
9466 if (event->parent)
9467 event = event->parent;
9468
9469 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
9470 event->attach_state & PERF_ATTACH_ITRACE)
9471 return;
9472
9473 rec.header.type = PERF_RECORD_ITRACE_START;
9474 rec.header.misc = 0;
9475 rec.header.size = sizeof(rec);
9476 rec.pid = perf_event_pid(event, current);
9477 rec.tid = perf_event_tid(event, current);
9478
9479 perf_event_header__init_id(&rec.header, &sample, event);
9480 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9481
9482 if (ret)
9483 return;
9484
9485 perf_output_put(&handle, rec);
9486 perf_event__output_id_sample(event, &handle, &sample);
9487
9488 perf_output_end(&handle);
9489 }
9490
9491 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
9492 {
9493 struct perf_output_handle handle;
9494 struct perf_sample_data sample;
9495 struct perf_aux_event {
9496 struct perf_event_header header;
9497 u64 hw_id;
9498 } rec;
9499 int ret;
9500
9501 if (event->parent)
9502 event = event->parent;
9503
9504 rec.header.type = PERF_RECORD_AUX_OUTPUT_HW_ID;
9505 rec.header.misc = 0;
9506 rec.header.size = sizeof(rec);
9507 rec.hw_id = hw_id;
9508
9509 perf_event_header__init_id(&rec.header, &sample, event);
9510 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9511
9512 if (ret)
9513 return;
9514
9515 perf_output_put(&handle, rec);
9516 perf_event__output_id_sample(event, &handle, &sample);
9517
9518 perf_output_end(&handle);
9519 }
9520 EXPORT_SYMBOL_GPL(perf_report_aux_output_id);
9521
9522 static int
9523 __perf_event_account_interrupt(struct perf_event *event, int throttle)
9524 {
9525 struct hw_perf_event *hwc = &event->hw;
9526 int ret = 0;
9527 u64 seq;
9528
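/*
 * perf_throttled_seq advances once per tick; a seq mismatch below
 * means this is the first interrupt of a new tick window, so the
 * per-event interrupt count restarts. Throttling kicks in only when
 * a single window sees more than max_samples_per_tick interrupts.
 */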
9529 seq = __this_cpu_read(perf_throttled_seq);
9530 if (seq != hwc->interrupts_seq) {
9531 hwc->interrupts_seq = seq;
9532 hwc->interrupts = 1;
9533 } else {
9534 hwc->interrupts++;
9535 if (unlikely(throttle &&
9536 hwc->interrupts > max_samples_per_tick)) {
9537 __this_cpu_inc(perf_throttled_count);
9538 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
9539 hwc->interrupts = MAX_INTERRUPTS;
9540 perf_log_throttle(event, 0);
9541 ret = 1;
9542 }
9543 }
9544
9545 if (event->attr.freq) {
9546 u64 now = perf_clock();
9547 s64 delta = now - hwc->freq_time_stamp;
9548
9549 hwc->freq_time_stamp = now;
9550
9551 if (delta > 0 && delta < 2*TICK_NSEC)
9552 perf_adjust_period(event, delta, hwc->last_period, true);
9553 }
9554
9555 return ret;
9556 }
9557
9558 int perf_event_account_interrupt(struct perf_event *event)
9559 {
9560 return __perf_event_account_interrupt(event, 1);
9561 }
9562
9563 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
9564 {
9565 /*
9566 * Due to interrupt latency (AKA "skid"), we may enter the
9567 * kernel before taking an overflow, even if the PMU is only
9568 * counting user events.
9569 */
9570 if (event->attr.exclude_kernel && !user_mode(regs))
9571 return false;
9572
9573 return true;
9574 }
9575
9576 /*
9577 * Generic event overflow handling, sampling.
9578 */
9579
9580 static int __perf_event_overflow(struct perf_event *event,
9581 int throttle, struct perf_sample_data *data,
9582 struct pt_regs *regs)
9583 {
9584 int events = atomic_read(&event->event_limit);
9585 int ret = 0;
9586
9587 /*
9588 * Non-sampling counters might still use the PMI to fold short
9589 * hardware counters; ignore those.
9590 */
9591 if (unlikely(!is_sampling_event(event)))
9592 return 0;
9593
9594 ret = __perf_event_account_interrupt(event, throttle);
9595
9596 /*
9597 * XXX event_limit might not quite work as expected on inherited
9598 * events
9599 */
9600
9601 event->pending_kill = POLL_IN;
9602 if (events && atomic_dec_and_test(&event->event_limit)) {
9603 ret = 1;
9604 event->pending_kill = POLL_HUP;
9605 perf_event_disable_inatomic(event);
9606 }
9607
9608 if (event->attr.sigtrap) {
9609 /*
9610 * The desired behaviour of sigtrap vs invalid samples is a bit
9611 * tricky; on the one hand, one should not lose the SIGTRAP if
9612 * it is the first event, on the other hand, we should also not
9613 * trigger the WARN or override the data address.
9614 */
9615 bool valid_sample = sample_is_allowed(event, regs);
9616 unsigned int pending_id = 1;
9617
9618 if (regs)
9619 pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
9620 if (!event->pending_sigtrap) {
9621 event->pending_sigtrap = pending_id;
9622 local_inc(&event->ctx->nr_pending);
9623 } else if (event->attr.exclude_kernel && valid_sample) {
9624 /*
9625 * Should not be able to return to user space without
9626 * consuming pending_sigtrap; with exceptions:
9627 *
9628 * 1. Where !exclude_kernel, events can overflow again
9629 * in the kernel without returning to user space.
9630 *
9631 * 2. Events that can overflow again before the IRQ-
9632 * work without user space progress (e.g. hrtimer).
9633 * To approximate progress (with false negatives),
9634 * check 32-bit hash of the current IP.
9635 */
9636 WARN_ON_ONCE(event->pending_sigtrap != pending_id);
9637 }
9638
9639 event->pending_addr = 0;
9640 if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
9641 event->pending_addr = data->addr;
9642 irq_work_queue(&event->pending_irq);
9643 }
9644
9645 READ_ONCE(event->overflow_handler)(event, data, regs);
9646
9647 if (*perf_event_fasync(event) && event->pending_kill) {
9648 event->pending_wakeup = 1;
9649 irq_work_queue(&event->pending_irq);
9650 }
9651
9652 return ret;
9653 }
9654
9655 int perf_event_overflow(struct perf_event *event,
9656 struct perf_sample_data *data,
9657 struct pt_regs *regs)
9658 {
9659 return __perf_event_overflow(event, 1, data, regs);
9660 }
9661
9662 /*
9663 * Generic software event infrastructure
9664 */
9665
9666 struct swevent_htable {
9667 struct swevent_hlist *swevent_hlist;
9668 struct mutex hlist_mutex;
9669 int hlist_refcount;
9670
9671 /* Recursion avoidance in each context */
9672 int recursion[PERF_NR_CONTEXTS];
9673 };
9674
9675 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
9676
9677 /*
9678 * We directly increment event->count and keep a second value in
9679 * event->hw.period_left to count intervals. This period value
9680 * is kept in the range [-sample_period, 0] so that we can use the
9681 * sign as trigger.
9682 */
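/*
 * Worked example (illustrative): with period == 100 and
 * period_left == 250, perf_swevent_set_period() accounts
 * (100 + 250) / 100 == 3 overflows and leaves period_left at
 * 250 - 300 == -50, i.e. 50 events until the next overflow.
 */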
9683
9684 u64 perf_swevent_set_period(struct perf_event *event)
9685 {
9686 struct hw_perf_event *hwc = &event->hw;
9687 u64 period = hwc->last_period;
9688 u64 nr, offset;
9689 s64 old, val;
9690
9691 hwc->last_period = hwc->sample_period;
9692
9693 old = local64_read(&hwc->period_left);
9694 do {
9695 val = old;
9696 if (val < 0)
9697 return 0;
9698
9699 nr = div64_u64(period + val, period);
9700 offset = nr * period;
9701 val -= offset;
9702 } while (!local64_try_cmpxchg(&hwc->period_left, &old, val));
9703
9704 return nr;
9705 }
9706
9707 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
9708 struct perf_sample_data *data,
9709 struct pt_regs *regs)
9710 {
9711 struct hw_perf_event *hwc = &event->hw;
9712 int throttle = 0;
9713
9714 if (!overflow)
9715 overflow = perf_swevent_set_period(event);
9716
9717 if (hwc->interrupts == MAX_INTERRUPTS)
9718 return;
9719
9720 for (; overflow; overflow--) {
9721 if (__perf_event_overflow(event, throttle,
9722 data, regs)) {
9723 /*
9724 * We inhibit the overflow from happening when
9725 * hwc->interrupts == MAX_INTERRUPTS.
9726 */
9727 break;
9728 }
9729 throttle = 1;
9730 }
9731 }
9732
9733 static void perf_swevent_event(struct perf_event *event, u64 nr,
9734 struct perf_sample_data *data,
9735 struct pt_regs *regs)
9736 {
9737 struct hw_perf_event *hwc = &event->hw;
9738
9739 local64_add(nr, &event->count);
9740
9741 if (!regs)
9742 return;
9743
9744 if (!is_sampling_event(event))
9745 return;
9746
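/*
 * Three overflow paths follow: a caller-supplied period
 * (PERF_SAMPLE_PERIOD without freq) overflows immediately with @nr
 * as the period; a sample_period of 1 overflows on every event;
 * otherwise @nr accumulates in period_left until it crosses zero.
 */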
9747 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
9748 data->period = nr;
9749 return perf_swevent_overflow(event, 1, data, regs);
9750 } else
9751 data->period = event->hw.last_period;
9752
9753 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
9754 return perf_swevent_overflow(event, 1, data, regs);
9755
9756 if (local64_add_negative(nr, &hwc->period_left))
9757 return;
9758
9759 perf_swevent_overflow(event, 0, data, regs);
9760 }
9761
9762 static int perf_exclude_event(struct perf_event *event,
9763 struct pt_regs *regs)
9764 {
9765 if (event->hw.state & PERF_HES_STOPPED)
9766 return 1;
9767
9768 if (regs) {
9769 if (event->attr.exclude_user && user_mode(regs))
9770 return 1;
9771
9772 if (event->attr.exclude_kernel && !user_mode(regs))
9773 return 1;
9774 }
9775
9776 return 0;
9777 }
9778
9779 static int perf_swevent_match(struct perf_event *event,
9780 enum perf_type_id type,
9781 u32 event_id,
9782 struct perf_sample_data *data,
9783 struct pt_regs *regs)
9784 {
9785 if (event->attr.type != type)
9786 return 0;
9787
9788 if (event->attr.config != event_id)
9789 return 0;
9790
9791 if (perf_exclude_event(event, regs))
9792 return 0;
9793
9794 return 1;
9795 }
9796
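/*
 * Pack the 32-bit event id and the event type into a single u64 and
 * hash that into SWEVENT_HLIST_BITS buckets.
 */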
9797 static inline u64 swevent_hash(u64 type, u32 event_id)
9798 {
9799 u64 val = event_id | (type << 32);
9800
9801 return hash_64(val, SWEVENT_HLIST_BITS);
9802 }
9803
9804 static inline struct hlist_head *
9805 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
9806 {
9807 u64 hash = swevent_hash(type, event_id);
9808
9809 return &hlist->heads[hash];
9810 }
9811
9812 /* For the read side: events when they trigger */
9813 static inline struct hlist_head *
9814 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
9815 {
9816 struct swevent_hlist *hlist;
9817
9818 hlist = rcu_dereference(swhash->swevent_hlist);
9819 if (!hlist)
9820 return NULL;
9821
9822 return __find_swevent_head(hlist, type, event_id);
9823 }
9824
9825 /* For the event head insertion and removal in the hlist */
9826 static inline struct hlist_head *
9827 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
9828 {
9829 struct swevent_hlist *hlist;
9830 u32 event_id = event->attr.config;
9831 u64 type = event->attr.type;
9832
9833 /*
9834 * Event scheduling is always serialized against hlist allocation
9835 * and release, which makes the protected version suitable here.
9836 * The context lock guarantees that.
9837 */
9838 hlist = rcu_dereference_protected(swhash->swevent_hlist,
9839 lockdep_is_held(&event->ctx->lock));
9840 if (!hlist)
9841 return NULL;
9842
9843 return __find_swevent_head(hlist, type, event_id);
9844 }
9845
9846 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
9847 u64 nr,
9848 struct perf_sample_data *data,
9849 struct pt_regs *regs)
9850 {
9851 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9852 struct perf_event *event;
9853 struct hlist_head *head;
9854
9855 rcu_read_lock();
9856 head = find_swevent_head_rcu(swhash, type, event_id);
9857 if (!head)
9858 goto end;
9859
9860 hlist_for_each_entry_rcu(event, head, hlist_entry) {
9861 if (perf_swevent_match(event, type, event_id, data, regs))
9862 perf_swevent_event(event, nr, data, regs);
9863 }
9864 end:
9865 rcu_read_unlock();
9866 }
9867
9868 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
9869
9870 int perf_swevent_get_recursion_context(void)
9871 {
9872 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9873
9874 return get_recursion_context(swhash->recursion);
9875 }
9876 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
9877
9878 void perf_swevent_put_recursion_context(int rctx)
9879 {
9880 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9881
9882 put_recursion_context(swhash->recursion, rctx);
9883 }
9884
9885 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
9886 {
9887 struct perf_sample_data data;
9888
9889 if (WARN_ON_ONCE(!regs))
9890 return;
9891
9892 perf_sample_data_init(&data, addr, 0);
9893 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
9894 }
9895
9896 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
9897 {
9898 int rctx;
9899
9900 preempt_disable_notrace();
9901 rctx = perf_swevent_get_recursion_context();
9902 if (unlikely(rctx < 0))
9903 goto fail;
9904
9905 ___perf_sw_event(event_id, nr, regs, addr);
9906
9907 perf_swevent_put_recursion_context(rctx);
9908 fail:
9909 preempt_enable_notrace();
9910 }
9911
9912 static void perf_swevent_read(struct perf_event *event)
9913 {
9914 }
9915
9916 static int perf_swevent_add(struct perf_event *event, int flags)
9917 {
9918 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
9919 struct hw_perf_event *hwc = &event->hw;
9920 struct hlist_head *head;
9921
9922 if (is_sampling_event(event)) {
9923 hwc->last_period = hwc->sample_period;
9924 perf_swevent_set_period(event);
9925 }
9926
9927 hwc->state = !(flags & PERF_EF_START);
9928
9929 head = find_swevent_head(swhash, event);
9930 if (WARN_ON_ONCE(!head))
9931 return -EINVAL;
9932
9933 hlist_add_head_rcu(&event->hlist_entry, head);
9934 perf_event_update_userpage(event);
9935
9936 return 0;
9937 }
9938
9939 static void perf_swevent_del(struct perf_event *event, int flags)
9940 {
9941 hlist_del_rcu(&event->hlist_entry);
9942 }
9943
9944 static void perf_swevent_start(struct perf_event *event, int flags)
9945 {
9946 event->hw.state = 0;
9947 }
9948
9949 static void perf_swevent_stop(struct perf_event *event, int flags)
9950 {
9951 event->hw.state = PERF_HES_STOPPED;
9952 }
9953
9954 /* Deref the hlist from the update side */
9955 static inline struct swevent_hlist *
9956 swevent_hlist_deref(struct swevent_htable *swhash)
9957 {
9958 return rcu_dereference_protected(swhash->swevent_hlist,
9959 lockdep_is_held(&swhash->hlist_mutex));
9960 }
9961
9962 static void swevent_hlist_release(struct swevent_htable *swhash)
9963 {
9964 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
9965
9966 if (!hlist)
9967 return;
9968
9969 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
9970 kfree_rcu(hlist, rcu_head);
9971 }
9972
9973 static void swevent_hlist_put_cpu(int cpu)
9974 {
9975 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9976
9977 mutex_lock(&swhash->hlist_mutex);
9978
9979 if (!--swhash->hlist_refcount)
9980 swevent_hlist_release(swhash);
9981
9982 mutex_unlock(&swhash->hlist_mutex);
9983 }
9984
9985 static void swevent_hlist_put(void)
9986 {
9987 int cpu;
9988
9989 for_each_possible_cpu(cpu)
9990 swevent_hlist_put_cpu(cpu);
9991 }
9992
9993 static int swevent_hlist_get_cpu(int cpu)
9994 {
9995 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9996 int err = 0;
9997
9998 mutex_lock(&swhash->hlist_mutex);
9999 if (!swevent_hlist_deref(swhash) &&
10000 cpumask_test_cpu(cpu, perf_online_mask)) {
10001 struct swevent_hlist *hlist;
10002
10003 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
10004 if (!hlist) {
10005 err = -ENOMEM;
10006 goto exit;
10007 }
10008 rcu_assign_pointer(swhash->swevent_hlist, hlist);
10009 }
10010 swhash->hlist_refcount++;
10011 exit:
10012 mutex_unlock(&swhash->hlist_mutex);
10013
10014 return err;
10015 }
10016
10017 static int swevent_hlist_get(void)
10018 {
10019 int err, cpu, failed_cpu;
10020
10021 mutex_lock(&pmus_lock);
10022 for_each_possible_cpu(cpu) {
10023 err = swevent_hlist_get_cpu(cpu);
10024 if (err) {
10025 failed_cpu = cpu;
10026 goto fail;
10027 }
10028 }
10029 mutex_unlock(&pmus_lock);
10030 return 0;
10031 fail:
10032 for_each_possible_cpu(cpu) {
10033 if (cpu == failed_cpu)
10034 break;
10035 swevent_hlist_put_cpu(cpu);
10036 }
10037 mutex_unlock(&pmus_lock);
10038 return err;
10039 }
10040
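/*
 * One static key per software event id: enabled while at least one
 * such event exists, letting the swevent fast path be patched out
 * entirely otherwise.
 */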
10041 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
10042
10043 static void sw_perf_event_destroy(struct perf_event *event)
10044 {
10045 u64 event_id = event->attr.config;
10046
10047 WARN_ON(event->parent);
10048
10049 static_key_slow_dec(&perf_swevent_enabled[event_id]);
10050 swevent_hlist_put();
10051 }
10052
10053 static struct pmu perf_cpu_clock; /* fwd declaration */
10054 static struct pmu perf_task_clock;
10055
10056 static int perf_swevent_init(struct perf_event *event)
10057 {
10058 u64 event_id = event->attr.config;
10059
10060 if (event->attr.type != PERF_TYPE_SOFTWARE)
10061 return -ENOENT;
10062
10063 /*
10064 * no branch sampling for software events
10065 */
10066 if (has_branch_stack(event))
10067 return -EOPNOTSUPP;
10068
10069 switch (event_id) {
10070 case PERF_COUNT_SW_CPU_CLOCK:
10071 event->attr.type = perf_cpu_clock.type;
10072 return -ENOENT;
10073 case PERF_COUNT_SW_TASK_CLOCK:
10074 event->attr.type = perf_task_clock.type;
10075 return -ENOENT;
10076
10077 default:
10078 break;
10079 }
10080
10081 if (event_id >= PERF_COUNT_SW_MAX)
10082 return -ENOENT;
10083
10084 if (!event->parent) {
10085 int err;
10086
10087 err = swevent_hlist_get();
10088 if (err)
10089 return err;
10090
10091 static_key_slow_inc(&perf_swevent_enabled[event_id]);
10092 event->destroy = sw_perf_event_destroy;
10093 }
10094
10095 return 0;
10096 }
10097
10098 static struct pmu perf_swevent = {
10099 .task_ctx_nr = perf_sw_context,
10100
10101 .capabilities = PERF_PMU_CAP_NO_NMI,
10102
10103 .event_init = perf_swevent_init,
10104 .add = perf_swevent_add,
10105 .del = perf_swevent_del,
10106 .start = perf_swevent_start,
10107 .stop = perf_swevent_stop,
10108 .read = perf_swevent_read,
10109 };
10110
10111 #ifdef CONFIG_EVENT_TRACING
10112
10113 static void tp_perf_event_destroy(struct perf_event *event)
10114 {
10115 perf_trace_destroy(event);
10116 }
10117
10118 static int perf_tp_event_init(struct perf_event *event)
10119 {
10120 int err;
10121
10122 if (event->attr.type != PERF_TYPE_TRACEPOINT)
10123 return -ENOENT;
10124
10125 /*
10126 * no branch sampling for tracepoint events
10127 */
10128 if (has_branch_stack(event))
10129 return -EOPNOTSUPP;
10130
10131 err = perf_trace_init(event);
10132 if (err)
10133 return err;
10134
10135 event->destroy = tp_perf_event_destroy;
10136
10137 return 0;
10138 }
10139
10140 static struct pmu perf_tracepoint = {
10141 .task_ctx_nr = perf_sw_context,
10142
10143 .event_init = perf_tp_event_init,
10144 .add = perf_trace_add,
10145 .del = perf_trace_del,
10146 .start = perf_swevent_start,
10147 .stop = perf_swevent_stop,
10148 .read = perf_swevent_read,
10149 };
10150
10151 static int perf_tp_filter_match(struct perf_event *event,
10152 struct perf_sample_data *data)
10153 {
10154 void *record = data->raw->frag.data;
10155
10156 /* only top level events have filters set */
10157 if (event->parent)
10158 event = event->parent;
10159
10160 if (likely(!event->filter) || filter_match_preds(event->filter, record))
10161 return 1;
10162 return 0;
10163 }
10164
10165 static int perf_tp_event_match(struct perf_event *event,
10166 struct perf_sample_data *data,
10167 struct pt_regs *regs)
10168 {
10169 if (event->hw.state & PERF_HES_STOPPED)
10170 return 0;
10171 /*
10172 * If exclude_kernel, only trace user-space tracepoints (uprobes)
10173 */
10174 if (event->attr.exclude_kernel && !user_mode(regs))
10175 return 0;
10176
10177 if (!perf_tp_filter_match(event, data))
10178 return 0;
10179
10180 return 1;
10181 }
10182
10183 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
10184 struct trace_event_call *call, u64 count,
10185 struct pt_regs *regs, struct hlist_head *head,
10186 struct task_struct *task)
10187 {
10188 if (bpf_prog_array_valid(call)) {
10189 *(struct pt_regs **)raw_data = regs;
10190 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
10191 perf_swevent_put_recursion_context(rctx);
10192 return;
10193 }
10194 }
10195 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
10196 rctx, task);
10197 }
10198 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
10199
10200 static void __perf_tp_event_target_task(u64 count, void *record,
10201 struct pt_regs *regs,
10202 struct perf_sample_data *data,
10203 struct perf_event *event)
10204 {
10205 struct trace_entry *entry = record;
10206
10207 if (event->attr.config != entry->type)
10208 return;
10209 /* Cannot deliver synchronous signal to other task. */
10210 if (event->attr.sigtrap)
10211 return;
10212 if (perf_tp_event_match(event, data, regs))
10213 perf_swevent_event(event, count, data, regs);
10214 }
10215
10216 static void perf_tp_event_target_task(u64 count, void *record,
10217 struct pt_regs *regs,
10218 struct perf_sample_data *data,
10219 struct perf_event_context *ctx)
10220 {
10221 unsigned int cpu = smp_processor_id();
10222 struct pmu *pmu = &perf_tracepoint;
10223 struct perf_event *event, *sibling;
10224
10225 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
10226 __perf_tp_event_target_task(count, record, regs, data, event);
10227 for_each_sibling_event(sibling, event)
10228 __perf_tp_event_target_task(count, record, regs, data, sibling);
10229 }
10230
10231 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
10232 __perf_tp_event_target_task(count, record, regs, data, event);
10233 for_each_sibling_event(sibling, event)
10234 __perf_tp_event_target_task(count, record, regs, data, sibling);
10235 }
10236 }
10237
10238 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
10239 struct pt_regs *regs, struct hlist_head *head, int rctx,
10240 struct task_struct *task)
10241 {
10242 struct perf_sample_data data;
10243 struct perf_event *event;
10244
10245 struct perf_raw_record raw = {
10246 .frag = {
10247 .size = entry_size,
10248 .data = record,
10249 },
10250 };
10251
10252 perf_sample_data_init(&data, 0, 0);
10253 perf_sample_save_raw_data(&data, &raw);
10254
10255 perf_trace_buf_update(record, event_type);
10256
10257 hlist_for_each_entry_rcu(event, head, hlist_entry) {
10258 if (perf_tp_event_match(event, &data, regs)) {
10259 perf_swevent_event(event, count, &data, regs);
10260
10261 /*
10262 * We reuse the same on-stack perf_sample_data here, but
10263 * some of its members are event-specific and need to be
10264 * re-computed for each matching swevent.
10265 * Re-initialize data->sample_flags so that the next event
10266 * does not skip preparing data just because
10267 * data->sample_flags is already set.
10268 */
10269 perf_sample_data_init(&data, 0, 0);
10270 perf_sample_save_raw_data(&data, &raw);
10271 }
10272 }
10273
10274 /*
10275 * If a target task was specified, also iterate its context and
10276 * deliver this event there too.
10277 */
10278 if (task && task != current) {
10279 struct perf_event_context *ctx;
10280
10281 rcu_read_lock();
10282 ctx = rcu_dereference(task->perf_event_ctxp);
10283 if (!ctx)
10284 goto unlock;
10285
10286 raw_spin_lock(&ctx->lock);
10287 perf_tp_event_target_task(count, record, regs, &data, ctx);
10288 raw_spin_unlock(&ctx->lock);
10289 unlock:
10290 rcu_read_unlock();
10291 }
10292
10293 perf_swevent_put_recursion_context(rctx);
10294 }
10295 EXPORT_SYMBOL_GPL(perf_tp_event);
10296
10297 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
10298 /*
10299 * Flags in config, used by the dynamic PMUs kprobe and uprobe.
10300 * The flags should match the following PMU_FORMAT_ATTR().
10301 *
10302 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
10303 * if not set, create kprobe/uprobe
10304 *
10305 * The following values specify a reference counter (or semaphore, in the
10306 * terminology of tools like dtrace and systemtap) for Userspace Statically
10307 * Defined Tracepoints (USDT). Currently, we use 32 bits for the offset.
10308 *
10309 * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config used as the offset
10310 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left
10311 */
10312 enum perf_probe_config {
10313 PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */
10314 PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
10315 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
10316 };
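/*
 * Example encoding (illustrative): a uretprobe whose USDT reference
 * counter (semaphore) sits at offset 0x1234 in the object file would
 * use:
 *
 *	attr.config = (0x1234ULL << PERF_UPROBE_REF_CTR_OFFSET_SHIFT) |
 *		      PERF_PROBE_CONFIG_IS_RETPROBE;
 */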
10317
10318 PMU_FORMAT_ATTR(retprobe, "config:0");
10319 #endif
10320
10321 #ifdef CONFIG_KPROBE_EVENTS
10322 static struct attribute *kprobe_attrs[] = {
10323 &format_attr_retprobe.attr,
10324 NULL,
10325 };
10326
10327 static struct attribute_group kprobe_format_group = {
10328 .name = "format",
10329 .attrs = kprobe_attrs,
10330 };
10331
10332 static const struct attribute_group *kprobe_attr_groups[] = {
10333 &kprobe_format_group,
10334 NULL,
10335 };
10336
10337 static int perf_kprobe_event_init(struct perf_event *event);
10338 static struct pmu perf_kprobe = {
10339 .task_ctx_nr = perf_sw_context,
10340 .event_init = perf_kprobe_event_init,
10341 .add = perf_trace_add,
10342 .del = perf_trace_del,
10343 .start = perf_swevent_start,
10344 .stop = perf_swevent_stop,
10345 .read = perf_swevent_read,
10346 .attr_groups = kprobe_attr_groups,
10347 };
10348
10349 static int perf_kprobe_event_init(struct perf_event *event)
10350 {
10351 int err;
10352 bool is_retprobe;
10353
10354 if (event->attr.type != perf_kprobe.type)
10355 return -ENOENT;
10356
10357 if (!perfmon_capable())
10358 return -EACCES;
10359
10360 /*
10361 * no branch sampling for probe events
10362 */
10363 if (has_branch_stack(event))
10364 return -EOPNOTSUPP;
10365
10366 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
10367 err = perf_kprobe_init(event, is_retprobe);
10368 if (err)
10369 return err;
10370
10371 event->destroy = perf_kprobe_destroy;
10372
10373 return 0;
10374 }
10375 #endif /* CONFIG_KPROBE_EVENTS */
10376
10377 #ifdef CONFIG_UPROBE_EVENTS
10378 PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
10379
10380 static struct attribute *uprobe_attrs[] = {
10381 &format_attr_retprobe.attr,
10382 &format_attr_ref_ctr_offset.attr,
10383 NULL,
10384 };
10385
10386 static struct attribute_group uprobe_format_group = {
10387 .name = "format",
10388 .attrs = uprobe_attrs,
10389 };
10390
10391 static const struct attribute_group *uprobe_attr_groups[] = {
10392 &uprobe_format_group,
10393 NULL,
10394 };
10395
10396 static int perf_uprobe_event_init(struct perf_event *event);
10397 static struct pmu perf_uprobe = {
10398 .task_ctx_nr = perf_sw_context,
10399 .event_init = perf_uprobe_event_init,
10400 .add = perf_trace_add,
10401 .del = perf_trace_del,
10402 .start = perf_swevent_start,
10403 .stop = perf_swevent_stop,
10404 .read = perf_swevent_read,
10405 .attr_groups = uprobe_attr_groups,
10406 };
10407
10408 static int perf_uprobe_event_init(struct perf_event *event)
10409 {
10410 int err;
10411 unsigned long ref_ctr_offset;
10412 bool is_retprobe;
10413
10414 if (event->attr.type != perf_uprobe.type)
10415 return -ENOENT;
10416
10417 if (!perfmon_capable())
10418 return -EACCES;
10419
10420 /*
10421 * no branch sampling for probe events
10422 */
10423 if (has_branch_stack(event))
10424 return -EOPNOTSUPP;
10425
10426 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
10427 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10428 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
10429 if (err)
10430 return err;
10431
10432 event->destroy = perf_uprobe_destroy;
10433
10434 return 0;
10435 }
10436 #endif /* CONFIG_UPROBE_EVENTS */
10437
10438 static inline void perf_tp_register(void)
10439 {
10440 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
10441 #ifdef CONFIG_KPROBE_EVENTS
10442 perf_pmu_register(&perf_kprobe, "kprobe", -1);
10443 #endif
10444 #ifdef CONFIG_UPROBE_EVENTS
10445 perf_pmu_register(&perf_uprobe, "uprobe", -1);
10446 #endif
10447 }
10448
10449 static void perf_event_free_filter(struct perf_event *event)
10450 {
10451 ftrace_profile_free_filter(event);
10452 }
10453
10454 #ifdef CONFIG_BPF_SYSCALL
10455 static void bpf_overflow_handler(struct perf_event *event,
10456 struct perf_sample_data *data,
10457 struct pt_regs *regs)
10458 {
10459 struct bpf_perf_event_data_kern ctx = {
10460 .data = data,
10461 .event = event,
10462 };
10463 struct bpf_prog *prog;
10464 int ret = 0;
10465
10466 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
10467 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
10468 goto out;
10469 rcu_read_lock();
10470 prog = READ_ONCE(event->prog);
10471 if (prog) {
10472 perf_prepare_sample(data, event, regs);
10473 ret = bpf_prog_run(prog, &ctx);
10474 }
10475 rcu_read_unlock();
10476 out:
10477 __this_cpu_dec(bpf_prog_active);
10478 if (!ret)
10479 return;
10480
10481 event->orig_overflow_handler(event, data, regs);
10482 }
10483
10484 static int perf_event_set_bpf_handler(struct perf_event *event,
10485 struct bpf_prog *prog,
10486 u64 bpf_cookie)
10487 {
10488 if (event->overflow_handler_context)
10489 /* hw breakpoint or kernel counter */
10490 return -EINVAL;
10491
10492 if (event->prog)
10493 return -EEXIST;
10494
10495 if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
10496 return -EINVAL;
10497
10498 if (event->attr.precise_ip &&
10499 prog->call_get_stack &&
10500 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
10501 event->attr.exclude_callchain_kernel ||
10502 event->attr.exclude_callchain_user)) {
10503 /*
10504 * On perf_event with precise_ip, calling bpf_get_stack()
10505 * may trigger unwinder warnings and occasional crashes.
10506 * bpf_get_[stack|stackid] works around this issue by using
10507 * callchain attached to perf_sample_data. If the
10508 * perf_event does not have a full (kernel and user) callchain
10509 * attached to perf_sample_data, do not allow attaching a BPF
10510 * program that calls bpf_get_[stack|stackid].
10511 */
10512 return -EPROTO;
10513 }
10514
10515 event->prog = prog;
10516 event->bpf_cookie = bpf_cookie;
10517 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
10518 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
10519 return 0;
10520 }
10521
10522 static void perf_event_free_bpf_handler(struct perf_event *event)
10523 {
10524 struct bpf_prog *prog = event->prog;
10525
10526 if (!prog)
10527 return;
10528
10529 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
10530 event->prog = NULL;
10531 bpf_prog_put(prog);
10532 }
10533 #else
10534 static int perf_event_set_bpf_handler(struct perf_event *event,
10535 struct bpf_prog *prog,
10536 u64 bpf_cookie)
10537 {
10538 return -EOPNOTSUPP;
10539 }
10540 static void perf_event_free_bpf_handler(struct perf_event *event)
10541 {
10542 }
10543 #endif
10544
10545 /*
10546 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10547 * with perf_event_open()
10548 */
10549 static inline bool perf_event_is_tracing(struct perf_event *event)
10550 {
10551 if (event->pmu == &perf_tracepoint)
10552 return true;
10553 #ifdef CONFIG_KPROBE_EVENTS
10554 if (event->pmu == &perf_kprobe)
10555 return true;
10556 #endif
10557 #ifdef CONFIG_UPROBE_EVENTS
10558 if (event->pmu == &perf_uprobe)
10559 return true;
10560 #endif
10561 return false;
10562 }
10563
10564 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
10565 u64 bpf_cookie)
10566 {
10567 bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
10568
10569 if (!perf_event_is_tracing(event))
10570 return perf_event_set_bpf_handler(event, prog, bpf_cookie);
10571
10572 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
10573 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
10574 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
10575 is_syscall_tp = is_syscall_trace_event(event->tp_event);
10576 if (!is_kprobe && !is_uprobe && !is_tracepoint && !is_syscall_tp)
10577 /* bpf programs can only be attached to u/kprobe or tracepoint */
10578 return -EINVAL;
10579
10580 if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) ||
10581 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
10582 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
10583 return -EINVAL;
10584
10585 if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
10586 /* only uprobe programs are allowed to be sleepable */
10587 return -EINVAL;
10588
10589 /* Kprobe override only works for kprobes, not uprobes. */
10590 if (prog->kprobe_override && !is_kprobe)
10591 return -EINVAL;
10592
10593 if (is_tracepoint || is_syscall_tp) {
10594 int off = trace_event_get_offsets(event->tp_event);
10595
10596 if (prog->aux->max_ctx_offset > off)
10597 return -EACCES;
10598 }
10599
10600 return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
10601 }
10602
10603 void perf_event_free_bpf_prog(struct perf_event *event)
10604 {
10605 if (!perf_event_is_tracing(event)) {
10606 perf_event_free_bpf_handler(event);
10607 return;
10608 }
10609 perf_event_detach_bpf_prog(event);
10610 }
10611
10612 #else
10613
10614 static inline void perf_tp_register(void)
10615 {
10616 }
10617
10618 static void perf_event_free_filter(struct perf_event *event)
10619 {
10620 }
10621
10622 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
10623 u64 bpf_cookie)
10624 {
10625 return -ENOENT;
10626 }
10627
10628 void perf_event_free_bpf_prog(struct perf_event *event)
10629 {
10630 }
10631 #endif /* CONFIG_EVENT_TRACING */
10632
10633 #ifdef CONFIG_HAVE_HW_BREAKPOINT
10634 void perf_bp_event(struct perf_event *bp, void *data)
10635 {
10636 struct perf_sample_data sample;
10637 struct pt_regs *regs = data;
10638
10639 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
10640
10641 if (!bp->hw.state && !perf_exclude_event(bp, regs))
10642 perf_swevent_event(bp, 1, &sample, regs);
10643 }
10644 #endif
10645
10646 /*
10647 * Allocate a new address filter
10648 */
10649 static struct perf_addr_filter *
10650 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
10651 {
10652 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
10653 struct perf_addr_filter *filter;
10654
10655 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
10656 if (!filter)
10657 return NULL;
10658
10659 INIT_LIST_HEAD(&filter->entry);
10660 list_add_tail(&filter->entry, filters);
10661
10662 return filter;
10663 }
10664
10665 static void free_filters_list(struct list_head *filters)
10666 {
10667 struct perf_addr_filter *filter, *iter;
10668
10669 list_for_each_entry_safe(filter, iter, filters, entry) {
10670 path_put(&filter->path);
10671 list_del(&filter->entry);
10672 kfree(filter);
10673 }
10674 }
10675
10676 /*
10677 * Free existing address filters and optionally install new ones
10678 */
10679 static void perf_addr_filters_splice(struct perf_event *event,
10680 struct list_head *head)
10681 {
10682 unsigned long flags;
10683 LIST_HEAD(list);
10684
10685 if (!has_addr_filter(event))
10686 return;
10687
10688 /* don't bother with children, they don't have their own filters */
10689 if (event->parent)
10690 return;
10691
10692 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
10693
10694 list_splice_init(&event->addr_filters.list, &list);
10695 if (head)
10696 list_splice(head, &event->addr_filters.list);
10697
10698 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
10699
10700 free_filters_list(&list);
10701 }
10702
10703 /*
10704 * Scan through mm's vmas and see if one of them matches the
10705 * @filter; if so, adjust filter's address range.
10706 * Called with mm::mmap_lock down for reading.
10707 */
10708 static void perf_addr_filter_apply(struct perf_addr_filter *filter,
10709 struct mm_struct *mm,
10710 struct perf_addr_filter_range *fr)
10711 {
10712 struct vm_area_struct *vma;
10713 VMA_ITERATOR(vmi, mm, 0);
10714
10715 for_each_vma(vmi, vma) {
10716 if (!vma->vm_file)
10717 continue;
10718
10719 if (perf_addr_filter_vma_adjust(filter, vma, fr))
10720 return;
10721 }
10722 }
10723
10724 /*
10725 * Update event's address range filters based on the
10726 * task's existing mappings, if any.
10727 */
10728 static void perf_event_addr_filters_apply(struct perf_event *event)
10729 {
10730 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
10731 struct task_struct *task = READ_ONCE(event->ctx->task);
10732 struct perf_addr_filter *filter;
10733 struct mm_struct *mm = NULL;
10734 unsigned int count = 0;
10735 unsigned long flags;
10736
10737 /*
10738 * We may observe TASK_TOMBSTONE, which means that the event tear-down
10739 * will stop on the parent's child_mutex that our caller is also holding
10740 */
10741 if (task == TASK_TOMBSTONE)
10742 return;
10743
10744 if (ifh->nr_file_filters) {
10745 mm = get_task_mm(task);
10746 if (!mm)
10747 goto restart;
10748
10749 mmap_read_lock(mm);
10750 }
10751
10752 raw_spin_lock_irqsave(&ifh->lock, flags);
10753 list_for_each_entry(filter, &ifh->list, entry) {
10754 if (filter->path.dentry) {
10755 /*
10756 * Adjust base offset if the filter is associated with a
10757 * binary that needs to be mapped:
10758 */
10759 event->addr_filter_ranges[count].start = 0;
10760 event->addr_filter_ranges[count].size = 0;
10761
10762 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
10763 } else {
10764 event->addr_filter_ranges[count].start = filter->offset;
10765 event->addr_filter_ranges[count].size = filter->size;
10766 }
10767
10768 count++;
10769 }
10770
10771 event->addr_filters_gen++;
10772 raw_spin_unlock_irqrestore(&ifh->lock, flags);
10773
10774 if (ifh->nr_file_filters) {
10775 mmap_read_unlock(mm);
10776
10777 mmput(mm);
10778 }
10779
10780 restart:
10781 perf_event_stop(event, 1);
10782 }
10783
10784 /*
10785 * Address range filtering: limiting the data to certain
10786 * instruction address ranges. Filters are ioctl()ed to us from
10787 * userspace as ASCII strings.
10788 *
10789 * Filter string format:
10790 *
10791 * ACTION RANGE_SPEC
10792 * where ACTION is one of the
10793 * * "filter": limit the trace to this region
10794 * * "start": start tracing from this address
10795 * * "stop": stop tracing at this address/region;
10796 * RANGE_SPEC is
10797 * * for kernel addresses: <start address>[/<size>]
10798 * * for object files: <start address>[/<size>]@</path/to/object/file>
10799 *
10800 * if <size> is not specified or is zero, the range is treated as a single
10801 * address; not valid for ACTION=="filter".
10802 */
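/*
 * Example filter strings (illustrative; the object path is
 * hypothetical):
 *
 *	filter 0x1000/0x100@/usr/lib/libfoo.so
 *	start 0xffffffff81000000
 *
 * The first limits tracing to a 0x100-byte region of the object
 * file; the second starts tracing at the given kernel address.
 */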
10803 enum {
10804 IF_ACT_NONE = -1,
10805 IF_ACT_FILTER,
10806 IF_ACT_START,
10807 IF_ACT_STOP,
10808 IF_SRC_FILE,
10809 IF_SRC_KERNEL,
10810 IF_SRC_FILEADDR,
10811 IF_SRC_KERNELADDR,
10812 };
10813
10814 enum {
10815 IF_STATE_ACTION = 0,
10816 IF_STATE_SOURCE,
10817 IF_STATE_END,
10818 };
10819
10820 static const match_table_t if_tokens = {
10821 { IF_ACT_FILTER, "filter" },
10822 { IF_ACT_START, "start" },
10823 { IF_ACT_STOP, "stop" },
10824 { IF_SRC_FILE, "%u/%u@%s" },
10825 { IF_SRC_KERNEL, "%u/%u" },
10826 { IF_SRC_FILEADDR, "%u@%s" },
10827 { IF_SRC_KERNELADDR, "%u" },
10828 { IF_ACT_NONE, NULL },
10829 };
10830
10831 /*
10832 * Address filter string parser
10833 */
10834 static int
10835 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
10836 struct list_head *filters)
10837 {
10838 struct perf_addr_filter *filter = NULL;
10839 char *start, *orig, *filename = NULL;
10840 substring_t args[MAX_OPT_ARGS];
10841 int state = IF_STATE_ACTION, token;
10842 unsigned int kernel = 0;
10843 int ret = -EINVAL;
10844
10845 orig = fstr = kstrdup(fstr, GFP_KERNEL);
10846 if (!fstr)
10847 return -ENOMEM;
10848
10849 while ((start = strsep(&fstr, " ,\n")) != NULL) {
10850 static const enum perf_addr_filter_action_t actions[] = {
10851 [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER,
10852 [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START,
10853 [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP,
10854 };
10855 ret = -EINVAL;
10856
10857 if (!*start)
10858 continue;
10859
10860 /* filter definition begins */
10861 if (state == IF_STATE_ACTION) {
10862 filter = perf_addr_filter_new(event, filters);
10863 if (!filter)
10864 goto fail;
10865 }
10866
10867 token = match_token(start, if_tokens, args);
10868 switch (token) {
10869 case IF_ACT_FILTER:
10870 case IF_ACT_START:
10871 case IF_ACT_STOP:
10872 if (state != IF_STATE_ACTION)
10873 goto fail;
10874
10875 filter->action = actions[token];
10876 state = IF_STATE_SOURCE;
10877 break;
10878
10879 case IF_SRC_KERNELADDR:
10880 case IF_SRC_KERNEL:
10881 kernel = 1;
10882 fallthrough;
10883
10884 case IF_SRC_FILEADDR:
10885 case IF_SRC_FILE:
10886 if (state != IF_STATE_SOURCE)
10887 goto fail;
10888
10889 *args[0].to = 0;
10890 ret = kstrtoul(args[0].from, 0, &filter->offset);
10891 if (ret)
10892 goto fail;
10893
10894 if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) {
10895 *args[1].to = 0;
10896 ret = kstrtoul(args[1].from, 0, &filter->size);
10897 if (ret)
10898 goto fail;
10899 }
10900
10901 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
10902 int fpos = token == IF_SRC_FILE ? 2 : 1;
10903
10904 kfree(filename);
10905 filename = match_strdup(&args[fpos]);
10906 if (!filename) {
10907 ret = -ENOMEM;
10908 goto fail;
10909 }
10910 }
10911
10912 state = IF_STATE_END;
10913 break;
10914
10915 default:
10916 goto fail;
10917 }
10918
10919 /*
10920 * Filter definition is fully parsed, validate and install it.
10921 * Make sure that it doesn't contradict itself or the event's
10922 * attribute.
10923 */
10924 if (state == IF_STATE_END) {
10925 ret = -EINVAL;
10926
10927 /*
10928 * ACTION "filter" must have a non-zero length region
10929 * specified.
10930 */
10931 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER &&
10932 !filter->size)
10933 goto fail;
10934
10935 if (!kernel) {
10936 if (!filename)
10937 goto fail;
10938
10939 /*
10940 * For now, we only support file-based filters
10941 * in per-task events; doing so for CPU-wide
10942 * events requires additional context switching
10943 * trickery, since same object code will be
10944 * mapped at different virtual addresses in
10945 * different processes.
10946 */
10947 ret = -EOPNOTSUPP;
10948 if (!event->ctx->task)
10949 goto fail;
10950
10951 /* look up the path and grab its inode */
10952 ret = kern_path(filename, LOOKUP_FOLLOW,
10953 &filter->path);
10954 if (ret)
10955 goto fail;
10956
10957 ret = -EINVAL;
10958 if (!filter->path.dentry ||
10959 !S_ISREG(d_inode(filter->path.dentry)
10960 ->i_mode))
10961 goto fail;
10962
10963 event->addr_filters.nr_file_filters++;
10964 }
10965
10966 /* ready to consume more filters */
10967 kfree(filename);
10968 filename = NULL;
10969 state = IF_STATE_ACTION;
10970 filter = NULL;
10971 kernel = 0;
10972 }
10973 }
10974
10975 if (state != IF_STATE_ACTION)
10976 goto fail;
10977
10978 kfree(filename);
10979 kfree(orig);
10980
10981 return 0;
10982
10983 fail:
10984 kfree(filename);
10985 free_filters_list(filters);
10986 kfree(orig);
10987
10988 return ret;
10989 }
10990
10991 static int
10992 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
10993 {
10994 LIST_HEAD(filters);
10995 int ret;
10996
10997 /*
10998 * Since this is called in the perf_ioctl() path, we're already holding
10999 * ctx::mutex.
11000 */
11001 lockdep_assert_held(&event->ctx->mutex);
11002
11003 if (WARN_ON_ONCE(event->parent))
11004 return -EINVAL;
11005
11006 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
11007 if (ret)
11008 goto fail_clear_files;
11009
11010 ret = event->pmu->addr_filters_validate(&filters);
11011 if (ret)
11012 goto fail_free_filters;
11013
11014 /* remove existing filters, if any */
11015 perf_addr_filters_splice(event, &filters);
11016
11017 /* install new filters */
11018 perf_event_for_each_child(event, perf_event_addr_filters_apply);
11019
11020 return ret;
11021
11022 fail_free_filters:
11023 free_filters_list(&filters);
11024
11025 fail_clear_files:
11026 event->addr_filters.nr_file_filters = 0;
11027
11028 return ret;
11029 }
11030
11031 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
11032 {
11033 int ret = -EINVAL;
11034 char *filter_str;
11035
11036 filter_str = strndup_user(arg, PAGE_SIZE);
11037 if (IS_ERR(filter_str))
11038 return PTR_ERR(filter_str);
11039
11040 #ifdef CONFIG_EVENT_TRACING
11041 if (perf_event_is_tracing(event)) {
11042 struct perf_event_context *ctx = event->ctx;
11043
11044 /*
11045 * Beware, here be dragons!!
11046 *
11047 * the tracepoint muck will deadlock against ctx->mutex, but
11048 * the tracepoint stuff does not actually need it. So
11049 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
11050 * already have a reference on ctx.
11051 *
11052 * This can result in the event getting moved to a different ctx,
11053 * but that does not affect the tracepoint state.
11054 */
11055 mutex_unlock(&ctx->mutex);
11056 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
11057 mutex_lock(&ctx->mutex);
11058 } else
11059 #endif
11060 if (has_addr_filter(event))
11061 ret = perf_event_set_addr_filter(event, filter_str);
11062
11063 kfree(filter_str);
11064 return ret;
11065 }
11066
11067 /*
11068 * hrtimer based swevent callback
11069 */
11070
11071 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
11072 {
11073 enum hrtimer_restart ret = HRTIMER_RESTART;
11074 struct perf_sample_data data;
11075 struct pt_regs *regs;
11076 struct perf_event *event;
11077 u64 period;
11078
11079 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
11080
11081 if (event->state != PERF_EVENT_STATE_ACTIVE)
11082 return HRTIMER_NORESTART;
11083
11084 event->pmu->read(event);
11085
11086 perf_sample_data_init(&data, 0, event->hw.last_period);
11087 regs = get_irq_regs();
11088
11089 if (regs && !perf_exclude_event(event, regs)) {
11090 if (!(event->attr.exclude_idle && is_idle_task(current)))
11091 if (__perf_event_overflow(event, 1, &data, regs))
11092 ret = HRTIMER_NORESTART;
11093 }
11094
11095 period = max_t(u64, 10000, event->hw.sample_period);
11096 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
11097
11098 return ret;
11099 }
11100
11101 static void perf_swevent_start_hrtimer(struct perf_event *event)
11102 {
11103 struct hw_perf_event *hwc = &event->hw;
11104 s64 period;
11105
11106 if (!is_sampling_event(event))
11107 return;
11108
11109 period = local64_read(&hwc->period_left);
11110 if (period) {
11111 if (period < 0)
11112 period = 10000;
11113
11114 local64_set(&hwc->period_left, 0);
11115 } else {
11116 period = max_t(u64, 10000, hwc->sample_period);
11117 }
11118 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
11119 HRTIMER_MODE_REL_PINNED_HARD);
11120 }
11121
11122 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
11123 {
11124 struct hw_perf_event *hwc = &event->hw;
11125
11126 if (is_sampling_event(event)) {
11127 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
11128 local64_set(&hwc->period_left, ktime_to_ns(remaining));
11129
11130 hrtimer_cancel(&hwc->hrtimer);
11131 }
11132 }
11133
11134 static void perf_swevent_init_hrtimer(struct perf_event *event)
11135 {
11136 struct hw_perf_event *hwc = &event->hw;
11137
11138 if (!is_sampling_event(event))
11139 return;
11140
11141 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
11142 hwc->hrtimer.function = perf_swevent_hrtimer;
11143
11144 /*
11145 * Since hrtimers have a fixed rate, we can do a static freq->period
11146 * mapping and avoid the whole period adjust feedback stuff.
11147 */
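	/*
	 * Example (illustrative): sample_freq == 1000 maps to a fixed
	 * period of NSEC_PER_SEC / 1000 == 1000000 ns between samples.
	 */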
11148 if (event->attr.freq) {
11149 long freq = event->attr.sample_freq;
11150
11151 event->attr.sample_period = NSEC_PER_SEC / freq;
11152 hwc->sample_period = event->attr.sample_period;
11153 local64_set(&hwc->period_left, hwc->sample_period);
11154 hwc->last_period = hwc->sample_period;
11155 event->attr.freq = 0;
11156 }
11157 }
11158
11159 /*
11160 * Software event: cpu wall time clock
11161 */
11162
11163 static void cpu_clock_event_update(struct perf_event *event)
11164 {
11165 s64 prev;
11166 u64 now;
11167
11168 now = local_clock();
11169 prev = local64_xchg(&event->hw.prev_count, now);
11170 local64_add(now - prev, &event->count);
11171 }
11172
11173 static void cpu_clock_event_start(struct perf_event *event, int flags)
11174 {
11175 local64_set(&event->hw.prev_count, local_clock());
11176 perf_swevent_start_hrtimer(event);
11177 }
11178
11179 static void cpu_clock_event_stop(struct perf_event *event, int flags)
11180 {
11181 perf_swevent_cancel_hrtimer(event);
11182 cpu_clock_event_update(event);
11183 }
11184
11185 static int cpu_clock_event_add(struct perf_event *event, int flags)
11186 {
11187 if (flags & PERF_EF_START)
11188 cpu_clock_event_start(event, flags);
11189 perf_event_update_userpage(event);
11190
11191 return 0;
11192 }
11193
11194 static void cpu_clock_event_del(struct perf_event *event, int flags)
11195 {
11196 cpu_clock_event_stop(event, flags);
11197 }
11198
11199 static void cpu_clock_event_read(struct perf_event *event)
11200 {
11201 cpu_clock_event_update(event);
11202 }
11203
11204 static int cpu_clock_event_init(struct perf_event *event)
11205 {
11206 if (event->attr.type != perf_cpu_clock.type)
11207 return -ENOENT;
11208
11209 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
11210 return -ENOENT;
11211
11212 /*
11213 * no branch sampling for software events
11214 */
11215 if (has_branch_stack(event))
11216 return -EOPNOTSUPP;
11217
11218 perf_swevent_init_hrtimer(event);
11219
11220 return 0;
11221 }
11222
11223 static struct pmu perf_cpu_clock = {
11224 .task_ctx_nr = perf_sw_context,
11225
11226 .capabilities = PERF_PMU_CAP_NO_NMI,
11227 .dev = PMU_NULL_DEV,
11228
11229 .event_init = cpu_clock_event_init,
11230 .add = cpu_clock_event_add,
11231 .del = cpu_clock_event_del,
11232 .start = cpu_clock_event_start,
11233 .stop = cpu_clock_event_stop,
11234 .read = cpu_clock_event_read,
11235 };
11236
11237 /*
11238 * Software event: task time clock
11239 */
11240
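/*
 * Illustrative: selected with .type = PERF_TYPE_SOFTWARE and .config =
 * PERF_COUNT_SW_TASK_CLOCK; unlike the cpu clock it only advances while
 * the target task runs, being based on ctx->time.
 */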
11241 static void task_clock_event_update(struct perf_event *event, u64 now)
11242 {
11243 u64 prev;
11244 s64 delta;
11245
11246 prev = local64_xchg(&event->hw.prev_count, now);
11247 delta = now - prev;
11248 local64_add(delta, &event->count);
11249 }
11250
11251 static void task_clock_event_start(struct perf_event *event, int flags)
11252 {
11253 local64_set(&event->hw.prev_count, event->ctx->time);
11254 perf_swevent_start_hrtimer(event);
11255 }
11256
11257 static void task_clock_event_stop(struct perf_event *event, int flags)
11258 {
11259 perf_swevent_cancel_hrtimer(event);
11260 task_clock_event_update(event, event->ctx->time);
11261 }
11262
11263 static int task_clock_event_add(struct perf_event *event, int flags)
11264 {
11265 if (flags & PERF_EF_START)
11266 task_clock_event_start(event, flags);
11267 perf_event_update_userpage(event);
11268
11269 return 0;
11270 }
11271
11272 static void task_clock_event_del(struct perf_event *event, int flags)
11273 {
11274 task_clock_event_stop(event, PERF_EF_UPDATE);
11275 }
11276
11277 static void task_clock_event_read(struct perf_event *event)
11278 {
11279 u64 now = perf_clock();
11280 u64 delta = now - event->ctx->timestamp;
11281 u64 time = event->ctx->time + delta;
11282
11283 task_clock_event_update(event, time);
11284 }
11285
11286 static int task_clock_event_init(struct perf_event *event)
11287 {
11288 if (event->attr.type != perf_task_clock.type)
11289 return -ENOENT;
11290
11291 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
11292 return -ENOENT;
11293
11294 /*
11295 * no branch sampling for software events
11296 */
11297 if (has_branch_stack(event))
11298 return -EOPNOTSUPP;
11299
11300 perf_swevent_init_hrtimer(event);
11301
11302 return 0;
11303 }
11304
11305 static struct pmu perf_task_clock = {
11306 .task_ctx_nr = perf_sw_context,
11307
11308 .capabilities = PERF_PMU_CAP_NO_NMI,
11309 .dev = PMU_NULL_DEV,
11310
11311 .event_init = task_clock_event_init,
11312 .add = task_clock_event_add,
11313 .del = task_clock_event_del,
11314 .start = task_clock_event_start,
11315 .stop = task_clock_event_stop,
11316 .read = task_clock_event_read,
11317 };
11318
11319 static void perf_pmu_nop_void(struct pmu *pmu)
11320 {
11321 }
11322
11323 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
11324 {
11325 }
11326
11327 static int perf_pmu_nop_int(struct pmu *pmu)
11328 {
11329 return 0;
11330 }
11331
11332 static int perf_event_nop_int(struct perf_event *event, u64 value)
11333 {
11334 return 0;
11335 }
11336
11337 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
11338
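/*
 * Default transaction stubs: only PERF_PMU_TXN_ADD transactions are
 * batched, by bracketing them in pmu_disable()/pmu_enable(); other
 * transaction types (e.g. PERF_PMU_TXN_READ) pass straight through.
 */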
11339 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
11340 {
11341 __this_cpu_write(nop_txn_flags, flags);
11342
11343 if (flags & ~PERF_PMU_TXN_ADD)
11344 return;
11345
11346 perf_pmu_disable(pmu);
11347 }
11348
11349 static int perf_pmu_commit_txn(struct pmu *pmu)
11350 {
11351 unsigned int flags = __this_cpu_read(nop_txn_flags);
11352
11353 __this_cpu_write(nop_txn_flags, 0);
11354
11355 if (flags & ~PERF_PMU_TXN_ADD)
11356 return 0;
11357
11358 perf_pmu_enable(pmu);
11359 return 0;
11360 }
11361
11362 static void perf_pmu_cancel_txn(struct pmu *pmu)
11363 {
11364 unsigned int flags = __this_cpu_read(nop_txn_flags);
11365
11366 __this_cpu_write(nop_txn_flags, 0);
11367
11368 if (flags & ~PERF_PMU_TXN_ADD)
11369 return;
11370
11371 perf_pmu_enable(pmu);
11372 }
11373
11374 static int perf_event_idx_default(struct perf_event *event)
11375 {
11376 return 0;
11377 }
11378
11379 static void free_pmu_context(struct pmu *pmu)
11380 {
11381 free_percpu(pmu->cpu_pmu_context);
11382 }
11383
11384 /*
11385 * Let userspace know that this PMU supports address range filtering:
11386 */
11387 static ssize_t nr_addr_filters_show(struct device *dev,
11388 struct device_attribute *attr,
11389 char *page)
11390 {
11391 struct pmu *pmu = dev_get_drvdata(dev);
11392
11393 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
11394 }
11395 DEVICE_ATTR_RO(nr_addr_filters);
11396
11397 static struct idr pmu_idr;
11398
11399 static ssize_t
11400 type_show(struct device *dev, struct device_attribute *attr, char *page)
11401 {
11402 struct pmu *pmu = dev_get_drvdata(dev);
11403
11404 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->type);
11405 }
11406 static DEVICE_ATTR_RO(type);
11407
11408 static ssize_t
11409 perf_event_mux_interval_ms_show(struct device *dev,
11410 struct device_attribute *attr,
11411 char *page)
11412 {
11413 struct pmu *pmu = dev_get_drvdata(dev);
11414
11415 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->hrtimer_interval_ms);
11416 }
11417
11418 static DEFINE_MUTEX(mux_interval_mutex);
11419
11420 static ssize_t
11421 perf_event_mux_interval_ms_store(struct device *dev,
11422 struct device_attribute *attr,
11423 const char *buf, size_t count)
11424 {
11425 struct pmu *pmu = dev_get_drvdata(dev);
11426 int timer, cpu, ret;
11427
11428 ret = kstrtoint(buf, 0, &timer);
11429 if (ret)
11430 return ret;
11431
11432 if (timer < 1)
11433 return -EINVAL;
11434
11435 /* same value, nothing to do */
11436 if (timer == pmu->hrtimer_interval_ms)
11437 return count;
11438
11439 mutex_lock(&mux_interval_mutex);
11440 pmu->hrtimer_interval_ms = timer;
11441
11442 /* update all cpuctx for this PMU */
11443 cpus_read_lock();
11444 for_each_online_cpu(cpu) {
11445 struct perf_cpu_pmu_context *cpc;
11446 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11447 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
11448
11449 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
11450 }
11451 cpus_read_unlock();
11452 mutex_unlock(&mux_interval_mutex);
11453
11454 return count;
11455 }
11456 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
11457
11458 static struct attribute *pmu_dev_attrs[] = {
11459 &dev_attr_type.attr,
11460 &dev_attr_perf_event_mux_interval_ms.attr,
11461 &dev_attr_nr_addr_filters.attr,
11462 NULL,
11463 };
11464
11465 static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
11466 {
11467 struct device *dev = kobj_to_dev(kobj);
11468 struct pmu *pmu = dev_get_drvdata(dev);
11469
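	/* Index 2 in pmu_dev_attrs[] is dev_attr_nr_addr_filters. */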
11470 if (n == 2 && !pmu->nr_addr_filters)
11471 return 0;
11472
11473 return a->mode;
11474 }
11475
11476 static struct attribute_group pmu_dev_attr_group = {
11477 .is_visible = pmu_dev_is_visible,
11478 .attrs = pmu_dev_attrs,
11479 };
11480
11481 static const struct attribute_group *pmu_dev_groups[] = {
11482 &pmu_dev_attr_group,
11483 NULL,
11484 };
11485
11486 static int pmu_bus_running;
11487 static struct bus_type pmu_bus = {
11488 .name = "event_source",
11489 .dev_groups = pmu_dev_groups,
11490 };
11491
11492 static void pmu_dev_release(struct device *dev)
11493 {
11494 kfree(dev);
11495 }
11496
11497 static int pmu_dev_alloc(struct pmu *pmu)
11498 {
11499 int ret = -ENOMEM;
11500
11501 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
11502 if (!pmu->dev)
11503 goto out;
11504
11505 pmu->dev->groups = pmu->attr_groups;
11506 device_initialize(pmu->dev);
11507
11508 dev_set_drvdata(pmu->dev, pmu);
11509 pmu->dev->bus = &pmu_bus;
11510 pmu->dev->parent = pmu->parent;
11511 pmu->dev->release = pmu_dev_release;
11512
11513 ret = dev_set_name(pmu->dev, "%s", pmu->name);
11514 if (ret)
11515 goto free_dev;
11516
11517 ret = device_add(pmu->dev);
11518 if (ret)
11519 goto free_dev;
11520
11521 if (pmu->attr_update) {
11522 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
11523 if (ret)
11524 goto del_dev;
11525 }
11526
11527 out:
11528 return ret;
11529
11530 del_dev:
11531 device_del(pmu->dev);
11532
11533 free_dev:
11534 put_device(pmu->dev);
11535 goto out;
11536 }
11537
11538 static struct lock_class_key cpuctx_mutex;
11539 static struct lock_class_key cpuctx_lock;
11540
11541 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
11542 {
11543 int cpu, ret, max = PERF_TYPE_MAX;
11544
11545 mutex_lock(&pmus_lock);
11546 ret = -ENOMEM;
11547 pmu->pmu_disable_count = alloc_percpu(int);
11548 if (!pmu->pmu_disable_count)
11549 goto unlock;
11550
11551 pmu->type = -1;
11552 if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
11553 ret = -EINVAL;
11554 goto free_pdc;
11555 }
11556
11557 pmu->name = name;
11558
11559 if (type >= 0)
11560 max = type;
11561
11562 ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
11563 if (ret < 0)
11564 goto free_pdc;
11565
11566 WARN_ON(type >= 0 && ret != type);
11567
11568 type = ret;
11569 pmu->type = type;
11570
11571 if (pmu_bus_running && !pmu->dev) {
11572 ret = pmu_dev_alloc(pmu);
11573 if (ret)
11574 goto free_idr;
11575 }
11576
11577 ret = -ENOMEM;
11578 pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
11579 if (!pmu->cpu_pmu_context)
11580 goto free_dev;
11581
11582 for_each_possible_cpu(cpu) {
11583 struct perf_cpu_pmu_context *cpc;
11584
11585 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11586 __perf_init_event_pmu_context(&cpc->epc, pmu);
11587 __perf_mux_hrtimer_init(cpc, cpu);
11588 }
11589
11590 if (!pmu->start_txn) {
11591 if (pmu->pmu_enable) {
11592 /*
11593 * If we have pmu_enable/pmu_disable calls, install
11594 * transaction stubs that use that to try and batch
11595 * hardware accesses.
11596 */
11597 pmu->start_txn = perf_pmu_start_txn;
11598 pmu->commit_txn = perf_pmu_commit_txn;
11599 pmu->cancel_txn = perf_pmu_cancel_txn;
11600 } else {
11601 pmu->start_txn = perf_pmu_nop_txn;
11602 pmu->commit_txn = perf_pmu_nop_int;
11603 pmu->cancel_txn = perf_pmu_nop_void;
11604 }
11605 }
11606
11607 if (!pmu->pmu_enable) {
11608 pmu->pmu_enable = perf_pmu_nop_void;
11609 pmu->pmu_disable = perf_pmu_nop_void;
11610 }
11611
11612 if (!pmu->check_period)
11613 pmu->check_period = perf_event_nop_int;
11614
11615 if (!pmu->event_idx)
11616 pmu->event_idx = perf_event_idx_default;
11617
11618 list_add_rcu(&pmu->entry, &pmus);
11619 atomic_set(&pmu->exclusive_cnt, 0);
11620 ret = 0;
11621 unlock:
11622 mutex_unlock(&pmus_lock);
11623
11624 return ret;
11625
11626 free_dev:
11627 if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
11628 device_del(pmu->dev);
11629 put_device(pmu->dev);
11630 }
11631
11632 free_idr:
11633 idr_remove(&pmu_idr, pmu->type);
11634
11635 free_pdc:
11636 free_percpu(pmu->pmu_disable_count);
11637 goto unlock;
11638 }
11639 EXPORT_SYMBOL_GPL(perf_pmu_register);
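
/*
 * Typical driver-side registration (illustrative sketch; the my_* names
 * are placeholders, not defined here):
 *
 *	static struct pmu my_pmu = {
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * A negative type requests a dynamically allocated type id, which then
 * shows up in /sys/bus/event_source/devices/my_pmu/type.
 */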
11640
11641 void perf_pmu_unregister(struct pmu *pmu)
11642 {
11643 mutex_lock(&pmus_lock);
11644 list_del_rcu(&pmu->entry);
11645
11646 /*
11647 * We dereference the pmu list under both SRCU and regular RCU, so
11648 * synchronize against both of those.
11649 */
11650 synchronize_srcu(&pmus_srcu);
11651 synchronize_rcu();
11652
11653 free_percpu(pmu->pmu_disable_count);
11654 idr_remove(&pmu_idr, pmu->type);
11655 if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
11656 if (pmu->nr_addr_filters)
11657 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
11658 device_del(pmu->dev);
11659 put_device(pmu->dev);
11660 }
11661 free_pmu_context(pmu);
11662 mutex_unlock(&pmus_lock);
11663 }
11664 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
11665
11666 static inline bool has_extended_regs(struct perf_event *event)
11667 {
11668 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
11669 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
11670 }
11671
11672 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
11673 {
11674 struct perf_event_context *ctx = NULL;
11675 int ret;
11676
11677 if (!try_module_get(pmu->module))
11678 return -ENODEV;
11679
11680 /*
11681 * A number of pmu->event_init() methods iterate the sibling_list to,
11682 * for example, validate if the group fits on the PMU. Therefore,
11683 * if this is a sibling event, acquire the ctx->mutex to protect
11684 * the sibling_list.
11685 */
11686 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
11687 /*
11688 * This ctx->mutex can nest when we're called through
11689 * inheritance. See the perf_event_ctx_lock_nested() comment.
11690 */
11691 ctx = perf_event_ctx_lock_nested(event->group_leader,
11692 SINGLE_DEPTH_NESTING);
11693 BUG_ON(!ctx);
11694 }
11695
11696 event->pmu = pmu;
11697 ret = pmu->event_init(event);
11698
11699 if (ctx)
11700 perf_event_ctx_unlock(event->group_leader, ctx);
11701
11702 if (!ret) {
11703 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
11704 has_extended_regs(event))
11705 ret = -EOPNOTSUPP;
11706
11707 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
11708 event_has_any_exclude_flag(event))
11709 ret = -EINVAL;
11710
11711 if (ret && event->destroy)
11712 event->destroy(event);
11713 }
11714
11715 if (ret)
11716 module_put(pmu->module);
11717
11718 return ret;
11719 }
11720
11721 static struct pmu *perf_init_event(struct perf_event *event)
11722 {
11723 bool extended_type = false;
11724 int idx, type, ret;
11725 struct pmu *pmu;
11726
11727 idx = srcu_read_lock(&pmus_srcu);
11728
11729 /*
11730 * Save the original type before calling pmu->event_init(), since certain
11731 * pmus overwrite event->attr.type to forward the event to another pmu.
11732 */
11733 event->orig_type = event->attr.type;
11734
11735 /* Try parent's PMU first: */
11736 if (event->parent && event->parent->pmu) {
11737 pmu = event->parent->pmu;
11738 ret = perf_try_init_event(pmu, event);
11739 if (!ret)
11740 goto unlock;
11741 }
11742
11743 /*
11744 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
11745 * are often aliases for PERF_TYPE_RAW.
11746 */
11747 type = event->attr.type;
11748 if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) {
11749 type = event->attr.config >> PERF_PMU_TYPE_SHIFT;
11750 if (!type) {
11751 type = PERF_TYPE_RAW;
11752 } else {
11753 extended_type = true;
11754 event->attr.config &= PERF_HW_EVENT_MASK;
11755 }
11756 }
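
	/*
	 * Example (illustrative id): attr.type == PERF_TYPE_HARDWARE with
	 * attr.config == (4ULL << PERF_PMU_TYPE_SHIFT) | PERF_COUNT_HW_CPU_CYCLES
	 * directs the event at the PMU whose type id is 4, with the config
	 * reduced to the plain hardware event id.
	 */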
11757
11758 again:
11759 rcu_read_lock();
11760 pmu = idr_find(&pmu_idr, type);
11761 rcu_read_unlock();
11762 if (pmu) {
11763 if (event->attr.type != type && type != PERF_TYPE_RAW &&
11764 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
11765 goto fail;
11766
11767 ret = perf_try_init_event(pmu, event);
11768 if (ret == -ENOENT && event->attr.type != type && !extended_type) {
11769 type = event->attr.type;
11770 goto again;
11771 }
11772
11773 if (ret)
11774 pmu = ERR_PTR(ret);
11775
11776 goto unlock;
11777 }
11778
11779 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
11780 ret = perf_try_init_event(pmu, event);
11781 if (!ret)
11782 goto unlock;
11783
11784 if (ret != -ENOENT) {
11785 pmu = ERR_PTR(ret);
11786 goto unlock;
11787 }
11788 }
11789 fail:
11790 pmu = ERR_PTR(-ENOENT);
11791 unlock:
11792 srcu_read_unlock(&pmus_srcu, idx);
11793
11794 return pmu;
11795 }
11796
11797 static void attach_sb_event(struct perf_event *event)
11798 {
11799 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
11800
11801 raw_spin_lock(&pel->lock);
11802 list_add_rcu(&event->sb_list, &pel->list);
11803 raw_spin_unlock(&pel->lock);
11804 }
11805
11806 /*
11807 * We keep a list of all !task (and therefore per-cpu) events
11808 * that need to receive side-band records.
11809 *
11810 * This avoids having to scan all the various PMU per-cpu contexts
11811 * looking for them.
11812 */
11813 static void account_pmu_sb_event(struct perf_event *event)
11814 {
11815 if (is_sb_event(event))
11816 attach_sb_event(event);
11817 }
11818
11819 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
11820 static void account_freq_event_nohz(void)
11821 {
11822 #ifdef CONFIG_NO_HZ_FULL
11823 /* Lock so we don't race with concurrent unaccount */
11824 spin_lock(&nr_freq_lock);
11825 if (atomic_inc_return(&nr_freq_events) == 1)
11826 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
11827 spin_unlock(&nr_freq_lock);
11828 #endif
11829 }
11830
11831 static void account_freq_event(void)
11832 {
11833 if (tick_nohz_full_enabled())
11834 account_freq_event_nohz();
11835 else
11836 atomic_inc(&nr_freq_events);
11837 }
11838
11839
11840 static void account_event(struct perf_event *event)
11841 {
11842 bool inc = false;
11843
11844 if (event->parent)
11845 return;
11846
11847 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
11848 inc = true;
11849 if (event->attr.mmap || event->attr.mmap_data)
11850 atomic_inc(&nr_mmap_events);
11851 if (event->attr.build_id)
11852 atomic_inc(&nr_build_id_events);
11853 if (event->attr.comm)
11854 atomic_inc(&nr_comm_events);
11855 if (event->attr.namespaces)
11856 atomic_inc(&nr_namespaces_events);
11857 if (event->attr.cgroup)
11858 atomic_inc(&nr_cgroup_events);
11859 if (event->attr.task)
11860 atomic_inc(&nr_task_events);
11861 if (event->attr.freq)
11862 account_freq_event();
11863 if (event->attr.context_switch) {
11864 atomic_inc(&nr_switch_events);
11865 inc = true;
11866 }
11867 if (has_branch_stack(event))
11868 inc = true;
11869 if (is_cgroup_event(event))
11870 inc = true;
11871 if (event->attr.ksymbol)
11872 atomic_inc(&nr_ksymbol_events);
11873 if (event->attr.bpf_event)
11874 atomic_inc(&nr_bpf_events);
11875 if (event->attr.text_poke)
11876 atomic_inc(&nr_text_poke_events);
11877
11878 if (inc) {
11879 /*
11880 * We need the mutex here because static_branch_enable()
11881 * must complete *before* the perf_sched_count increment
11882 * becomes visible.
11883 */
11884 if (atomic_inc_not_zero(&perf_sched_count))
11885 goto enabled;
11886
11887 mutex_lock(&perf_sched_mutex);
11888 if (!atomic_read(&perf_sched_count)) {
11889 static_branch_enable(&perf_sched_events);
11890 /*
11891 * Guarantee that all CPUs observe the key change and
11892 * call the perf scheduling hooks before proceeding to
11893 * install events that need them.
11894 */
11895 synchronize_rcu();
11896 }
11897 /*
11898 * Now that we have waited for the sync_sched(), allow further
11899 * increments to by-pass the mutex.
11900 */
11901 atomic_inc(&perf_sched_count);
11902 mutex_unlock(&perf_sched_mutex);
11903 }
11904 enabled:
11905
11906 account_pmu_sb_event(event);
11907 }
11908
11909 /*
11910 * Allocate and initialize an event structure
11911 */
11912 static struct perf_event *
11913 perf_event_alloc(struct perf_event_attr *attr, int cpu,
11914 struct task_struct *task,
11915 struct perf_event *group_leader,
11916 struct perf_event *parent_event,
11917 perf_overflow_handler_t overflow_handler,
11918 void *context, int cgroup_fd)
11919 {
11920 struct pmu *pmu;
11921 struct perf_event *event;
11922 struct hw_perf_event *hwc;
11923 long err = -EINVAL;
11924 int node;
11925
11926 if ((unsigned)cpu >= nr_cpu_ids) {
11927 if (!task || cpu != -1)
11928 return ERR_PTR(-EINVAL);
11929 }
11930 if (attr->sigtrap && !task) {
11931 /* Requires a task: avoid signalling random tasks. */
11932 return ERR_PTR(-EINVAL);
11933 }
11934
11935 node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
11936 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
11937 node);
11938 if (!event)
11939 return ERR_PTR(-ENOMEM);
11940
11941 /*
11942 * Single events are their own group leaders, with an
11943 * empty sibling list:
11944 */
11945 if (!group_leader)
11946 group_leader = event;
11947
11948 mutex_init(&event->child_mutex);
11949 INIT_LIST_HEAD(&event->child_list);
11950
11951 INIT_LIST_HEAD(&event->event_entry);
11952 INIT_LIST_HEAD(&event->sibling_list);
11953 INIT_LIST_HEAD(&event->active_list);
11954 init_event_group(event);
11955 INIT_LIST_HEAD(&event->rb_entry);
11956 INIT_LIST_HEAD(&event->active_entry);
11957 INIT_LIST_HEAD(&event->addr_filters.list);
11958 INIT_HLIST_NODE(&event->hlist_entry);
11959
11960
11961 init_waitqueue_head(&event->waitq);
11962 init_irq_work(&event->pending_irq, perf_pending_irq);
11963 init_task_work(&event->pending_task, perf_pending_task);
11964 rcuwait_init(&event->pending_work_wait);
11965
11966 mutex_init(&event->mmap_mutex);
11967 raw_spin_lock_init(&event->addr_filters.lock);
11968
11969 atomic_long_set(&event->refcount, 1);
11970 event->cpu = cpu;
11971 event->attr = *attr;
11972 event->group_leader = group_leader;
11973 event->pmu = NULL;
11974 event->oncpu = -1;
11975
11976 event->parent = parent_event;
11977
11978 event->ns = get_pid_ns(task_active_pid_ns(current));
11979 event->id = atomic64_inc_return(&perf_event_id);
11980
11981 event->state = PERF_EVENT_STATE_INACTIVE;
11982
11983 if (parent_event)
11984 event->event_caps = parent_event->event_caps;
11985
11986 if (task) {
11987 event->attach_state = PERF_ATTACH_TASK;
11988 /*
11989 * XXX pmu::event_init needs to know what task to account to
11990 * and we cannot use the ctx information because we need the
11991 * pmu before we get a ctx.
11992 */
11993 event->hw.target = get_task_struct(task);
11994 }
11995
11996 event->clock = &local_clock;
11997 if (parent_event)
11998 event->clock = parent_event->clock;
11999
12000 if (!overflow_handler && parent_event) {
12001 overflow_handler = parent_event->overflow_handler;
12002 context = parent_event->overflow_handler_context;
12003 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
12004 if (overflow_handler == bpf_overflow_handler) {
12005 struct bpf_prog *prog = parent_event->prog;
12006
12007 bpf_prog_inc(prog);
12008 event->prog = prog;
12009 event->orig_overflow_handler =
12010 parent_event->orig_overflow_handler;
12011 }
12012 #endif
12013 }
12014
12015 if (overflow_handler) {
12016 event->overflow_handler = overflow_handler;
12017 event->overflow_handler_context = context;
12018 } else if (is_write_backward(event)) {
12019 event->overflow_handler = perf_event_output_backward;
12020 event->overflow_handler_context = NULL;
12021 } else {
12022 event->overflow_handler = perf_event_output_forward;
12023 event->overflow_handler_context = NULL;
12024 }
12025
12026 perf_event__state_init(event);
12027
12028 pmu = NULL;
12029
12030 hwc = &event->hw;
12031 hwc->sample_period = attr->sample_period;
12032 if (attr->freq && attr->sample_freq)
12033 hwc->sample_period = 1;
12034 hwc->last_period = hwc->sample_period;
12035
12036 local64_set(&hwc->period_left, hwc->sample_period);
12037
12038 /*
12039 * We currently do not support PERF_SAMPLE_READ on inherited events.
12040 * See perf_output_read().
12041 */
12042 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
12043 goto err_ns;
12044
12045 if (!has_branch_stack(event))
12046 event->attr.branch_sample_type = 0;
12047
12048 pmu = perf_init_event(event);
12049 if (IS_ERR(pmu)) {
12050 err = PTR_ERR(pmu);
12051 goto err_ns;
12052 }
12053
12054 /*
12055 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
12056 * events (they don't make sense as the cgroup will be different
12057 * on other CPUs in the uncore mask).
12058 */
12059 if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
12060 err = -EINVAL;
12061 goto err_pmu;
12062 }
12063
12064 if (event->attr.aux_output &&
12065 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
12066 err = -EOPNOTSUPP;
12067 goto err_pmu;
12068 }
12069
12070 if (cgroup_fd != -1) {
12071 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
12072 if (err)
12073 goto err_pmu;
12074 }
12075
12076 err = exclusive_event_init(event);
12077 if (err)
12078 goto err_pmu;
12079
12080 if (has_addr_filter(event)) {
12081 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
12082 sizeof(struct perf_addr_filter_range),
12083 GFP_KERNEL);
12084 if (!event->addr_filter_ranges) {
12085 err = -ENOMEM;
12086 goto err_per_task;
12087 }
12088
12089 /*
12090 * Clone the parent's vma offsets: they are valid until exec()
12091 * even if the mm is not shared with the parent.
12092 */
12093 if (event->parent) {
12094 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
12095
12096 raw_spin_lock_irq(&ifh->lock);
12097 memcpy(event->addr_filter_ranges,
12098 event->parent->addr_filter_ranges,
12099 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
12100 raw_spin_unlock_irq(&ifh->lock);
12101 }
12102
12103 /* force hw sync on the address filters */
12104 event->addr_filters_gen = 1;
12105 }
12106
12107 if (!event->parent) {
12108 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
12109 err = get_callchain_buffers(attr->sample_max_stack);
12110 if (err)
12111 goto err_addr_filters;
12112 }
12113 }
12114
12115 err = security_perf_event_alloc(event);
12116 if (err)
12117 goto err_callchain_buffer;
12118
12119 /* symmetric to unaccount_event() in _free_event() */
12120 account_event(event);
12121
12122 return event;
12123
12124 err_callchain_buffer:
12125 if (!event->parent) {
12126 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
12127 put_callchain_buffers();
12128 }
12129 err_addr_filters:
12130 kfree(event->addr_filter_ranges);
12131
12132 err_per_task:
12133 exclusive_event_destroy(event);
12134
12135 err_pmu:
12136 if (is_cgroup_event(event))
12137 perf_detach_cgroup(event);
12138 if (event->destroy)
12139 event->destroy(event);
12140 module_put(pmu->module);
12141 err_ns:
12142 if (event->hw.target)
12143 put_task_struct(event->hw.target);
12144 call_rcu(&event->rcu_head, free_event_rcu);
12145
12146 return ERR_PTR(err);
12147 }
12148
12149 static int perf_copy_attr(struct perf_event_attr __user *uattr,
12150 struct perf_event_attr *attr)
12151 {
12152 u32 size;
12153 int ret;
12154
12155 /* Zero the full structure, so that a short copy leaves the tail zeroed. */
12156 memset(attr, 0, sizeof(*attr));
12157
12158 ret = get_user(size, &uattr->size);
12159 if (ret)
12160 return ret;
12161
12162 /* ABI compatibility quirk: */
12163 if (!size)
12164 size = PERF_ATTR_SIZE_VER0;
12165 if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
12166 goto err_size;
12167
12168 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
12169 if (ret) {
12170 if (ret == -E2BIG)
12171 goto err_size;
12172 return ret;
12173 }
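
	/*
	 * Note: copy_struct_from_user() accepts a larger-than-known struct
	 * as long as the trailing bytes are zero (forward ABI compatibility);
	 * a non-zero tail is what produces -E2BIG above.
	 */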
12174
12175 attr->size = size;
12176
12177 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
12178 return -EINVAL;
12179
12180 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
12181 return -EINVAL;
12182
12183 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
12184 return -EINVAL;
12185
12186 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
12187 u64 mask = attr->branch_sample_type;
12188
12189 /* only using defined bits */
12190 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
12191 return -EINVAL;
12192
12193 /* at least one branch bit must be set */
12194 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
12195 return -EINVAL;
12196
12197 /* propagate priv level, when not set for branch */
12198 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
12199
12200 /* exclude_kernel checked on syscall entry */
12201 if (!attr->exclude_kernel)
12202 mask |= PERF_SAMPLE_BRANCH_KERNEL;
12203
12204 if (!attr->exclude_user)
12205 mask |= PERF_SAMPLE_BRANCH_USER;
12206
12207 if (!attr->exclude_hv)
12208 mask |= PERF_SAMPLE_BRANCH_HV;
12209 /*
12210 * adjust user setting (for HW filter setup)
12211 */
12212 attr->branch_sample_type = mask;
12213 }
12214 /* privileged levels capture (kernel, hv): check permissions */
12215 if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
12216 ret = perf_allow_kernel(attr);
12217 if (ret)
12218 return ret;
12219 }
12220 }
12221
12222 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
12223 ret = perf_reg_validate(attr->sample_regs_user);
12224 if (ret)
12225 return ret;
12226 }
12227
12228 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
12229 if (!arch_perf_have_user_stack_dump())
12230 return -ENOSYS;
12231
12232 /*
12233 * We have __u32 type for the size, but so far
12234 * we can only use __u16 as maximum due to the
12235 * __u16 sample size limit.
12236 */
12237 if (attr->sample_stack_user >= USHRT_MAX)
12238 return -EINVAL;
12239 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
12240 return -EINVAL;
12241 }
12242
12243 if (!attr->sample_max_stack)
12244 attr->sample_max_stack = sysctl_perf_event_max_stack;
12245
12246 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
12247 ret = perf_reg_validate(attr->sample_regs_intr);
12248
12249 #ifndef CONFIG_CGROUP_PERF
12250 if (attr->sample_type & PERF_SAMPLE_CGROUP)
12251 return -EINVAL;
12252 #endif
12253 if ((attr->sample_type & PERF_SAMPLE_WEIGHT) &&
12254 (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
12255 return -EINVAL;
12256
12257 if (!attr->inherit && attr->inherit_thread)
12258 return -EINVAL;
12259
12260 if (attr->remove_on_exec && attr->enable_on_exec)
12261 return -EINVAL;
12262
12263 if (attr->sigtrap && !attr->remove_on_exec)
12264 return -EINVAL;
12265
12266 out:
12267 return ret;
12268
12269 err_size:
12270 put_user(sizeof(*attr), &uattr->size);
12271 ret = -E2BIG;
12272 goto out;
12273 }
12274
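/*
 * Take two mutexes of the same class in address order, so concurrent
 * callers locking the same pair (in either order) cannot ABBA-deadlock.
 */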
12275 static void mutex_lock_double(struct mutex *a, struct mutex *b)
12276 {
12277 if (b < a)
12278 swap(a, b);
12279
12280 mutex_lock(a);
12281 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
12282 }
12283
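/*
 * Redirect @event's output into @output_event's ring buffer; reached via
 * the PERF_EVENT_IOC_SET_OUTPUT ioctl and the PERF_FLAG_FD_OUTPUT open
 * flag.
 */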
12284 static int
12285 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
12286 {
12287 struct perf_buffer *rb = NULL;
12288 int ret = -EINVAL;
12289
12290 if (!output_event) {
12291 mutex_lock(&event->mmap_mutex);
12292 goto set;
12293 }
12294
12295 /* don't allow circular references */
12296 if (event == output_event)
12297 goto out;
12298
12299 /*
12300 * Don't allow cross-cpu buffers
12301 */
12302 if (output_event->cpu != event->cpu)
12303 goto out;
12304
12305 /*
12306 * If it's not a per-cpu rb, it must be the same task.
12307 */
12308 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
12309 goto out;
12310
12311 /*
12312 * Mixing clocks in the same buffer is trouble you don't need.
12313 */
12314 if (output_event->clock != event->clock)
12315 goto out;
12316
12317 /*
12318 * Either writing ring buffer from beginning or from end.
12319 * Mixing is not allowed.
12320 */
12321 if (is_write_backward(output_event) != is_write_backward(event))
12322 goto out;
12323
12324 /*
12325 * If both events generate aux data, they must be on the same PMU
12326 */
12327 if (has_aux(event) && has_aux(output_event) &&
12328 event->pmu != output_event->pmu)
12329 goto out;
12330
12331 /*
12332 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
12333 * output_event is already on rb->event_list, and the list iteration
12334 * restarts after every removal, it is guaranteed this new event is
12335 * observed *OR* if output_event is already removed, it's guaranteed we
12336 * observe !rb->mmap_count.
12337 */
12338 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
12339 set:
12340 /* Can't redirect output if we've got an active mmap() */
12341 if (atomic_read(&event->mmap_count))
12342 goto unlock;
12343
12344 if (output_event) {
12345 /* get the rb we want to redirect to */
12346 rb = ring_buffer_get(output_event);
12347 if (!rb)
12348 goto unlock;
12349
12350 /* did we race against perf_mmap_close() */
12351 if (!atomic_read(&rb->mmap_count)) {
12352 ring_buffer_put(rb);
12353 goto unlock;
12354 }
12355 }
12356
12357 ring_buffer_attach(event, rb);
12358
12359 ret = 0;
12360 unlock:
12361 mutex_unlock(&event->mmap_mutex);
12362 if (output_event)
12363 mutex_unlock(&output_event->mmap_mutex);
12364
12365 out:
12366 return ret;
12367 }
12368
12369 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
12370 {
12371 bool nmi_safe = false;
12372
12373 switch (clk_id) {
12374 case CLOCK_MONOTONIC:
12375 event->clock = &ktime_get_mono_fast_ns;
12376 nmi_safe = true;
12377 break;
12378
12379 case CLOCK_MONOTONIC_RAW:
12380 event->clock = &ktime_get_raw_fast_ns;
12381 nmi_safe = true;
12382 break;
12383
12384 case CLOCK_REALTIME:
12385 event->clock = &ktime_get_real_ns;
12386 break;
12387
12388 case CLOCK_BOOTTIME:
12389 event->clock = &ktime_get_boottime_ns;
12390 break;
12391
12392 case CLOCK_TAI:
12393 event->clock = &ktime_get_clocktai_ns;
12394 break;
12395
12396 default:
12397 return -EINVAL;
12398 }
12399
12400 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
12401 return -EINVAL;
12402
12403 return 0;
12404 }
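
/*
 * Userspace selects a clock via perf_event_attr::use_clockid plus
 * perf_event_attr::clockid; PMUs that can fire in NMI context (no
 * PERF_PMU_CAP_NO_NMI) are restricted to the NMI-safe fast clocks above.
 */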
12405
12406 static bool
12407 perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
12408 {
12409 unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
12410 bool is_capable = perfmon_capable();
12411
12412 if (attr->sigtrap) {
12413 /*
12414 * perf_event_attr::sigtrap sends signals to the other task.
12415 * Require the current task to also have CAP_KILL.
12416 */
12417 rcu_read_lock();
12418 is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
12419 rcu_read_unlock();
12420
12421 /*
12422 * If the required capabilities aren't available, checks for
12423 * ptrace permissions: upgrade to ATTACH, since sending signals
12424 * can effectively change the target task.
12425 */
12426 ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
12427 }
12428
12429 /*
12430 * Preserve ptrace permission check for backwards compatibility. The
12431 * ptrace check also includes checks that the current task and other
12432 * task have matching uids, and is therefore not done here explicitly.
12433 */
12434 return is_capable || ptrace_may_access(task, ptrace_mode);
12435 }
12436
12437 /**
12438 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12439 *
12440 * @attr_uptr: event_id type attributes for monitoring/sampling
12441 * @pid: target pid
12442 * @cpu: target cpu
12443 * @group_fd: group leader event fd
12444 * @flags: perf event open flags
12445 */
12446 SYSCALL_DEFINE5(perf_event_open,
12447 struct perf_event_attr __user *, attr_uptr,
12448 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
12449 {
12450 struct perf_event *group_leader = NULL, *output_event = NULL;
12451 struct perf_event_pmu_context *pmu_ctx;
12452 struct perf_event *event, *sibling;
12453 struct perf_event_attr attr;
12454 struct perf_event_context *ctx;
12455 struct file *event_file = NULL;
12456 struct fd group = {NULL, 0};
12457 struct task_struct *task = NULL;
12458 struct pmu *pmu;
12459 int event_fd;
12460 int move_group = 0;
12461 int err;
12462 int f_flags = O_RDWR;
12463 int cgroup_fd = -1;
12464
12465 /* for future expandability... */
12466 if (flags & ~PERF_FLAG_ALL)
12467 return -EINVAL;
12468
12469 err = perf_copy_attr(attr_uptr, &attr);
12470 if (err)
12471 return err;
12472
12473 /* Do we allow access to perf_event_open(2) ? */
12474 err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
12475 if (err)
12476 return err;
12477
12478 if (!attr.exclude_kernel) {
12479 err = perf_allow_kernel(&attr);
12480 if (err)
12481 return err;
12482 }
12483
12484 if (attr.namespaces) {
12485 if (!perfmon_capable())
12486 return -EACCES;
12487 }
12488
12489 if (attr.freq) {
12490 if (attr.sample_freq > sysctl_perf_event_sample_rate)
12491 return -EINVAL;
12492 } else {
12493 if (attr.sample_period & (1ULL << 63))
12494 return -EINVAL;
12495 }
12496
12497 /* Only privileged users can get physical addresses */
12498 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
12499 err = perf_allow_kernel(&attr);
12500 if (err)
12501 return err;
12502 }
12503
12504 /* REGS_INTR can leak data, lockdown must prevent this */
12505 if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
12506 err = security_locked_down(LOCKDOWN_PERF);
12507 if (err)
12508 return err;
12509 }
12510
12511 /*
12512 * In cgroup mode, the pid argument is used to pass the fd
12513 * opened to the cgroup directory in cgroupfs. The cpu argument
12514 * designates the cpu on which to monitor threads from that
12515 * cgroup.
12516 */
12517 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
12518 return -EINVAL;
12519
12520 if (flags & PERF_FLAG_FD_CLOEXEC)
12521 f_flags |= O_CLOEXEC;
12522
12523 event_fd = get_unused_fd_flags(f_flags);
12524 if (event_fd < 0)
12525 return event_fd;
12526
12527 if (group_fd != -1) {
12528 err = perf_fget_light(group_fd, &group);
12529 if (err)
12530 goto err_fd;
12531 group_leader = group.file->private_data;
12532 if (flags & PERF_FLAG_FD_OUTPUT)
12533 output_event = group_leader;
12534 if (flags & PERF_FLAG_FD_NO_GROUP)
12535 group_leader = NULL;
12536 }
12537
12538 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
12539 task = find_lively_task_by_vpid(pid);
12540 if (IS_ERR(task)) {
12541 err = PTR_ERR(task);
12542 goto err_group_fd;
12543 }
12544 }
12545
12546 if (task && group_leader &&
12547 group_leader->attr.inherit != attr.inherit) {
12548 err = -EINVAL;
12549 goto err_task;
12550 }
12551
12552 if (flags & PERF_FLAG_PID_CGROUP)
12553 cgroup_fd = pid;
12554
12555 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
12556 NULL, NULL, cgroup_fd);
12557 if (IS_ERR(event)) {
12558 err = PTR_ERR(event);
12559 goto err_task;
12560 }
12561
12562 if (is_sampling_event(event)) {
12563 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
12564 err = -EOPNOTSUPP;
12565 goto err_alloc;
12566 }
12567 }
12568
12569 /*
12570 * Special case software events and allow them to be part of
12571 * any hardware group.
12572 */
12573 pmu = event->pmu;
12574
12575 if (attr.use_clockid) {
12576 err = perf_event_set_clock(event, attr.clockid);
12577 if (err)
12578 goto err_alloc;
12579 }
12580
12581 if (pmu->task_ctx_nr == perf_sw_context)
12582 event->event_caps |= PERF_EV_CAP_SOFTWARE;
12583
12584 if (task) {
12585 err = down_read_interruptible(&task->signal->exec_update_lock);
12586 if (err)
12587 goto err_alloc;
12588
12589 /*
12590 * We must hold exec_update_lock across this and any potential
12591 * perf_install_in_context() call for this new event to
12592 * serialize against exec() altering our credentials (and the
12593 * perf_event_exit_task() that could imply).
12594 */
12595 err = -EACCES;
12596 if (!perf_check_permission(&attr, task))
12597 goto err_cred;
12598 }
12599
12600 /*
12601 * Get the target context (task or percpu):
12602 */
12603 ctx = find_get_context(task, event);
12604 if (IS_ERR(ctx)) {
12605 err = PTR_ERR(ctx);
12606 goto err_cred;
12607 }
12608
12609 mutex_lock(&ctx->mutex);
12610
12611 if (ctx->task == TASK_TOMBSTONE) {
12612 err = -ESRCH;
12613 goto err_locked;
12614 }
12615
12616 if (!task) {
12617 /*
12618 * Check if the @cpu we're creating an event for is online.
12619 *
12620 * We use the perf_cpu_context::ctx::mutex to serialize against
12621 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
12622 */
12623 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
12624
12625 if (!cpuctx->online) {
12626 err = -ENODEV;
12627 goto err_locked;
12628 }
12629 }
12630
12631 if (group_leader) {
12632 err = -EINVAL;
12633
12634 /*
12635 * Do not allow a recursive hierarchy (this new sibling
12636 * becoming part of another group-sibling):
12637 */
12638 if (group_leader->group_leader != group_leader)
12639 goto err_locked;
12640
12641 /* All events in a group should have the same clock */
12642 if (group_leader->clock != event->clock)
12643 goto err_locked;
12644
12645 /*
12646 * Make sure we're both events for the same CPU;
12647 * grouping events for different CPUs is broken, since
12648 * you can never concurrently schedule them anyhow.
12649 */
12650 if (group_leader->cpu != event->cpu)
12651 goto err_locked;
12652
12653 /*
12654 * Make sure we're both on the same context; either task or cpu.
12655 */
12656 if (group_leader->ctx != ctx)
12657 goto err_locked;
12658
12659 /*
12660 * Only a group leader can be exclusive or pinned
12661 */
12662 if (attr.exclusive || attr.pinned)
12663 goto err_locked;
12664
12665 if (is_software_event(event) &&
12666 !in_software_context(group_leader)) {
12667 /*
12668 * If the event is a sw event, but the group_leader
12669 * is on hw context.
12670 *
12671 * Allow the addition of software events to hw
12672 * groups, this is safe because software events
12673 * never fail to schedule.
12674 *
12675 * Note the comment that goes with struct
12676 * perf_event_pmu_context.
12677 */
12678 pmu = group_leader->pmu_ctx->pmu;
12679 } else if (!is_software_event(event)) {
12680 if (is_software_event(group_leader) &&
12681 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
12682 /*
12683 * In case the group is a pure software group, and we
12684 * try to add a hardware event, move the whole group to
12685 * the hardware context.
12686 */
12687 move_group = 1;
12688 }
12689
12690 /* Don't allow group of multiple hw events from different pmus */
12691 if (!in_software_context(group_leader) &&
12692 group_leader->pmu_ctx->pmu != pmu)
12693 goto err_locked;
12694 }
12695 }
12696
12697 /*
12698 * Now that we're certain of the pmu; find the pmu_ctx.
12699 */
12700 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12701 if (IS_ERR(pmu_ctx)) {
12702 err = PTR_ERR(pmu_ctx);
12703 goto err_locked;
12704 }
12705 event->pmu_ctx = pmu_ctx;
12706
12707 if (output_event) {
12708 err = perf_event_set_output(event, output_event);
12709 if (err)
12710 goto err_context;
12711 }
12712
12713 if (!perf_event_validate_size(event)) {
12714 err = -E2BIG;
12715 goto err_context;
12716 }
12717
12718 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
12719 err = -EINVAL;
12720 goto err_context;
12721 }
12722
12723 /*
12724 * Must be under the same ctx::mutex as perf_install_in_context(),
12725 * because we need to serialize with concurrent event creation.
12726 */
12727 if (!exclusive_event_installable(event, ctx)) {
12728 err = -EBUSY;
12729 goto err_context;
12730 }
12731
12732 WARN_ON_ONCE(ctx->parent_ctx);
12733
12734 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags);
12735 if (IS_ERR(event_file)) {
12736 err = PTR_ERR(event_file);
12737 event_file = NULL;
12738 goto err_context;
12739 }
12740
12741 /*
12742 * This is the point of no return; we cannot fail hereafter. This is
12743 * where we start modifying current state.
12744 */
12745
12746 if (move_group) {
12747 perf_remove_from_context(group_leader, 0);
12748 put_pmu_ctx(group_leader->pmu_ctx);
12749
12750 for_each_sibling_event(sibling, group_leader) {
12751 perf_remove_from_context(sibling, 0);
12752 put_pmu_ctx(sibling->pmu_ctx);
12753 }
12754
12755 /*
12756 * Install the group siblings before the group leader.
12757 *
12758 * Because a group leader will try and install the entire group
12759 * (through the sibling list, which is still intact), we can
12760 * end up with siblings installed in the wrong context.
12761 *
12762 * By installing siblings first we NO-OP because they're not
12763 * reachable through the group lists.
12764 */
12765 for_each_sibling_event(sibling, group_leader) {
12766 sibling->pmu_ctx = pmu_ctx;
12767 get_pmu_ctx(pmu_ctx);
12768 perf_event__state_init(sibling);
12769 perf_install_in_context(ctx, sibling, sibling->cpu);
12770 }
12771
12772 /*
12773 * Removing from the context ends up with a disabled
12774 * event. What we want here is an event in the initial
12775 * startup state, ready to be added into the new context.
12776 */
12777 group_leader->pmu_ctx = pmu_ctx;
12778 get_pmu_ctx(pmu_ctx);
12779 perf_event__state_init(group_leader);
12780 perf_install_in_context(ctx, group_leader, group_leader->cpu);
12781 }
12782
12783 /*
12784 * Precalculate sample_data sizes; do while holding ctx::mutex such
12785 * that we're serialized against further additions and before
12786 * perf_install_in_context() which is the point the event is active and
12787 * can use these values.
12788 */
12789 perf_event__header_size(event);
12790 perf_event__id_header_size(event);
12791
12792 event->owner = current;
12793
12794 perf_install_in_context(ctx, event, event->cpu);
12795 perf_unpin_context(ctx);
12796
12797 mutex_unlock(&ctx->mutex);
12798
12799 if (task) {
12800 up_read(&task->signal->exec_update_lock);
12801 put_task_struct(task);
12802 }
12803
12804 mutex_lock(&current->perf_event_mutex);
12805 list_add_tail(&event->owner_entry, &current->perf_event_list);
12806 mutex_unlock(&current->perf_event_mutex);
12807
12808 /*
12809 * Drop the reference on the group_event after placing the
12810 * new event on the sibling_list. This ensures destruction
12811 * of the group leader will find the pointer to itself in
12812 * perf_group_detach().
12813 */
12814 fdput(group);
12815 fd_install(event_fd, event_file);
12816 return event_fd;
12817
12818 err_context:
12819 put_pmu_ctx(event->pmu_ctx);
12820 event->pmu_ctx = NULL; /* _free_event() */
12821 err_locked:
12822 mutex_unlock(&ctx->mutex);
12823 perf_unpin_context(ctx);
12824 put_ctx(ctx);
12825 err_cred:
12826 if (task)
12827 up_read(&task->signal->exec_update_lock);
12828 err_alloc:
12829 free_event(event);
12830 err_task:
12831 if (task)
12832 put_task_struct(task);
12833 err_group_fd:
12834 fdput(group);
12835 err_fd:
12836 put_unused_fd(event_fd);
12837 return err;
12838 }
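
/*
 * Illustrative userspace call (sketch, not from this file): count retired
 * instructions of the calling task on any CPU:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.size		= sizeof(attr),
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *			 PERF_FLAG_FD_CLOEXEC);
 */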
12839
12840 /**
12841 * perf_event_create_kernel_counter
12842 *
12843 * @attr: attributes of the counter to create
12844 * @cpu: cpu in which the counter is bound
12845 * @task: task to profile (NULL for percpu)
12846 * @overflow_handler: callback to trigger when we hit the event
12847 * @context: context data could be used in overflow_handler callback
12848 */
12849 struct perf_event *
12850 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
12851 struct task_struct *task,
12852 perf_overflow_handler_t overflow_handler,
12853 void *context)
12854 {
12855 struct perf_event_pmu_context *pmu_ctx;
12856 struct perf_event_context *ctx;
12857 struct perf_event *event;
12858 struct pmu *pmu;
12859 int err;
12860
12861 /*
12862 * Grouping is not supported for kernel events, nor is 'AUX';
12863 * make sure the caller's intentions are adjusted.
12864 */
12865 if (attr->aux_output)
12866 return ERR_PTR(-EINVAL);
12867
12868 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
12869 overflow_handler, context, -1);
12870 if (IS_ERR(event)) {
12871 err = PTR_ERR(event);
12872 goto err;
12873 }
12874
12875 /* Mark owner so we could distinguish it from user events. */
12876 event->owner = TASK_TOMBSTONE;
12877 pmu = event->pmu;
12878
12879 if (pmu->task_ctx_nr == perf_sw_context)
12880 event->event_caps |= PERF_EV_CAP_SOFTWARE;
12881
12882 /*
12883 * Get the target context (task or percpu):
12884 */
12885 ctx = find_get_context(task, event);
12886 if (IS_ERR(ctx)) {
12887 err = PTR_ERR(ctx);
12888 goto err_alloc;
12889 }
12890
12891 WARN_ON_ONCE(ctx->parent_ctx);
12892 mutex_lock(&ctx->mutex);
12893 if (ctx->task == TASK_TOMBSTONE) {
12894 err = -ESRCH;
12895 goto err_unlock;
12896 }
12897
12898 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12899 if (IS_ERR(pmu_ctx)) {
12900 err = PTR_ERR(pmu_ctx);
12901 goto err_unlock;
12902 }
12903 event->pmu_ctx = pmu_ctx;
12904
12905 if (!task) {
12906 /*
12907 * Check if the @cpu we're creating an event for is online.
12908 *
12909 * We use the perf_cpu_context::ctx::mutex to serialize against
12910 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
12911 */
12912 struct perf_cpu_context *cpuctx =
12913 container_of(ctx, struct perf_cpu_context, ctx);
12914 if (!cpuctx->online) {
12915 err = -ENODEV;
12916 goto err_pmu_ctx;
12917 }
12918 }
12919
12920 if (!exclusive_event_installable(event, ctx)) {
12921 err = -EBUSY;
12922 goto err_pmu_ctx;
12923 }
12924
12925 perf_install_in_context(ctx, event, event->cpu);
12926 perf_unpin_context(ctx);
12927 mutex_unlock(&ctx->mutex);
12928
12929 return event;
12930
12931 err_pmu_ctx:
12932 put_pmu_ctx(pmu_ctx);
12933 event->pmu_ctx = NULL; /* _free_event() */
12934 err_unlock:
12935 mutex_unlock(&ctx->mutex);
12936 perf_unpin_context(ctx);
12937 put_ctx(ctx);
12938 err_alloc:
12939 free_event(event);
12940 err:
12941 return ERR_PTR(err);
12942 }
12943 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
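
/*
 * In-kernel usage sketch (illustrative; my_overflow_handler is a
 * placeholder): users such as the hardlockup watchdog and hw_breakpoint
 * create counters without a file descriptor:
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */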
12944
12945 static void __perf_pmu_remove(struct perf_event_context *ctx,
12946 int cpu, struct pmu *pmu,
12947 struct perf_event_groups *groups,
12948 struct list_head *events)
12949 {
12950 struct perf_event *event, *sibling;
12951
12952 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
12953 perf_remove_from_context(event, 0);
12954 put_pmu_ctx(event->pmu_ctx);
12955 list_add(&event->migrate_entry, events);
12956
12957 for_each_sibling_event(sibling, event) {
12958 perf_remove_from_context(sibling, 0);
12959 put_pmu_ctx(sibling->pmu_ctx);
12960 list_add(&sibling->migrate_entry, events);
12961 }
12962 }
12963 }
12964
12965 static void __perf_pmu_install_event(struct pmu *pmu,
12966 struct perf_event_context *ctx,
12967 int cpu, struct perf_event *event)
12968 {
12969 struct perf_event_pmu_context *epc;
12970 struct perf_event_context *old_ctx = event->ctx;
12971
12972 get_ctx(ctx); /* normally find_get_context() */
12973
12974 event->cpu = cpu;
12975 epc = find_get_pmu_context(pmu, ctx, event);
12976 event->pmu_ctx = epc;
12977
12978 if (event->state >= PERF_EVENT_STATE_OFF)
12979 event->state = PERF_EVENT_STATE_INACTIVE;
12980 perf_install_in_context(ctx, event, cpu);
12981
12982 /*
12983 * Now that event->ctx is updated and visible, put the old ctx.
12984 */
12985 put_ctx(old_ctx);
12986 }
12987
12988 static void __perf_pmu_install(struct perf_event_context *ctx,
12989 int cpu, struct pmu *pmu, struct list_head *events)
12990 {
12991 struct perf_event *event, *tmp;
12992
12993 /*
12994 * Re-instate events in 2 passes.
12995 *
12996 * Skip over group leaders and only install siblings on this first
12997 * pass; siblings will not get enabled without a leader, but a
12998 * leader will enable its siblings, even if those are still on the old
12999 * context.
13000 */
13001 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
13002 if (event->group_leader == event)
13003 continue;
13004
13005 list_del(&event->migrate_entry);
13006 __perf_pmu_install_event(pmu, ctx, cpu, event);
13007 }
13008
13009 /*
13010 * Once all the siblings are setup properly, install the group leaders
13011 * to make it go.
13012 */
13013 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
13014 list_del(&event->migrate_entry);
13015 __perf_pmu_install_event(pmu, ctx, cpu, event);
13016 }
13017 }
13018
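/*
 * Migrate all of @pmu's events from @src_cpu's context to @dst_cpu's;
 * used by uncore-style drivers whose per-package context must follow a
 * CPU going offline.
 */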
13019 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
13020 {
13021 struct perf_event_context *src_ctx, *dst_ctx;
13022 LIST_HEAD(events);
13023
13024 /*
13025 * Since per-cpu context is persistent, no need to grab an extra
13026 * reference.
13027 */
13028 src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
13029 dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
13030
13031 /*
13032 * See perf_event_ctx_lock() for comments on the details
13033 * of swizzling perf_event::ctx.
13034 */
13035 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
13036
13037 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
13038 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
13039
13040 if (!list_empty(&events)) {
13041 /*
13042 * Wait for the events to quiesce before re-instating them.
13043 */
13044 synchronize_rcu();
13045
13046 __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
13047 }
13048
13049 mutex_unlock(&dst_ctx->mutex);
13050 mutex_unlock(&src_ctx->mutex);
13051 }
13052 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
13053
13054 static void sync_child_event(struct perf_event *child_event)
13055 {
13056 struct perf_event *parent_event = child_event->parent;
13057 u64 child_val;
13058
13059 if (child_event->attr.inherit_stat) {
13060 struct task_struct *task = child_event->ctx->task;
13061
13062 if (task && task != TASK_TOMBSTONE)
13063 perf_event_read_event(child_event, task);
13064 }
13065
13066 child_val = perf_event_count(child_event);
13067
13068 /*
13069 * Add back the child's count to the parent's count:
13070 */
13071 atomic64_add(child_val, &parent_event->child_count);
13072 atomic64_add(child_event->total_time_enabled,
13073 &parent_event->child_total_time_enabled);
13074 atomic64_add(child_event->total_time_running,
13075 &parent_event->child_total_time_running);
13076 }
13077
13078 static void
13079 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
13080 {
13081 struct perf_event *parent_event = event->parent;
13082 unsigned long detach_flags = 0;
13083
13084 if (parent_event) {
13085 /*
13086 * Do not destroy the 'original' grouping; because of the
13087 * context switch optimization the original events could've
13088 * ended up in a random child task.
13089 *
13090 * If we were to destroy the original group, all group related
13091 * operations would cease to function properly after this
13092 * random child dies.
13093 *
13094 * Do destroy all inherited groups, we don't care about those
13095 * and being thorough is better.
13096 */
13097 detach_flags = DETACH_GROUP | DETACH_CHILD;
13098 mutex_lock(&parent_event->child_mutex);
13099 }
13100
13101 perf_remove_from_context(event, detach_flags);
13102
13103 raw_spin_lock_irq(&ctx->lock);
13104 if (event->state > PERF_EVENT_STATE_EXIT)
13105 perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
13106 raw_spin_unlock_irq(&ctx->lock);
13107
13108 /*
13109 * Child events can be freed.
13110 */
13111 if (parent_event) {
13112 mutex_unlock(&parent_event->child_mutex);
13113 /*
13114 * Kick perf_poll() for is_event_hup();
13115 */
13116 perf_event_wakeup(parent_event);
13117 free_event(event);
13118 put_event(parent_event);
13119 return;
13120 }
13121
13122 /*
13123 * Parent events are governed by their filedesc, retain them.
13124 */
13125 perf_event_wakeup(event);
13126 }

static void perf_event_exit_task_context(struct task_struct *child)
{
	struct perf_event_context *child_ctx, *clone_ctx = NULL;
	struct perf_event *child_event, *next;

	WARN_ON_ONCE(child != current);

	child_ctx = perf_pin_task_context(child);
	if (!child_ctx)
		return;

	/*
	 * In order to reduce the amount of trickiness in ctx tear-down, we
	 * hold ctx::mutex over the entire thing. This serializes against
	 * almost everything that wants to access the ctx.
	 *
	 * The exception is sys_perf_event_open() /
	 * perf_event_create_kernel_counter() which does find_get_context()
	 * without ctx::mutex (it cannot because of the move_group double
	 * mutex lock thing). See the comments in perf_install_in_context().
	 */
	mutex_lock(&child_ctx->mutex);

	/*
	 * In a single ctx::lock section, de-schedule the events and detach the
	 * context from the task such that we cannot ever get it scheduled back
	 * in.
	 */
	raw_spin_lock_irq(&child_ctx->lock);
	task_ctx_sched_out(child_ctx, EVENT_ALL);

	/*
	 * Now that the context is inactive, destroy the task <-> ctx relation
	 * and mark the context dead.
	 */
	RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
	put_ctx(child_ctx); /* cannot be last */
	WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
	put_task_struct(current); /* cannot be last */

	clone_ctx = unclone_ctx(child_ctx);
	raw_spin_unlock_irq(&child_ctx->lock);

	if (clone_ctx)
		put_ctx(clone_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		perf_event_exit_event(child_event, child_ctx);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 *
 * Can be called with exec_update_lock held when called from
 * setup_new_exec().
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner; this closes a race against perf_release()
		 * where we need to serialize on the owner->perf_event_mutex.
		 */
		smp_store_release(&event->owner, NULL);
	}
	mutex_unlock(&child->perf_event_mutex);

	perf_event_exit_task_context(child);

	/*
	 * perf_event_exit_task_context() calls perf_event_task() with the
	 * child's task_ctx, which generates EXIT events for child contexts
	 * and sets child->perf_event_ctxp to NULL. At this point we need
	 * to send EXIT events to cpu contexts.
	 */
	perf_event_task(child, NULL, 0);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free a context as created by inheritance by perf_event_init_task() below,
 * used by fork() in case of failure.
 *
 * Even though the task has never lived, the context and events have been
 * exposed through the child_list, so we must take care tearing it all down.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;

	ctx = rcu_access_pointer(task->perf_event_ctxp);
	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Destroy the task <-> ctx relation and mark the context dead.
	 *
	 * This is important because even though the task hasn't been
	 * exposed yet the context has been (through child_list).
	 */
	RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
	WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
	put_task_struct(task); /* cannot be last */
	raw_spin_unlock_irq(&ctx->lock);

	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
		perf_free_event(event, ctx);

	mutex_unlock(&ctx->mutex);

	/*
	 * perf_event_release_kernel() could've stolen some of our
	 * child events and still have them on its free_list. In that
	 * case we must wait for these events to have been freed (in
	 * particular all their references to this task must've been
	 * dropped).
	 *
	 * Without this copy_process() will unconditionally free this
	 * task (irrespective of its reference count) and
	 * _free_event()'s put_task_struct(event->hw.target) will be a
	 * use-after-free.
	 *
	 * Wait for all events to drop their context reference.
	 */
	wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1);
	put_ctx(ctx); /* must be last */
}

void perf_event_delayed_put(struct task_struct *task)
{
	WARN_ON_ONCE(task->perf_event_ctxp);
}

struct file *perf_event_get(unsigned int fd)
{
	struct file *file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput(file);
		return ERR_PTR(-EBADF);
	}

	return file;
}

const struct perf_event *perf_get_event(struct file *file)
{
	if (file->f_op != &perf_fops)
		return ERR_PTR(-EINVAL);

	return file->private_data;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	if (!event)
		return ERR_PTR(-EINVAL);

	return &event->attr;
}
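
/*
 * A sketch of how a caller might combine the helpers above to peek at a
 * perf fd's attributes; "fd" is assumed to come from userspace and no
 * specific call site is implied:
 *
 *	struct file *file = perf_event_get(fd);
 *	const struct perf_event *event;
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	event = perf_get_event(file);
 *	if (!IS_ERR(event))
 *		... inspect event->attr ...
 *	fput(file);
 */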

/*
 * Inherit an event from parent task to child task.
 *
 * Returns:
 *  - valid pointer on success
 *  - NULL for orphaned events
 *  - IS_ERR() on error
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_state parent_state = parent_event->state;
	struct perf_event_pmu_context *pmu_ctx;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which is guaranteed to have a filp, and use that filp
	 * as the reference count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
	if (IS_ERR(pmu_ctx)) {
		free_event(child_event);
		return ERR_CAST(pmu_ctx);
	}
	child_event->pmu_ctx = pmu_ctx;

	/*
	 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
	 * must be under the same lock in order to serialize against
	 * perf_event_release_kernel(), such that either we must observe
	 * is_orphaned_event() or they will observe us on the child_list.
	 */
	mutex_lock(&parent_event->child_mutex);
	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		mutex_unlock(&parent_event->child_mutex);
		/* task_ctx_data is freed with child_ctx */
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

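	/*
	 * For freq events, start the child at the parent's current
	 * (adaptively adjusted) sample_period instead of re-learning it
	 * from scratch; period_left is primed with a full period.
	 */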
	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	child_event->attach_state |= PERF_ATTACH_CHILD;
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

/*
 * Inherits an event group.
 *
 * This will quietly suppress orphaned events; !inherit_event() is not an error.
 * This matches with perf_event_release_kernel() removing all child events.
 *
 * Returns:
 *  - 0 on success
 *  - <0 on error
 */
static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	/*
	 * @leader can be NULL here because of is_orphaned_event(). In this
	 * case inherit_event() will create individual events, similar to what
	 * perf_group_detach() would do anyway.
	 */
	for_each_sibling_event(sub, parent_event) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);

		if (sub->aux_event == parent_event && child_ctr &&
		    !perf_get_aux_event(child_ctr, leader))
			return -EINVAL;
	}
	if (leader)
		leader->group_generation = parent_event->group_generation;
	return 0;
}

/*
 * Creates the child task context and tries to inherit the event-group.
 *
 * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
 * inherited_all set when we 'fail' to inherit an orphaned event; this is
 * consistent with perf_event_release_kernel() removing all child events.
 *
 * Returns:
 *  - 0 on success
 *  - <0 on error
 */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child,
		   u64 clone_flags, int *inherited_all)
{
	struct perf_event_context *child_ctx;
	int ret;

	if (!event->attr.inherit ||
	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
	    /* Do not inherit if sigtrap and signal handlers were cleared. */
	    (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp;
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx, child, child_ctx);
	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, u64 clone_flags)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, clone_flags, &inherited_all);
		if (ret)
			goto out_unlock;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, clone_flags, &inherited_all);
		if (ret)
			goto out_unlock;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
out_unlock:
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child, u64 clone_flags)
{
	int ret;

	child->perf_event_ctxp = NULL;
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	ret = perf_event_init_context(child, clone_flags);
	if (ret) {
		perf_event_free_task(child);
		return ret;
	}

	return 0;
}
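
/*
 * The expected fork-time flow, sketched from the functions above: during
 * copy_process() the new task is set up with perf_event_init_task(); if
 * that (or a later fork step) fails, perf_event_free_task() tears the
 * half-built context down again before the task ever runs.
 */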

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	struct perf_cpu_context *cpuctx;
	int cpu;

	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);

		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));

		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));

		cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
		cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
		cpuctx->heap = cpuctx->heap_default;
	}
}

static void perf_swevent_init_cpu(unsigned int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
	struct perf_event_context *ctx = __info;
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx_sched_out(ctx, EVENT_TIME);
	list_for_each_entry(event, &ctx->event_list, event_entry)
		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
	raw_spin_unlock(&ctx->lock);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	// XXX simplify cpuctx->online
	mutex_lock(&pmus_lock);
	cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
	ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
	cpuctx->online = 0;
	mutex_unlock(&ctx->mutex);
	cpumask_clear_cpu(cpu, perf_online_mask);
	mutex_unlock(&pmus_lock);
}
#else

static void perf_event_exit_cpu_context(int cpu) { }

#endif

int perf_event_init_cpu(unsigned int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	perf_swevent_init_cpu(cpu);

	mutex_lock(&pmus_lock);
	cpumask_set_cpu(cpu, perf_online_mask);
	cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
	ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	cpuctx->online = 1;
	mutex_unlock(&ctx->mutex);
	mutex_unlock(&pmus_lock);

	return 0;
}
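
/*
 * perf_event_init_cpu() and perf_event_exit_cpu() below are hooked into
 * the CPU hotplug state machine (see kernel/cpu.c), so the per-CPU perf
 * state follows CPUs as they come and go.
 */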

int perf_event_exit_cpu(unsigned int cpu)
{
	perf_event_exit_cpu_context(cpu);
	return 0;
}

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, "cpu_clock", -1);
	perf_pmu_register(&perf_task_clock, "task_clock", -1);
	perf_tp_register();
	perf_event_init_cpu(smp_processor_id());
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validate that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
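
/*
 * Sketch of how a PMU driver typically lands here: declaring an event
 * attribute with PMU_EVENT_ATTR_STRING() from <linux/perf_event.h> wires
 * this function up as the sysfs ->show handler. The event name and config
 * string below are made up for illustration:
 *
 *	PMU_EVENT_ATTR_STRING(cycles, evattr_cycles, "event=0x3c");
 */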

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->dev)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
{
	perf_event_cgroup(css->cgroup);
	return 0;
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	preempt_disable();
	perf_cgroup_switch(task);
	preempt_enable();

	return 0;
}

static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.css_online	= perf_cgroup_css_online,
	.attach		= perf_cgroup_attach,
	/*
	 * Implicitly enable on dfl hierarchy so that perf events can
	 * always be filtered by cgroup2 path as long as perf_event
	 * controller is not mounted on a legacy hierarchy.
	 */
	.implicit_on_dfl = true,
	.threaded	= true,
};
#endif /* CONFIG_CGROUP_PERF */

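/*
 * Default to a return-0 stub: an architecture or PMU driver that can
 * snapshot the branch stack replaces this static call with its own
 * implementation via static_call_update().
 */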
DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);