1 // SPDX-License-Identifier: GPL-2.0
6 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
75 struct task_struct *p = tfc->p; in remote_function()
78 /* -EAGAIN */ in remote_function()
87 tfc->ret = -ESRCH; /* No such (running) process */ in remote_function()
92 tfc->ret = tfc->func(tfc->info); in remote_function()
96 * task_function_call - call a function on the cpu on which a task runs
106 * returns @func return value or -ESRCH or -ENXIO when the process isn't running
115 .ret = -EAGAIN, in task_function_call()
125 if (ret != -EAGAIN) in task_function_call()
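The retry loop shown above in task_function_call() re-issues the cross call for as long as the callee reports -EAGAIN (the task moved CPUs before the call landed); any other value, including -ESRCH, ends the loop. A minimal user-space sketch of that pattern, with all names hypothetical:

#include <errno.h>
#include <stdio.h>

/* Stand-in for remote_function(): fails with -EAGAIN until the "task" settles. */
static int demo_remote(void *info)
{
	int *races_left = info;

	if ((*races_left)-- > 0)
		return -EAGAIN;		/* task migrated; caller must retry */
	return 0;			/* ran on the right CPU */
}

/* Stand-in for task_function_call(): only the -EAGAIN case is retried. */
static int demo_task_function_call(int (*func)(void *), void *info)
{
	int ret;

	do {
		ret = func(info);	/* the real code issues an IPI here */
	} while (ret == -EAGAIN);

	return ret;			/* 0, or a hard error such as -ESRCH */
}

int main(void)
{
	int races = 3;

	printf("ret = %d\n", demo_task_function_call(demo_remote, &races));
	return 0;
}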
135 * cpu_function_call - call a function on the cpu
142 * returns: @func return value or -ENXIO when the cpu is offline
150 .ret = -ENXIO, /* No such CPU */ in cpu_function_call()
161 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
163 raw_spin_lock(&ctx->lock); in perf_ctx_lock()
170 raw_spin_unlock(&ctx->lock); in perf_ctx_unlock()
171 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
174 #define TASK_TOMBSTONE ((void *)-1L)
176 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
178 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
186 return this_cpu_ptr(&perf_cpu_context)->task_ctx; in perf_cpu_task_ctx()
192 * When !ctx->nr_events a task context will not be scheduled. This means
198 * - removing the last event from a task ctx; this is relatively straight
201 * - adding the first event to a task ctx; this is tricky because we cannot
202 * rely on ctx->is_active and therefore cannot use event_function_call().
205 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
212 struct perf_event *event; member
220 struct perf_event *event = efs->event; in event_function() local
221 struct perf_event_context *ctx = event->ctx; in event_function()
223 struct perf_event_context *task_ctx = cpuctx->task_ctx; in event_function()
230 * Since we do the IPI call without holding ctx->lock things can have in event_function()
233 if (ctx->task) { in event_function()
234 if (ctx->task != current) { in event_function()
235 ret = -ESRCH; in event_function()
243 * above ctx->task != current test), therefore we must have in event_function()
244 * ctx->is_active here. in event_function()
246 WARN_ON_ONCE(!ctx->is_active); in event_function()
248 * And since we have ctx->is_active, cpuctx->task_ctx must in event_function()
253 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function()
256 efs->func(event, cpuctx, ctx, efs->data); in event_function()
263 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
265 struct perf_event_context *ctx = event->ctx; in event_function_call()
266 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */ in event_function_call()
269 .event = event, in event_function_call()
274 if (!event->parent) { in event_function_call()
276 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
277 * stabilize the event->ctx relation. See in event_function_call()
280 lockdep_assert_held(&ctx->mutex); in event_function_call()
284 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
302 task = ctx->task; in event_function_call()
305 if (ctx->is_active) { in event_function_call()
310 func(event, NULL, ctx, data); in event_function_call()
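As the fragments of event_function_call() above show, the update is first attempted via a cross call to the CPU that owns the context; if that fails because the task is no longer running there, the context is re-examined under its lock and, when it turns out to be inactive, modified directly in place. A rough sketch of that decision, with hypothetical helpers standing in for the IPI machinery:

#include <stdbool.h>
#include <pthread.h>

/* Illustrative stand-ins only, not the kernel's types. */
struct demo_ctx {
	pthread_mutex_t lock;
	bool is_active;		/* a task is currently running this context */
};

typedef void (*demo_func_t)(struct demo_ctx *ctx, void *data);

/* Pretend cross call; in the kernel this is an IPI that can fail with -ESRCH. */
int demo_cross_call(demo_func_t func, struct demo_ctx *ctx, void *data)
{
	func(ctx, data);
	return 0;
}

void demo_event_function_call(struct demo_ctx *ctx, demo_func_t func, void *data)
{
	for (;;) {
		/* Fast path: have the CPU that owns the context run @func. */
		if (!demo_cross_call(func, ctx, data))
			return;

		/* The target raced away; re-evaluate under the lock. */
		pthread_mutex_lock(&ctx->lock);
		if (!ctx->is_active) {
			/* Not scheduled anywhere: safe to edit it right here. */
			func(ctx, data);
			pthread_mutex_unlock(&ctx->lock);
			return;
		}
		pthread_mutex_unlock(&ctx->lock);
		/* Became active again before we took the lock: retry the IPI. */
	}
}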
320 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
322 struct perf_event_context *ctx = event->ctx; in event_function_local()
324 struct task_struct *task = READ_ONCE(ctx->task); in event_function_local()
338 task = ctx->task; in event_function_local()
348 if (ctx->is_active) { in event_function_local()
352 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) in event_function_local()
356 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function_local()
359 func(event, cpuctx, ctx, data); in event_function_local()
417 * perf event paranoia level:
418 * -1 - not paranoid at all
419 * 0 - disallow raw tracepoint access for unpriv
420 * 1 - disallow cpu events for unpriv
421 * 2 - disallow kernel profiling for unpriv
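The table above mirrors the perf_event_paranoid sysctl. A quick user-space check of the current setting; the /proc path is the usual sysctl location, and values above 2 (used by some distributions) are treated here like 2:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f) {
		perror("perf_event_paranoid");
		return 1;
	}
	if (fscanf(f, "%d", &level) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("perf_event_paranoid = %d: ", level);
	if (level < 0)
		puts("not paranoid at all");
	else if (level == 0)
		puts("raw tracepoint access disallowed for unprivileged users");
	else if (level == 1)
		puts("CPU events disallowed for unprivileged users");
	else
		puts("kernel profiling disallowed for unprivileged users");
	return 0;
}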
429 * max perf event sample rate
466 return -EINVAL; in perf_proc_update_handler()
536 running_len -= running_len/NR_ACCUMULATED_SAMPLES; in perf_sample_event_took()
579 static u64 perf_event_time(struct perf_event *event);
588 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
590 return event->clock(); in perf_event_clock()
594 * State based event timekeeping...
596 * The basic idea is to use event->state to determine which (if any) time
601 * Event groups make things a little more complicated, but not terribly so. The
616 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
618 struct perf_event *leader = event->group_leader; in __perf_effective_state()
620 if (leader->state <= PERF_EVENT_STATE_OFF) in __perf_effective_state()
621 return leader->state; in __perf_effective_state()
623 return event->state; in __perf_effective_state()
627 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
629 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
630 u64 delta = now - event->tstamp; in __perf_update_times()
632 *enabled = event->total_time_enabled; in __perf_update_times()
636 *running = event->total_time_running; in __perf_update_times()
641 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
643 u64 now = perf_event_time(event); in perf_event_update_time()
645 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
646 &event->total_time_running); in perf_event_update_time()
647 event->tstamp = now; in perf_event_update_time()
659 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
661 if (event->state == state) in perf_event_set_state()
664 perf_event_update_time(event); in perf_event_set_state()
669 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
670 perf_event_update_sibling_time(event); in perf_event_set_state()
672 WRITE_ONCE(event->state, state); in perf_event_set_state()
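The timekeeping above is delta based: on every state change __perf_update_times() folds (now - tstamp) into total_time_enabled, and into total_time_running only while the event was actually on the PMU, after which tstamp is re-stamped. A self-contained sketch of that bookkeeping, with a made-up clock and a reduced state enum:

#include <stdio.h>
#include <stdint.h>

enum demo_state { DEMO_OFF = -1, DEMO_INACTIVE = 0, DEMO_ACTIVE = 1 };

struct demo_event {
	enum demo_state state;
	uint64_t tstamp;		/* when we last changed state */
	uint64_t total_time_enabled;
	uint64_t total_time_running;
};

/* Fold the elapsed time since the last state change into the totals. */
static void demo_update_times(struct demo_event *e, uint64_t now)
{
	uint64_t delta = now - e->tstamp;

	if (e->state >= DEMO_INACTIVE)
		e->total_time_enabled += delta;
	if (e->state == DEMO_ACTIVE)
		e->total_time_running += delta;
	e->tstamp = now;
}

static void demo_set_state(struct demo_event *e, enum demo_state state, uint64_t now)
{
	if (e->state == state)
		return;
	demo_update_times(e, now);	/* account the old state's interval... */
	e->state = state;		/* ...then switch */
}

int main(void)
{
	struct demo_event e = { .state = DEMO_INACTIVE, .tstamp = 0 };

	demo_set_state(&e, DEMO_ACTIVE, 10);	/* enabled 0..10, not running */
	demo_set_state(&e, DEMO_INACTIVE, 25);	/* enabled and running 10..25 */
	demo_set_state(&e, DEMO_OFF, 30);	/* enabled 25..30, not running */

	printf("enabled=%llu running=%llu\n",
	       (unsigned long long)e.total_time_enabled,
	       (unsigned long long)e.total_time_running);
	return 0;
}

Fed the three transitions in main(), the sketch reports enabled=30 running=15: the event counts as enabled for its whole life up to being turned off, but as running only for the 10..25 window.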
676 * UP store-release, load-acquire
696 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in perf_ctx_disable()
697 if (cgroup && !pmu_ctx->nr_cgroups) in perf_ctx_disable()
699 perf_pmu_disable(pmu_ctx->pmu); in perf_ctx_disable()
707 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in perf_ctx_enable()
708 if (cgroup && !pmu_ctx->nr_cgroups) in perf_ctx_enable()
710 perf_pmu_enable(pmu_ctx->pmu); in perf_ctx_enable()
720 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
724 /* @event doesn't care about cgroup */ in perf_cgroup_match()
725 if (!event->cgrp) in perf_cgroup_match()
729 if (!cpuctx->cgrp) in perf_cgroup_match()
733 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
735 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
738 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, in perf_cgroup_match()
739 event->cgrp->css.cgroup); in perf_cgroup_match()
742 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
744 css_put(&event->cgrp->css); in perf_detach_cgroup()
745 event->cgrp = NULL; in perf_detach_cgroup()
748 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
750 return event->cgrp != NULL; in is_cgroup_event()
753 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
757 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
758 return t->time; in perf_cgroup_event_time()
761 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
765 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
766 if (!__load_acquire(&t->active)) in perf_cgroup_event_time_now()
767 return t->time; in perf_cgroup_event_time_now()
768 now += READ_ONCE(t->timeoffset); in perf_cgroup_event_time_now()
775 info->time += now - info->timestamp; in __update_cgrp_time()
776 info->timestamp = now; in __update_cgrp_time()
780 WRITE_ONCE(info->timeoffset, info->time - info->timestamp); in __update_cgrp_time()
785 struct perf_cgroup *cgrp = cpuctx->cgrp; in update_cgrp_time_from_cpuctx()
792 for (css = &cgrp->css; css; css = css->parent) { in update_cgrp_time_from_cpuctx()
794 info = this_cpu_ptr(cgrp->info); in update_cgrp_time_from_cpuctx()
798 __store_release(&info->active, 0); in update_cgrp_time_from_cpuctx()
803 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
811 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
814 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
818 if (info->active) in update_cgrp_time_from_event()
825 struct perf_event_context *ctx = &cpuctx->ctx; in perf_cgroup_set_timestamp()
826 struct perf_cgroup *cgrp = cpuctx->cgrp; in perf_cgroup_set_timestamp()
831 * ctx->lock held by caller in perf_cgroup_set_timestamp()
838 WARN_ON_ONCE(!ctx->nr_cgroups); in perf_cgroup_set_timestamp()
840 for (css = &cgrp->css; css; css = css->parent) { in perf_cgroup_set_timestamp()
842 info = this_cpu_ptr(cgrp->info); in perf_cgroup_set_timestamp()
843 __update_cgrp_time(info, ctx->timestamp, false); in perf_cgroup_set_timestamp()
844 __store_release(&info->active, 1); in perf_cgroup_set_timestamp()
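The __store_release()/__load_acquire() pairing used above publishes the updated time fields before the active flag becomes visible, so a lockless reader either sees a running clock with a valid timeoffset or falls back to the stored time. A C11-atomics sketch of the pattern; names are hypothetical and the READ_ONCE()/WRITE_ONCE() annotations the kernel puts on the payload fields are elided:

#include <stdatomic.h>
#include <stdint.h>

struct demo_cgrp_time {
	uint64_t time;		/* accumulated time, used when !active */
	uint64_t timeoffset;	/* time - now, used while active */
	atomic_int active;	/* release-stored by the writer, acquire-loaded by readers */
};

/* Writer (scheduler side): advance the clock, then publish it as active. */
void demo_publish(struct demo_cgrp_time *t, uint64_t now, uint64_t timestamp)
{
	t->time += now - timestamp;
	t->timeoffset = t->time - now;
	/* Everything written above must be visible before the flag flips. */
	atomic_store_explicit(&t->active, 1, memory_order_release);
}

/* Reader (event side): compute the current time without taking ctx->lock. */
uint64_t demo_read_time(struct demo_cgrp_time *t, uint64_t now)
{
	if (!atomic_load_explicit(&t->active, memory_order_acquire))
		return t->time;			/* clock is stopped */
	return now + t->timeoffset;		/* clock is running */
}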
857 * cpuctx->cgrp is set when the first cgroup event is enabled, in perf_cgroup_switch()
858 * and is cleared when the last cgroup event is disabled. in perf_cgroup_switch()
860 if (READ_ONCE(cpuctx->cgrp) == NULL) in perf_cgroup_switch()
863 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); in perf_cgroup_switch()
866 if (READ_ONCE(cpuctx->cgrp) == cgrp) in perf_cgroup_switch()
869 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
870 perf_ctx_disable(&cpuctx->ctx, true); in perf_cgroup_switch()
872 ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP); in perf_cgroup_switch()
878 cpuctx->cgrp = cgrp; in perf_cgroup_switch()
884 ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP); in perf_cgroup_switch()
886 perf_ctx_enable(&cpuctx->ctx, true); in perf_cgroup_switch()
887 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
890 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
901 for (heap_size = 1; css; css = css->parent) in perf_cgroup_ensure_storage()
906 if (heap_size <= cpuctx->heap_size) in perf_cgroup_ensure_storage()
912 ret = -ENOMEM; in perf_cgroup_ensure_storage()
916 raw_spin_lock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
917 if (cpuctx->heap_size < heap_size) { in perf_cgroup_ensure_storage()
918 swap(cpuctx->heap, storage); in perf_cgroup_ensure_storage()
919 if (storage == cpuctx->heap_default) in perf_cgroup_ensure_storage()
921 cpuctx->heap_size = heap_size; in perf_cgroup_ensure_storage()
923 raw_spin_unlock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
931 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
941 return -EBADF; in perf_cgroup_connect()
943 css = css_tryget_online_from_dir(f.file->f_path.dentry, in perf_cgroup_connect()
950 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
955 event->cgrp = cgrp; in perf_cgroup_connect()
962 if (group_leader && group_leader->cgrp != cgrp) { in perf_cgroup_connect()
963 perf_detach_cgroup(event); in perf_cgroup_connect()
964 ret = -EINVAL; in perf_cgroup_connect()
972 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
976 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
979 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
982 * Because cgroup events are always per-cpu events, in perf_cgroup_event_enable()
983 * @ctx == &cpuctx->ctx. in perf_cgroup_event_enable()
987 if (ctx->nr_cgroups++) in perf_cgroup_event_enable()
990 cpuctx->cgrp = perf_cgroup_from_task(current, ctx); in perf_cgroup_event_enable()
994 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
998 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1001 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1004 * Because cgroup events are always per-cpu events, in perf_cgroup_event_disable()
1005 * @ctx == &cpuctx->ctx. in perf_cgroup_event_disable()
1009 if (--ctx->nr_cgroups) in perf_cgroup_event_disable()
1012 cpuctx->cgrp = NULL; in perf_cgroup_event_disable()
1018 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1023 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1026 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1031 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1040 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1044 return -EINVAL; in perf_cgroup_connect()
1052 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1057 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1063 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1068 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1095 raw_spin_lock(&cpc->hrtimer_lock); in perf_mux_hrtimer_handler()
1097 hrtimer_forward_now(hr, cpc->hrtimer_interval); in perf_mux_hrtimer_handler()
1099 cpc->hrtimer_active = 0; in perf_mux_hrtimer_handler()
1100 raw_spin_unlock(&cpc->hrtimer_lock); in perf_mux_hrtimer_handler()
1107 struct hrtimer *timer = &cpc->hrtimer; in __perf_mux_hrtimer_init()
1108 struct pmu *pmu = cpc->epc.pmu; in __perf_mux_hrtimer_init()
1115 interval = pmu->hrtimer_interval_ms; in __perf_mux_hrtimer_init()
1117 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; in __perf_mux_hrtimer_init()
1119 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); in __perf_mux_hrtimer_init()
1121 raw_spin_lock_init(&cpc->hrtimer_lock); in __perf_mux_hrtimer_init()
1123 timer->function = perf_mux_hrtimer_handler; in __perf_mux_hrtimer_init()
1128 struct hrtimer *timer = &cpc->hrtimer; in perf_mux_hrtimer_restart()
1131 raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
1132 if (!cpc->hrtimer_active) { in perf_mux_hrtimer_restart()
1133 cpc->hrtimer_active = 1; in perf_mux_hrtimer_restart()
1134 hrtimer_forward_now(timer, cpc->hrtimer_interval); in perf_mux_hrtimer_restart()
1137 raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
1149 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_disable()
1151 pmu->pmu_disable(pmu); in perf_pmu_disable()
1156 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_enable()
1157 if (!--(*count)) in perf_pmu_enable()
1158 pmu->pmu_enable(pmu); in perf_pmu_enable()
1163 WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0); in perf_assert_pmu_disabled()
1168 refcount_inc(&ctx->refcount); in get_ctx()
1173 if (pmu->task_ctx_cache) in alloc_task_ctx_data()
1174 return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL); in alloc_task_ctx_data()
1181 if (pmu->task_ctx_cache && task_ctx_data) in free_task_ctx_data()
1182 kmem_cache_free(pmu->task_ctx_cache, task_ctx_data); in free_task_ctx_data()
1195 if (refcount_dec_and_test(&ctx->refcount)) { in put_ctx()
1196 if (ctx->parent_ctx) in put_ctx()
1197 put_ctx(ctx->parent_ctx); in put_ctx()
1198 if (ctx->task && ctx->task != TASK_TOMBSTONE) in put_ctx()
1199 put_task_struct(ctx->task); in put_ctx()
1200 call_rcu(&ctx->rcu_head, free_ctx); in put_ctx()
1214 * - perf_event_exit_task_context() [ child , 0 ]
1218 * - perf_event_init_context() [ parent, 0 ]
1226 * While it appears there is an obvious deadlock here -- the parent and child
1228 * life-time rules separate them. That is, an exiting task cannot fork, and a
1231 * But remember that these are parent<->child context relations, and
1236 * because the sys_perf_event_open() case will install a new event and break
1237 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1247 * quiesce the event, after which we can install it in the new location. This
1248 * means that only external vectors (perf_fops, prctl) can perturb the event
1252 * However; because event->ctx can change while we're waiting to acquire
1253 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1269 * cpuctx->mutex / perf_event_context::mutex
1272 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1278 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1279 if (!refcount_inc_not_zero(&ctx->refcount)) { in perf_event_ctx_lock_nested()
1285 mutex_lock_nested(&ctx->mutex, nesting); in perf_event_ctx_lock_nested()
1286 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1287 mutex_unlock(&ctx->mutex); in perf_event_ctx_lock_nested()
1296 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1298 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1301 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1304 mutex_unlock(&ctx->mutex); in perf_event_ctx_unlock()
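perf_event_ctx_lock_nested() above has to pin a pointer that can change while it sleeps: it reads event->ctx, takes a reference, locks ctx->mutex, and then re-checks that event->ctx still points at the same context, retrying otherwise. A reduced sketch of that stabilize-under-lock loop (refcounting elided, names hypothetical):

#include <pthread.h>
#include <stdatomic.h>

struct demo_ctx {
	pthread_mutex_t mutex;
	/* the kernel also takes ctx->refcount before locking; elided here */
};

struct demo_event {
	_Atomic(struct demo_ctx *) ctx;	/* may be switched by a concurrent move */
};

/* Lock whatever context @e is in right now, retrying if it moved under us. */
struct demo_ctx *demo_event_ctx_lock(struct demo_event *e)
{
	struct demo_ctx *ctx;

	for (;;) {
		ctx = atomic_load(&e->ctx);
		pthread_mutex_lock(&ctx->mutex);
		if (atomic_load(&e->ctx) == ctx)
			return ctx;	/* stable: holding ctx->mutex pins it */
		/* The event migrated between the load and the lock: try again. */
		pthread_mutex_unlock(&ctx->mutex);
	}
}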
1309 * This must be done under the ctx->lock, such as to serialize against
1311 * calling scheduler related locks and ctx->lock nests inside those.
1316 struct perf_event_context *parent_ctx = ctx->parent_ctx; in unclone_ctx()
1318 lockdep_assert_held(&ctx->lock); in unclone_ctx()
1321 ctx->parent_ctx = NULL; in unclone_ctx()
1322 ctx->generation++; in unclone_ctx()
1327 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1334 if (event->parent) in perf_event_pid_type()
1335 event = event->parent; in perf_event_pid_type()
1337 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1338 /* avoid -1 if it is idle thread or runs in another ns */ in perf_event_pid_type()
1340 nr = -1; in perf_event_pid_type()
1344 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1346 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1349 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1351 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1355 * If we inherit events we want to return the parent event id
1358 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1360 u64 id = event->id; in primary_event_id()
1362 if (event->parent) in primary_event_id()
1363 id = event->parent->id; in primary_event_id()
1383 * part of the read side critical section was irqs-enabled -- see in perf_lock_task_context()
1386 * Since ctx->lock nests under rq->lock we must ensure the entire read in perf_lock_task_context()
1391 ctx = rcu_dereference(task->perf_event_ctxp); in perf_lock_task_context()
1403 raw_spin_lock(&ctx->lock); in perf_lock_task_context()
1404 if (ctx != rcu_dereference(task->perf_event_ctxp)) { in perf_lock_task_context()
1405 raw_spin_unlock(&ctx->lock); in perf_lock_task_context()
1411 if (ctx->task == TASK_TOMBSTONE || in perf_lock_task_context()
1412 !refcount_inc_not_zero(&ctx->refcount)) { in perf_lock_task_context()
1413 raw_spin_unlock(&ctx->lock); in perf_lock_task_context()
1416 WARN_ON_ONCE(ctx->task != task); in perf_lock_task_context()
1438 ++ctx->pin_count; in perf_pin_task_context()
1439 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_pin_task_context()
1448 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_unpin_context()
1449 --ctx->pin_count; in perf_unpin_context()
1450 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_unpin_context()
1460 lockdep_assert_held(&ctx->lock); in __update_context_time()
1463 ctx->time += now - ctx->timestamp; in __update_context_time()
1464 ctx->timestamp = now; in __update_context_time()
1467 * The above: time' = time + (now - timestamp), can be re-arranged in __update_context_time()
1468 * into: time' = now + (time - timestamp), which gives a single value in __update_context_time()
1472 * it's (obviously) not possible to acquire ctx->lock in order to read in __update_context_time()
1475 WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp); in __update_context_time()
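The rearrangement in the comment above is plain algebra: time' = time + (now - timestamp) equals now + (time - timestamp), so publishing the single value timeoffset = time - timestamp lets a lockless reader recover the advanced time as now + timeoffset. A quick check of the identity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t time = 1000, timestamp = 400, now = 460;

	uint64_t advanced   = time + (now - timestamp);	/* what the writer computes  */
	uint64_t timeoffset = time - timestamp;		/* the single value it stores */

	assert(advanced == now + timeoffset);		/* what a reader recomputes   */
	return 0;
}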
1483 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1485 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1490 if (is_cgroup_event(event)) in perf_event_time()
1491 return perf_cgroup_event_time(event); in perf_event_time()
1493 return ctx->time; in perf_event_time()
1496 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1498 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1503 if (is_cgroup_event(event)) in perf_event_time_now()
1504 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1506 if (!(__load_acquire(&ctx->is_active) & EVENT_TIME)) in perf_event_time_now()
1507 return ctx->time; in perf_event_time_now()
1509 now += READ_ONCE(ctx->timeoffset); in perf_event_time_now()
1513 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1515 struct perf_event_context *ctx = event->ctx; in get_event_type()
1518 lockdep_assert_held(&ctx->lock); in get_event_type()
1524 if (event->group_leader != event) in get_event_type()
1525 event = event->group_leader; in get_event_type()
1527 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1528 if (!ctx->task) in get_event_type()
1535 * Helper function to initialize event group nodes.
1537 static void init_event_group(struct perf_event *event) in init_event_group() argument
1539 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1540 event->group_index = 0; in init_event_group()
1545 * based on event attrs bits.
1548 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1550 if (event->attr.pinned) in get_event_groups()
1551 return &ctx->pinned_groups; in get_event_groups()
1553 return &ctx->flexible_groups; in get_event_groups()
1561 groups->tree = RB_ROOT; in perf_event_groups_init()
1562 groups->index = 0; in perf_event_groups_init()
1565 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1570 if (event->cgrp) in event_cgroup()
1571 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1578 * Compare function for event groups;
1588 if (left_cpu < right->cpu) in perf_event_groups_cmp()
1589 return -1; in perf_event_groups_cmp()
1590 if (left_cpu > right->cpu) in perf_event_groups_cmp()
1594 if (left_pmu < right->pmu_ctx->pmu) in perf_event_groups_cmp()
1595 return -1; in perf_event_groups_cmp()
1596 if (left_pmu > right->pmu_ctx->pmu) in perf_event_groups_cmp()
1610 return -1; in perf_event_groups_cmp()
1621 return -1; in perf_event_groups_cmp()
1628 if (left_group_index < right->group_index) in perf_event_groups_cmp()
1629 return -1; in perf_event_groups_cmp()
1630 if (left_group_index > right->group_index) in perf_event_groups_cmp()
1642 return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e), in __group_less()
1643 e->group_index, __node_2_pe(b)) < 0; in __group_less()
1658 return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b); in __group_cmp()
1668 return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b), in __group_cmp_ignore_cgroup()
1669 b->group_index, b); in __group_cmp_ignore_cgroup()
1673 * Insert @event into @groups' tree; using
1674 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1679 struct perf_event *event) in perf_event_groups_insert() argument
1681 event->group_index = ++groups->index; in perf_event_groups_insert()
1683 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1687 * Helper function to insert event into the pinned or flexible groups.
1690 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1694 groups = get_event_groups(event, ctx); in add_event_to_groups()
1695 perf_event_groups_insert(groups, event); in add_event_to_groups()
1703 struct perf_event *event) in perf_event_groups_delete() argument
1705 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1706 RB_EMPTY_ROOT(&groups->tree)); in perf_event_groups_delete()
1708 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1709 init_event_group(event); in perf_event_groups_delete()
1713 * Helper function to delete event from its groups.
1716 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1720 groups = get_event_groups(event, ctx); in del_event_from_groups()
1721 perf_event_groups_delete(groups, event); in del_event_from_groups()
1725 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1738 node = rb_find_first(&key, &groups->tree, __group_cmp); in perf_event_groups_first()
1746 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1749 .cpu = event->cpu, in perf_event_groups_next()
1751 .cgroup = event_cgroup(event), in perf_event_groups_next()
1755 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1762 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
1763 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1764 event; event = perf_event_groups_next(event, pmu))
1769 #define perf_event_groups_for_each(event, groups) \
1770 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1771 typeof(*event), group_node); event; \
1772 event = rb_entry_safe(rb_next(&event->group_node), \
1773 typeof(*event), group_node))
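perf_event_groups_cmp() above keys the RB-tree on the tuple {cpu, pmu, cgroup, group_index}, where group_index is a per-context insertion counter that keeps equal-key events in insertion order. A reduced comparator over the same shape of tuple, exercised with qsort; the kernel's cgroup comparison is more involved and is collapsed here to an opaque integer:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_group_key {
	int cpu;
	uintptr_t pmu;		/* the kernel also compares the pmu by address */
	uintptr_t cgroup;	/* stand-in for the cgroup hierarchy/id compare */
	uint64_t group_index;	/* ++groups->index, assigned at insertion */
};

static int demo_groups_cmp(const void *pa, const void *pb)
{
	const struct demo_group_key *a = pa, *b = pb;

	if (a->cpu != b->cpu)
		return a->cpu < b->cpu ? -1 : 1;
	if (a->pmu != b->pmu)
		return a->pmu < b->pmu ? -1 : 1;
	if (a->cgroup != b->cgroup)
		return a->cgroup < b->cgroup ? -1 : 1;
	if (a->group_index != b->group_index)
		return a->group_index < b->group_index ? -1 : 1;
	return 0;
}

int main(void)
{
	struct demo_group_key k[] = {
		{ .cpu = 1, .pmu = 2, .group_index = 7 },
		{ .cpu = 0, .pmu = 2, .group_index = 9 },
		{ .cpu = 1, .pmu = 1, .group_index = 8 },
	};

	qsort(k, 3, sizeof(k[0]), demo_groups_cmp);
	for (int i = 0; i < 3; i++)
		printf("cpu=%d pmu=%llu idx=%llu\n", k[i].cpu,
		       (unsigned long long)k[i].pmu,
		       (unsigned long long)k[i].group_index);
	return 0;
}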
1776 * Add an event to the lists for its context.
1777 * Must be called with ctx->mutex and ctx->lock held.
1780 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1782 lockdep_assert_held(&ctx->lock); in list_add_event()
1784 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1785 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1787 event->tstamp = perf_event_time(event); in list_add_event()
1790 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1794 if (event->group_leader == event) { in list_add_event()
1795 event->group_caps = event->event_caps; in list_add_event()
1796 add_event_to_groups(event, ctx); in list_add_event()
1799 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1800 ctx->nr_events++; in list_add_event()
1801 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1802 ctx->nr_user++; in list_add_event()
1803 if (event->attr.inherit_stat) in list_add_event()
1804 ctx->nr_stat++; in list_add_event()
1806 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1807 perf_cgroup_event_enable(event, ctx); in list_add_event()
1809 ctx->generation++; in list_add_event()
1810 event->pmu_ctx->nr_events++; in list_add_event()
1814 * Initialize event state based on the perf_event_attr::disabled.
1816 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1818 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1852 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1858 size += sizeof(data->ip); in __perf_event_header_size()
1861 size += sizeof(data->addr); in __perf_event_header_size()
1864 size += sizeof(data->period); in __perf_event_header_size()
1867 size += sizeof(data->weight.full); in __perf_event_header_size()
1870 size += event->read_size; in __perf_event_header_size()
1873 size += sizeof(data->data_src.val); in __perf_event_header_size()
1876 size += sizeof(data->txn); in __perf_event_header_size()
1879 size += sizeof(data->phys_addr); in __perf_event_header_size()
1882 size += sizeof(data->cgroup); in __perf_event_header_size()
1885 size += sizeof(data->data_page_size); in __perf_event_header_size()
1888 size += sizeof(data->code_page_size); in __perf_event_header_size()
1890 event->header_size = size; in __perf_event_header_size()
1897 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1899 event->read_size = in perf_event__header_size()
1900 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1901 event->group_leader->nr_siblings); in perf_event__header_size()
1902 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1905 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1908 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1912 size += sizeof(data->tid_entry); in perf_event__id_header_size()
1915 size += sizeof(data->time); in perf_event__id_header_size()
1918 size += sizeof(data->id); in perf_event__id_header_size()
1921 size += sizeof(data->id); in perf_event__id_header_size()
1924 size += sizeof(data->stream_id); in perf_event__id_header_size()
1927 size += sizeof(data->cpu_entry); in perf_event__id_header_size()
1929 event->id_header_size = size; in perf_event__id_header_size()
1933 * Check that adding an event to the group does not result in anybody
1934 * overflowing the 64k event limit imposed by the output buffer.
1936 * Specifically, check that the read_size for the event does not exceed 16k,
1938 * depends on per-event read_format, also (re)check the existing events.
1943 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1945 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
1947 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
1948 group_leader->nr_siblings + 1) > 16*1024) in perf_event_validate_size()
1951 if (__perf_event_read_size(group_leader->attr.read_format, in perf_event_validate_size()
1952 group_leader->nr_siblings + 1) > 16*1024) in perf_event_validate_size()
1956 * When creating a new group leader, group_leader->ctx is initialized in perf_event_validate_size()
1958 * for_each_sibling_event() until group_leader->ctx is set. A new group in perf_event_validate_size()
1960 * the non-existent siblings. in perf_event_validate_size()
1962 if (event == group_leader) in perf_event_validate_size()
1966 if (__perf_event_read_size(sibling->attr.read_format, in perf_event_validate_size()
1967 group_leader->nr_siblings + 1) > 16*1024) in perf_event_validate_size()
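perf_event_validate_size() above protects the 64k output-record limit by rejecting any group whose computed read size would exceed 16k once the new member is counted. A sketch of the kind of size computation being bounded; the flag names and per-field sizes follow the read_format layout documented for perf_event_attr, but treat the exact accounting as illustrative rather than authoritative:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the PERF_FORMAT_* read_format bits. */
#define DEMO_FORMAT_TOTAL_TIME_ENABLED	(1ULL << 0)
#define DEMO_FORMAT_TOTAL_TIME_RUNNING	(1ULL << 1)
#define DEMO_FORMAT_ID			(1ULL << 2)
#define DEMO_FORMAT_GROUP		(1ULL << 3)

/* Rough size of what read() would return for one event plus its siblings. */
uint64_t demo_read_size(uint64_t read_format, int nr_siblings)
{
	uint64_t entry = sizeof(uint64_t);	/* the counter value itself */
	uint64_t size = 0;
	uint64_t nr = 1;

	if (read_format & DEMO_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & DEMO_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & DEMO_FORMAT_ID)
		entry += sizeof(uint64_t);	/* one extra id per value */
	if (read_format & DEMO_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* the leading "nr" field */
	}

	return size + entry * nr;
}

/* The validation rule: refuse a group whose read size would pass 16k. */
bool demo_validate_size(uint64_t read_format, int nr_siblings)
{
	return demo_read_size(read_format, nr_siblings + 1) <= 16 * 1024;
}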
1974 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1976 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1978 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1984 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1987 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1989 if (group_leader == event) in perf_group_attach()
1992 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1994 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1996 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
1997 group_leader->nr_siblings++; in perf_group_attach()
1998 group_leader->group_generation++; in perf_group_attach()
2007 * Remove an event from the lists for its context.
2008 * Must be called with ctx->mutex and ctx->lock held.
2011 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2013 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2014 lockdep_assert_held(&ctx->lock); in list_del_event()
2017 * We can have double detach due to exit/hot-unplug + close. in list_del_event()
2019 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2022 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2024 ctx->nr_events--; in list_del_event()
2025 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2026 ctx->nr_user--; in list_del_event()
2027 if (event->attr.inherit_stat) in list_del_event()
2028 ctx->nr_stat--; in list_del_event()
2030 list_del_rcu(&event->event_entry); in list_del_event()
2032 if (event->group_leader == event) in list_del_event()
2033 del_event_from_groups(event, ctx); in list_del_event()
2036 * If event was in error state, then keep it in list_del_event()
2039 * of error state is by explicit re-enabling in list_del_event()
2040 * of the event in list_del_event()
2042 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2043 perf_cgroup_event_disable(event, ctx); in list_del_event()
2044 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2047 ctx->generation++; in list_del_event()
2048 event->pmu_ctx->nr_events--; in list_del_event()
2052 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2057 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2060 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2063 static void put_event(struct perf_event *event);
2064 static void event_sched_out(struct perf_event *event,
2067 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2069 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2073 * If event uses aux_event tear down the link in perf_put_aux_event()
2075 if (event->aux_event) { in perf_put_aux_event()
2076 iter = event->aux_event; in perf_put_aux_event()
2077 event->aux_event = NULL; in perf_put_aux_event()
2083 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2086 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2087 if (iter->aux_event != event) in perf_put_aux_event()
2090 iter->aux_event = NULL; in perf_put_aux_event()
2091 put_event(event); in perf_put_aux_event()
2099 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2103 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2105 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2108 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2112 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2113 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2123 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2126 if (event->attr.aux_output && in perf_get_aux_event()
2127 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2130 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2133 if (!atomic_long_inc_not_zero(&group_leader->refcount)) in perf_get_aux_event()
2137 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2142 event->aux_event = group_leader; in perf_get_aux_event()
2147 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2149 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2150 &event->pmu_ctx->flexible_active; in get_event_list()
2159 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2161 event_sched_out(event, event->ctx); in perf_remove_sibling_event()
2162 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2165 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2167 struct perf_event *leader = event->group_leader; in perf_group_detach()
2169 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2171 lockdep_assert_held(&ctx->lock); in perf_group_detach()
2174 * We can have double detach due to exit/hot-unplug + close. in perf_group_detach()
2176 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2179 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2181 perf_put_aux_event(event); in perf_group_detach()
2186 if (leader != event) { in perf_group_detach()
2187 list_del_init(&event->sibling_list); in perf_group_detach()
2188 event->group_leader->nr_siblings--; in perf_group_detach()
2189 event->group_leader->group_generation++; in perf_group_detach()
2194 * If this was a group event with sibling events then in perf_group_detach()
2198 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2200 if (sibling->event_caps & PERF_EV_CAP_SIBLING) in perf_group_detach()
2203 sibling->group_leader = sibling; in perf_group_detach()
2204 list_del_init(&sibling->sibling_list); in perf_group_detach()
2207 sibling->group_caps = event->group_caps; in perf_group_detach()
2209 if (sibling->attach_state & PERF_ATTACH_CONTEXT) { in perf_group_detach()
2210 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2212 if (sibling->state == PERF_EVENT_STATE_ACTIVE) in perf_group_detach()
2213 list_add_tail(&sibling->active_list, get_event_list(sibling)); in perf_group_detach()
2216 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2228 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2230 struct perf_event *parent_event = event->parent; in perf_child_detach()
2232 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2235 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2240 lockdep_assert_held(&parent_event->child_mutex); in perf_child_detach()
2242 sync_child_event(event); in perf_child_detach()
2243 list_del_init(&event->child_list); in perf_child_detach()
2246 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2248 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2252 event_filter_match(struct perf_event *event) in event_filter_match() argument
2254 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2255 perf_cgroup_match(event); in event_filter_match()
2259 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2261 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2262 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context); in event_sched_out()
2265 // XXX cpc serialization, probably per-cpu IRQ disabled in event_sched_out()
2267 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2268 lockdep_assert_held(&ctx->lock); in event_sched_out()
2270 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2278 list_del_init(&event->active_list); in event_sched_out()
2280 perf_pmu_disable(event->pmu); in event_sched_out()
2282 event->pmu->del(event, 0); in event_sched_out()
2283 event->oncpu = -1; in event_sched_out()
2285 if (event->pending_disable) { in event_sched_out()
2286 event->pending_disable = 0; in event_sched_out()
2287 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2291 if (event->pending_sigtrap) { in event_sched_out()
2292 event->pending_sigtrap = 0; in event_sched_out()
2294 !event->pending_work && in event_sched_out()
2295 !task_work_add(current, &event->pending_task, TWA_RESUME)) { in event_sched_out()
2296 event->pending_work = 1; in event_sched_out()
2298 local_dec(&event->ctx->nr_pending); in event_sched_out()
2302 perf_event_set_state(event, state); in event_sched_out()
2304 if (!is_software_event(event)) in event_sched_out()
2305 cpc->active_oncpu--; in event_sched_out()
2306 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2307 ctx->nr_freq--; in event_sched_out()
2308 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2309 cpc->exclusive = 0; in event_sched_out()
2311 perf_pmu_enable(event->pmu); in event_sched_out()
2317 struct perf_event *event; in group_sched_out() local
2319 if (group_event->state != PERF_EVENT_STATE_ACTIVE) in group_sched_out()
2322 perf_assert_pmu_disabled(group_event->pmu_ctx->pmu); in group_sched_out()
2329 for_each_sibling_event(event, group_event) in group_sched_out()
2330 event_sched_out(event, ctx); in group_sched_out()
2338 * Cross CPU call to remove a performance event
2340 * We disable the event on the hardware level first. After that we
2344 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2349 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2352 if (ctx->is_active & EVENT_TIME) { in __perf_remove_from_context()
2362 event->pending_disable = 1; in __perf_remove_from_context()
2363 event_sched_out(event, ctx); in __perf_remove_from_context()
2365 perf_group_detach(event); in __perf_remove_from_context()
2367 perf_child_detach(event); in __perf_remove_from_context()
2368 list_del_event(event, ctx); in __perf_remove_from_context()
2370 event->state = PERF_EVENT_STATE_DEAD; in __perf_remove_from_context()
2372 if (!pmu_ctx->nr_events) { in __perf_remove_from_context()
2373 pmu_ctx->rotate_necessary = 0; in __perf_remove_from_context()
2375 if (ctx->task && ctx->is_active) { in __perf_remove_from_context()
2378 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); in __perf_remove_from_context()
2379 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __perf_remove_from_context()
2380 cpc->task_epc = NULL; in __perf_remove_from_context()
2384 if (!ctx->nr_events && ctx->is_active) { in __perf_remove_from_context()
2385 if (ctx == &cpuctx->ctx) in __perf_remove_from_context()
2388 ctx->is_active = 0; in __perf_remove_from_context()
2389 if (ctx->task) { in __perf_remove_from_context()
2390 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in __perf_remove_from_context()
2391 cpuctx->task_ctx = NULL; in __perf_remove_from_context()
2397 * Remove the event from a task's (or a CPU's) list of events.
2399 * If event->ctx is a cloned context, callers must make sure that
2400 * every task struct that event->ctx->task could possibly point to
2402 * that only calls us on the top-level context, which can't be a clone.
2406 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2408 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2410 lockdep_assert_held(&ctx->mutex); in perf_remove_from_context()
2417 raw_spin_lock_irq(&ctx->lock); in perf_remove_from_context()
2418 if (!ctx->is_active) { in perf_remove_from_context()
2419 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2421 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
2424 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
2426 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2430 * Cross CPU call to disable a performance event
2432 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2437 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2440 if (ctx->is_active & EVENT_TIME) { in __perf_event_disable()
2442 update_cgrp_time_from_event(event); in __perf_event_disable()
2445 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2447 if (event == event->group_leader) in __perf_event_disable()
2448 group_sched_out(event, ctx); in __perf_event_disable()
2450 event_sched_out(event, ctx); in __perf_event_disable()
2452 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2453 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2455 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2459 * Disable an event.
2461 * If event->ctx is a cloned context, callers must make sure that
2462 * every task struct that event->ctx->task could possibly point to
2465 * hold the top-level event's child_mutex, so any descendant that
2468 * When called from perf_pending_irq it's OK because event->ctx
2472 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2474 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2476 raw_spin_lock_irq(&ctx->lock); in _perf_event_disable()
2477 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2478 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
2481 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
2483 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2486 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2488 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2495 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2499 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2500 _perf_event_disable(event); in perf_event_disable()
2501 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2505 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2507 event->pending_disable = 1; in perf_event_disable_inatomic()
2508 irq_work_queue(&event->pending_irq); in perf_event_disable_inatomic()
2513 static void perf_log_throttle(struct perf_event *event, int enable);
2514 static void perf_log_itrace_start(struct perf_event *event);
2517 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2519 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2520 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context); in event_sched_in()
2523 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2525 lockdep_assert_held(&ctx->lock); in event_sched_in()
2527 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2530 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2532 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2534 * ->oncpu if it sees ACTIVE. in event_sched_in()
2537 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2544 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2545 perf_log_throttle(event, 1); in event_sched_in()
2546 event->hw.interrupts = 0; in event_sched_in()
2549 perf_pmu_disable(event->pmu); in event_sched_in()
2551 perf_log_itrace_start(event); in event_sched_in()
2553 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2554 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2555 event->oncpu = -1; in event_sched_in()
2556 ret = -EAGAIN; in event_sched_in()
2560 if (!is_software_event(event)) in event_sched_in()
2561 cpc->active_oncpu++; in event_sched_in()
2562 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2563 ctx->nr_freq++; in event_sched_in()
2565 if (event->attr.exclusive) in event_sched_in()
2566 cpc->exclusive = 1; in event_sched_in()
2569 perf_pmu_enable(event->pmu); in event_sched_in()
2577 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2578 struct pmu *pmu = group_event->pmu_ctx->pmu; in group_sched_in()
2580 if (group_event->state == PERF_EVENT_STATE_OFF) in group_sched_in()
2583 pmu->start_txn(pmu, PERF_PMU_TXN_ADD); in group_sched_in()
2591 for_each_sibling_event(event, group_event) { in group_sched_in()
2592 if (event_sched_in(event, ctx)) { in group_sched_in()
2593 partial_group = event; in group_sched_in()
2598 if (!pmu->commit_txn(pmu)) in group_sched_in()
2605 * The events up to the failed event are scheduled out normally. in group_sched_in()
2607 for_each_sibling_event(event, group_event) { in group_sched_in()
2608 if (event == partial_group) in group_sched_in()
2611 event_sched_out(event, ctx); in group_sched_in()
2616 pmu->cancel_txn(pmu); in group_sched_in()
2617 return -EAGAIN; in group_sched_in()
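group_sched_in() above is all-or-nothing: the group is added inside a PMU transaction, and if any member fails, exactly the members that did get added are scheduled back out before the transaction is cancelled. A generic sketch of that rollback shape, where an array stands in for the leader plus its sibling list and the ops are hypothetical:

#include <errno.h>
#include <stddef.h>

struct demo_event;	/* opaque here */

struct demo_pmu_ops {
	int  (*event_add)(struct demo_event *ev);
	void (*event_del)(struct demo_event *ev);
	int  (*commit_txn)(void);	/* 0 when the whole group fits */
	void (*cancel_txn)(void);
};

/* Schedule a whole group; on failure undo only what was already added. */
int demo_group_sched_in(const struct demo_pmu_ops *ops,
			struct demo_event **group, size_t nr)
{
	size_t i;

	/* the real code brackets this with pmu->start_txn(pmu, PERF_PMU_TXN_ADD) */
	for (i = 0; i < nr; i++) {
		if (ops->event_add(group[i]))
			goto unwind;	/* group[i] itself never made it on */
	}

	if (!ops->commit_txn())
		return 0;		/* the whole group is now on the PMU */

unwind:
	while (i--)			/* only the members added before the failure */
		ops->event_del(group[i]);
	ops->cancel_txn();
	return -EAGAIN;
}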
2621 * Work out whether we can put this event group on the CPU now.
2623 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2625 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2626 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context); in group_can_go_on()
2631 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2637 if (cpc->exclusive) in group_can_go_on()
2643 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2652 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2655 list_add_event(event, ctx); in add_event_to_ctx()
2656 perf_group_attach(event); in add_event_to_ctx()
2664 if (!cpuctx->task_ctx) in task_ctx_sched_out()
2667 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2676 ctx_sched_in(&cpuctx->ctx, EVENT_PINNED); in perf_event_sched_in()
2679 ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE); in perf_event_sched_in()
2686 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2687 * - task pinned (EVENT_PINNED)
2688 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2689 * - task flexible (EVENT_FLEXIBLE).
2692 * time an event is added, only do it for the groups of equal priority and
2701 * event to the context or enabling existing event in the context. We can
2719 perf_ctx_disable(&cpuctx->ctx, false); in ctx_resched()
2728 * - EVENT_CPU: schedule out corresponding groups; in ctx_resched()
2729 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups; in ctx_resched()
2730 * - otherwise, do nothing more. in ctx_resched()
2733 ctx_sched_out(&cpuctx->ctx, event_type); in ctx_resched()
2735 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE); in ctx_resched()
2739 perf_ctx_enable(&cpuctx->ctx, false); in ctx_resched()
2747 struct perf_event_context *task_ctx = cpuctx->task_ctx; in perf_pmu_resched()
2755 * Cross CPU call to install and enable a performance event
2758 * things like ctx->is_active and cpuctx->task_ctx are set.
2762 struct perf_event *event = info; in __perf_install_in_context() local
2763 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2765 struct perf_event_context *task_ctx = cpuctx->task_ctx; in __perf_install_in_context()
2769 raw_spin_lock(&cpuctx->ctx.lock); in __perf_install_in_context()
2770 if (ctx->task) { in __perf_install_in_context()
2771 raw_spin_lock(&ctx->lock); in __perf_install_in_context()
2774 reprogram = (ctx->task == current); in __perf_install_in_context()
2780 * If its not running, we don't care, ctx->lock will in __perf_install_in_context()
2783 if (task_curr(ctx->task) && !reprogram) { in __perf_install_in_context()
2784 ret = -ESRCH; in __perf_install_in_context()
2788 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); in __perf_install_in_context()
2790 raw_spin_lock(&task_ctx->lock); in __perf_install_in_context()
2794 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2796 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2800 reprogram = cgroup_is_descendant(cgrp->css.cgroup, in __perf_install_in_context()
2801 event->cgrp->css.cgroup); in __perf_install_in_context()
2807 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2808 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2810 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2819 static bool exclusive_event_installable(struct perf_event *event,
2823 * Attach a performance event to a context.
2829 struct perf_event *event, in perf_install_in_context() argument
2832 struct task_struct *task = READ_ONCE(ctx->task); in perf_install_in_context()
2834 lockdep_assert_held(&ctx->mutex); in perf_install_in_context()
2836 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2838 if (event->cpu != -1) in perf_install_in_context()
2839 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
2842 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2845 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2849 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2850 * that case we need the magic of the IPI to set ctx->is_active. in perf_install_in_context()
2853 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2855 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2856 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2857 raw_spin_lock_irq(&ctx->lock); in perf_install_in_context()
2858 if (ctx->task == TASK_TOMBSTONE) { in perf_install_in_context()
2859 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2862 add_event_to_ctx(event, ctx); in perf_install_in_context()
2863 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2868 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2879 * Installing events is tricky because we cannot rely on ctx->is_active in perf_install_in_context()
2880 * to be set in case this is the nr_events 0 -> 1 transition. in perf_install_in_context()
2894 * our task->perf_event_ctxp[] store, such that it will in fact take in perf_install_in_context()
2903 * This smp_mb() orders the task->perf_event_ctxp[] store with the in perf_install_in_context()
2910 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2913 raw_spin_lock_irq(&ctx->lock); in perf_install_in_context()
2914 task = ctx->task; in perf_install_in_context()
2918 * cannot happen), and we hold ctx->mutex, which serializes us in perf_install_in_context()
2921 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2925 * If the task is not running, ctx->lock will avoid it becoming so, in perf_install_in_context()
2926 * thus we can safely install the event. in perf_install_in_context()
2929 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2932 add_event_to_ctx(event, ctx); in perf_install_in_context()
2933 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2937 * Cross CPU call to enable a performance event
2939 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2944 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2947 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2948 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2951 if (ctx->is_active) in __perf_event_enable()
2954 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2955 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
2957 if (!ctx->is_active) in __perf_event_enable()
2960 if (!event_filter_match(event)) { in __perf_event_enable()
2966 * If the event is in a group and isn't the group leader, in __perf_event_enable()
2969 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2974 task_ctx = cpuctx->task_ctx; in __perf_event_enable()
2975 if (ctx->task) in __perf_event_enable()
2978 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2982 * Enable an event.
2984 * If event->ctx is a cloned context, callers must make sure that
2985 * every task struct that event->ctx->task could possibly point to
2990 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2992 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2994 raw_spin_lock_irq(&ctx->lock); in _perf_event_enable()
2995 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
2996 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
2998 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
3003 * If the event is in error state, clear that first. in _perf_event_enable()
3005 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3007 * been scheduled away before the cross-call arrived. in _perf_event_enable()
3009 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3013 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3014 event->group_leader == event) in _perf_event_enable()
3017 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3019 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
3021 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3027 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3031 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3032 _perf_event_enable(event); in perf_event_enable()
3033 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
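The kernel-internal counterpart of the fd-based enable is the exported perf_event_enable()/perf_event_disable() pair. A minimal sketch, assuming 'event' came from perf_event_create_kernel_counter() with attr.disabled = 1; demo_count_region() is a hypothetical name.

#include <linux/perf_event.h>

static u64 demo_count_region(struct perf_event *event)
{
	u64 value, enabled, running;

	perf_event_enable(event);	/* reaches __perf_event_enable() via event_function_call() */

	/* ... code being measured ... */

	perf_event_disable(event);
	value = perf_event_read_value(event, &enabled, &running);

	return value;
}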
3038 struct perf_event *event; member
3045 struct perf_event *event = sd->event; in __perf_event_stop() local
3048 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3056 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3058 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3059 return -EAGAIN; in __perf_event_stop()
3061 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3069 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3072 if (sd->restart) in __perf_event_stop()
3073 event->pmu->start(event, 0); in __perf_event_stop()
3078 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3081 .event = event, in perf_event_stop()
3087 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3094 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3095 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3096 * fall through with ret==-ENXIO. in perf_event_stop()
3098 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3100 } while (ret == -EAGAIN); in perf_event_stop()
3111 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3112 * (p2) when an event is scheduled in (pmu::add), it calls
3116 * If (p1) happens while the event is active, we restart it to force (p2).
3119 * pre-existing mappings, called once when new filters arrive via SET_FILTER
3127 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3129 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3131 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3134 raw_spin_lock(&ifh->lock); in perf_event_addr_filters_sync()
3135 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3136 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3137 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3139 raw_spin_unlock(&ifh->lock); in perf_event_addr_filters_sync()
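From userspace, address filters are installed with the PERF_EVENT_IOC_SET_FILTER ioctl, which is what ultimately feeds the addr_filter_ranges/addr_filters_gen machinery synced above. A hedged sketch: 'fd' is assumed to belong to a PMU that supports address filters (e.g. an instruction-trace PMU), and the filter string mirrors the perf tool's --filter syntax, so adjust the addresses and object path to your setup.

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* 'fd' is a perf_event_open() fd for an address-filter capable event. */
static int demo_set_addr_filter(int fd)
{
	const char *filter = "filter 0x1000/0x100@/usr/bin/ls";

	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
}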
3143 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3148 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3149 return -EINVAL; in _perf_event_refresh()
3151 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3152 _perf_event_enable(event); in _perf_event_refresh()
3160 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3165 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3166 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3167 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
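_perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH ioctl: it adds to event_limit and enables the event, so the event disables itself after that many overflows. A minimal userspace sketch, assuming 'fd' is a sampling event opened with attr.disabled = 1 and a SIGIO handler is already installed; demo_arm_for_n_overflows() is a hypothetical helper.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int demo_arm_for_n_overflows(int fd, int n)
{
	/* Deliver SIGIO to this task on overflow wakeups. */
	fcntl(fd, F_SETFL, O_ASYNC);
	fcntl(fd, F_SETSIG, SIGIO);
	fcntl(fd, F_SETOWN, getpid());

	/* Run for the next 'n' overflows, then stop (see event_limit). */
	return ioctl(fd, PERF_EVENT_IOC_REFRESH, n);
}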
3182 if (!bp->attr.disabled) in perf_event_modify_breakpoint()
3189 * Copy event-type-independent attributes that may be modified.
3194 to->sig_data = from->sig_data; in perf_event_modify_copy_attr()
3197 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3204 if (event->attr.type != attr->type) in perf_event_modify_attr()
3205 return -EINVAL; in perf_event_modify_attr()
3207 switch (event->attr.type) { in perf_event_modify_attr()
3213 return -EOPNOTSUPP; in perf_event_modify_attr()
3216 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3218 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3220 * Event-type-independent attributes must be copied before event-type in perf_event_modify_attr()
3224 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3225 err = func(event, attr); in perf_event_modify_attr()
3228 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3229 perf_event_modify_copy_attr(&child->attr, attr); in perf_event_modify_attr()
3235 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
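perf_event_modify_attr() is reached from the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl; per the switch shown above, only breakpoint events are handled, which lets an existing hardware breakpoint be retargeted without recreating it. A hedged userspace sketch; demo_move_breakpoint() and the bp_type/bp_len choices are purely illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* 'fd' is assumed to be an existing PERF_TYPE_BREAKPOINT event. */
static int demo_move_breakpoint(int fd, unsigned long new_addr)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	= PERF_TYPE_BREAKPOINT;
	attr.size	= sizeof(attr);
	attr.bp_type	= HW_BREAKPOINT_W;
	attr.bp_addr	= new_addr;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;

	return ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
}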
3242 struct perf_event_context *ctx = pmu_ctx->ctx; in __pmu_ctx_sched_out()
3243 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3244 struct pmu *pmu = pmu_ctx->pmu; in __pmu_ctx_sched_out()
3246 if (ctx->task && !ctx->is_active) { in __pmu_ctx_sched_out()
3249 cpc = this_cpu_ptr(pmu->cpu_pmu_context); in __pmu_ctx_sched_out()
3250 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __pmu_ctx_sched_out()
3251 cpc->task_epc = NULL; in __pmu_ctx_sched_out()
3259 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3260 &pmu_ctx->pinned_active, in __pmu_ctx_sched_out()
3262 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3266 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3267 &pmu_ctx->flexible_active, in __pmu_ctx_sched_out()
3269 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3275 pmu_ctx->rotate_necessary = 0; in __pmu_ctx_sched_out()
3285 int is_active = ctx->is_active; in ctx_sched_out()
3290 lockdep_assert_held(&ctx->lock); in ctx_sched_out()
3292 if (likely(!ctx->nr_events)) { in ctx_sched_out()
3296 WARN_ON_ONCE(ctx->is_active); in ctx_sched_out()
3297 if (ctx->task) in ctx_sched_out()
3298 WARN_ON_ONCE(cpuctx->task_ctx); in ctx_sched_out()
3315 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx); in ctx_sched_out()
3317 * CPU-release for the below ->is_active store, in ctx_sched_out()
3323 ctx->is_active &= ~event_type; in ctx_sched_out()
3324 if (!(ctx->is_active & EVENT_ALL)) in ctx_sched_out()
3325 ctx->is_active = 0; in ctx_sched_out()
3327 if (ctx->task) { in ctx_sched_out()
3328 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_out()
3329 if (!ctx->is_active) in ctx_sched_out()
3330 cpuctx->task_ctx = NULL; in ctx_sched_out()
3333 is_active ^= ctx->is_active; /* changed bits */ in ctx_sched_out()
3335 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in ctx_sched_out()
3336 if (cgroup && !pmu_ctx->nr_cgroups) in ctx_sched_out()
3353 lockdep_assert_held(&ctx1->lock); in context_equiv()
3354 lockdep_assert_held(&ctx2->lock); in context_equiv()
3357 if (ctx1->pin_count || ctx2->pin_count) in context_equiv()
3361 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) in context_equiv()
3365 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) in context_equiv()
3372 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && in context_equiv()
3373 ctx1->parent_gen == ctx2->parent_gen) in context_equiv()
3380 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3385 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3389 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3392 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3395 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3396 event->pmu->read(event); in __perf_event_sync_stat()
3398 perf_event_update_time(event); in __perf_event_sync_stat()
3401 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3404 value = local64_read(&next_event->count); in __perf_event_sync_stat()
3405 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3406 local64_set(&next_event->count, value); in __perf_event_sync_stat()
3408 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3409 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3414 perf_event_update_userpage(event); in __perf_event_sync_stat()
3421 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3423 if (!ctx->nr_stat) in perf_event_sync_stat()
3428 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3431 next_event = list_first_entry(&next_ctx->event_list, in perf_event_sync_stat()
3434 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3435 &next_event->event_entry != &next_ctx->event_list) { in perf_event_sync_stat()
3437 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3439 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3457 if (!prev_ctx->nr_task_data) in perf_event_swap_task_ctx_data()
3461 &prev_ctx->pmu_ctx_list, &next_ctx->pmu_ctx_list, in perf_event_swap_task_ctx_data()
3464 if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu)) in perf_event_swap_task_ctx_data()
3473 if (prev_epc->pmu->swap_task_ctx) in perf_event_swap_task_ctx_data()
3474 prev_epc->pmu->swap_task_ctx(prev_epc, next_epc); in perf_event_swap_task_ctx_data()
3476 swap(prev_epc->task_ctx_data, next_epc->task_ctx_data); in perf_event_swap_task_ctx_data()
3485 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in perf_ctx_sched_task_cb()
3486 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); in perf_ctx_sched_task_cb()
3488 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task) in perf_ctx_sched_task_cb()
3489 pmu_ctx->pmu->sched_task(pmu_ctx, sched_in); in perf_ctx_sched_task_cb()
3496 struct perf_event_context *ctx = task->perf_event_ctxp; in perf_event_context_sched_out()
3505 next_ctx = rcu_dereference(next->perf_event_ctxp); in perf_event_context_sched_out()
3509 parent = rcu_dereference(ctx->parent_ctx); in perf_event_context_sched_out()
3510 next_parent = rcu_dereference(next_ctx->parent_ctx); in perf_event_context_sched_out()
3521 * lock (including re-checking that neither has been in perf_event_context_sched_out()
3526 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
3527 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); in perf_event_context_sched_out()
3532 /* PMIs are disabled; ctx->nr_pending is stable. */ in perf_event_context_sched_out()
3533 if (local_read(&ctx->nr_pending) || in perf_event_context_sched_out()
3534 local_read(&next_ctx->nr_pending)) { in perf_event_context_sched_out()
3537 * events that rely on the ctx->task relation. in perf_event_context_sched_out()
3539 raw_spin_unlock(&next_ctx->lock); in perf_event_context_sched_out()
3544 WRITE_ONCE(ctx->task, next); in perf_event_context_sched_out()
3545 WRITE_ONCE(next_ctx->task, task); in perf_event_context_sched_out()
3555 * ctx->task and ctx->task_ctx_data are immaterial in perf_event_context_sched_out()
3557 * ctx->lock which we're now holding. in perf_event_context_sched_out()
3559 RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx); in perf_event_context_sched_out()
3560 RCU_INIT_POINTER(next->perf_event_ctxp, ctx); in perf_event_context_sched_out()
3566 raw_spin_unlock(&next_ctx->lock); in perf_event_context_sched_out()
3567 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
3573 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
3581 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
3590 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context); in perf_sched_cb_dec()
3595 if (!--cpc->sched_cb_usage) in perf_sched_cb_dec()
3596 list_del(&cpc->sched_cb_entry); in perf_sched_cb_dec()
3602 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context); in perf_sched_cb_inc()
3604 if (!cpc->sched_cb_usage++) in perf_sched_cb_inc()
3605 list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); in perf_sched_cb_inc()
3615 * This callback is relevant even to per-cpu events; for example multi event
3624 pmu = cpc->epc.pmu; in __perf_pmu_sched_task()
3627 if (WARN_ON_ONCE(!pmu->sched_task)) in __perf_pmu_sched_task()
3630 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3633 pmu->sched_task(cpc->task_epc, sched_in); in __perf_pmu_sched_task()
3636 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3646 /* cpuctx->task_ctx will be handled in perf_event_context_sched_in/out */ in perf_pmu_sched_task()
3647 if (prev == next || cpuctx->task_ctx) in perf_pmu_sched_task()
3661 * We stop each event and update the event value in event->count.
3664 * sets the disabled bit in the control field of event _before_
3665 * accessing the event control register. If an NMI hits, then it will
3666 * not restart the event.
3682 * cgroup events are system-wide mode only in __perf_event_task_sched_out()
3692 return le->group_index < re->group_index; in perf_less_group_idx()
3708 static void __heap_add(struct min_heap *heap, struct perf_event *event) in __heap_add() argument
3710 struct perf_event **itrs = heap->data; in __heap_add()
3712 if (event) { in __heap_add()
3713 itrs[heap->nr] = event; in __heap_add()
3714 heap->nr++; in __heap_add()
3722 if (!pmu_ctx->ctx->task) in __link_epc()
3725 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); in __link_epc()
3726 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __link_epc()
3727 cpc->task_epc = pmu_ctx; in __link_epc()
3740 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3746 if (pmu->filter && pmu->filter(pmu, cpu)) in visit_groups_merge()
3749 if (!ctx->task) { in visit_groups_merge()
3752 .data = cpuctx->heap, in visit_groups_merge()
3754 .size = cpuctx->heap_size, in visit_groups_merge()
3757 lockdep_assert_held(&cpuctx->ctx.lock); in visit_groups_merge()
3760 if (cpuctx->cgrp) in visit_groups_merge()
3761 css = &cpuctx->cgrp->css; in visit_groups_merge()
3770 __heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL)); in visit_groups_merge()
3777 for (; css; css = css->parent) in visit_groups_merge()
3778 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup)); in visit_groups_merge()
3782 __link_epc((*evt)->pmu_ctx); in visit_groups_merge()
3783 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu); in visit_groups_merge()
3804 * Because the userpage is strictly per-event (there is no concept of context,
3806 * when context time starts :-(
3810 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3812 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3815 perf_event_update_time(event); in event_update_userpage()
3816 perf_event_update_userpage(event); in event_update_userpage()
3823 struct perf_event *event; in group_update_userpage() local
3828 for_each_sibling_event(event, group_event) in group_update_userpage()
3829 event_update_userpage(event); in group_update_userpage()
3832 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3834 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3837 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3840 if (!event_filter_match(event)) in merge_sched_in()
3843 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
3844 if (!group_sched_in(event, ctx)) in merge_sched_in()
3845 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3848 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3850 if (event->attr.pinned) { in merge_sched_in()
3851 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3852 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3856 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
3857 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context); in merge_sched_in()
3859 group_update_userpage(event); in merge_sched_in()
3881 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in ctx_groups_sched_in()
3882 if (cgroup && !pmu_ctx->nr_cgroups) in ctx_groups_sched_in()
3884 pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu); in ctx_groups_sched_in()
3891 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu); in __pmu_ctx_sched_in()
3898 int is_active = ctx->is_active; in ctx_sched_in()
3903 lockdep_assert_held(&ctx->lock); in ctx_sched_in()
3905 if (likely(!ctx->nr_events)) in ctx_sched_in()
3913 * CPU-release for the below ->is_active store, in ctx_sched_in()
3919 ctx->is_active |= (event_type | EVENT_TIME); in ctx_sched_in()
3920 if (ctx->task) { in ctx_sched_in()
3922 cpuctx->task_ctx = ctx; in ctx_sched_in()
3924 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_in()
3927 is_active ^= ctx->is_active; /* changed bits */ in ctx_sched_in()
3934 ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup); in ctx_sched_in()
3938 ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup); in ctx_sched_in()
3947 ctx = rcu_dereference(task->perf_event_ctxp); in perf_event_context_sched_in()
3951 if (cpuctx->task_ctx == ctx) { in perf_event_context_sched_in()
3964 * We must check ctx->nr_events while holding ctx->lock, such in perf_event_context_sched_in()
3967 if (!ctx->nr_events) in perf_event_context_sched_in()
3979 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) { in perf_event_context_sched_in()
3980 perf_ctx_disable(&cpuctx->ctx, false); in perf_event_context_sched_in()
3981 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE); in perf_event_context_sched_in()
3986 perf_ctx_sched_task_cb(cpuctx->task_ctx, true); in perf_event_context_sched_in()
3988 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) in perf_event_context_sched_in()
3989 perf_ctx_enable(&cpuctx->ctx, false); in perf_event_context_sched_in()
4003 * We restore the event value and then enable it.
4006 * sets the enabled bit in the control field of event _before_
4007 * accessing the event control register. If an NMI hits, then it will
4008 * keep the event running.
4022 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4024 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4040 * period = ------------------- in perf_calculate_period()
4053 a##_fls--; \ in perf_calculate_period()
4056 b##_fls--; \ in perf_calculate_period()
4098 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4100 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4104 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4106 delta = (s64)(period - hwc->sample_period); in perf_adjust_period()
4110 delta -= 7; in perf_adjust_period()
4113 sample_period = hwc->sample_period + delta; in perf_adjust_period()
4118 hwc->sample_period = sample_period; in perf_adjust_period()
4120 if (local64_read(&hwc->period_left) > 8*sample_period) { in perf_adjust_period()
4122 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4124 local64_set(&hwc->period_left, 0); in perf_adjust_period()
4127 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
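To make the frequency -> period relation above concrete: assuming the relation sketched in the surrounding comment, period = (count * 10^9) / (nsec * sample_freq), and ignoring the fls()-based scaling the real helper uses to avoid 64-bit overflow, a standalone sketch with a hypothetical demo_* name:

#include <stdint.h>

/* demo_calculate_period(2000000, 10000000, 1000) == 200000:
 * 2,000,000 events in 10 ms at sample_freq = 1000 Hz means a period of
 * 200,000 events per sample keeps us near 1000 samples/sec. */
static uint64_t demo_calculate_period(uint64_t count, uint64_t nsec,
				      uint64_t sample_freq)
{
	const uint64_t nsec_per_sec = 1000000000ULL;

	return (count * nsec_per_sec) / (nsec * sample_freq);
}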
4139 struct perf_event *event; in perf_adjust_freq_unthr_context() local
4146 * - the context has events in frequency mode (needs freq adjust) in perf_adjust_freq_unthr_context()
4147 * - there are events to unthrottle on this cpu in perf_adjust_freq_unthr_context()
4149 if (!(ctx->nr_freq || unthrottle)) in perf_adjust_freq_unthr_context()
4152 raw_spin_lock(&ctx->lock); in perf_adjust_freq_unthr_context()
4154 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
4155 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
4158 // XXX use visit thingy to avoid the -1,cpu match in perf_adjust_freq_unthr_context()
4159 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
4162 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
4164 hwc = &event->hw; in perf_adjust_freq_unthr_context()
4166 if (hwc->interrupts == MAX_INTERRUPTS) { in perf_adjust_freq_unthr_context()
4167 hwc->interrupts = 0; in perf_adjust_freq_unthr_context()
4168 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
4169 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
4172 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
4176 * stop the event and update event->count in perf_adjust_freq_unthr_context()
4178 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
4180 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
4181 delta = now - hwc->freq_count_stamp; in perf_adjust_freq_unthr_context()
4182 hwc->freq_count_stamp = now; in perf_adjust_freq_unthr_context()
4185 * restart the event in perf_adjust_freq_unthr_context()
4187 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
4192 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
4194 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4196 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
4199 raw_spin_unlock(&ctx->lock); in perf_adjust_freq_unthr_context()
4203 * Move @event to the tail of the @ctx's eligible events.
4205 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4208 * Rotate the first entry of the non-pinned groups to the tail. Rotation might be in rotate_ctx()
4211 if (ctx->rotate_disable) in rotate_ctx()
4214 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4215 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4218 /* pick an event from the flexible_groups to rotate */
4222 struct perf_event *event; in ctx_event_to_rotate() local
4226 .pmu = pmu_ctx->pmu, in ctx_event_to_rotate()
4229 /* pick the first active flexible event */ in ctx_event_to_rotate()
4230 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4232 if (event) in ctx_event_to_rotate()
4235 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4236 tree = &pmu_ctx->ctx->flexible_groups.tree; in ctx_event_to_rotate()
4238 if (!pmu_ctx->ctx->task) { in ctx_event_to_rotate()
4243 event = __node_2_pe(node); in ctx_event_to_rotate()
4247 key.cpu = -1; in ctx_event_to_rotate()
4250 event = __node_2_pe(node); in ctx_event_to_rotate()
4257 event = __node_2_pe(node); in ctx_event_to_rotate()
4264 pmu_ctx->rotate_necessary = 0; in ctx_event_to_rotate()
4266 return event; in ctx_event_to_rotate()
4279 * events, thus the event count values are stable. in perf_rotate_context()
4282 cpu_epc = &cpc->epc; in perf_rotate_context()
4283 pmu = cpu_epc->pmu; in perf_rotate_context()
4284 task_epc = cpc->task_epc; in perf_rotate_context()
4286 cpu_rotate = cpu_epc->rotate_necessary; in perf_rotate_context()
4287 task_rotate = task_epc ? task_epc->rotate_necessary : 0; in perf_rotate_context()
4292 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
4305 update_context_time(task_epc->ctx); in perf_rotate_context()
4310 update_context_time(&cpuctx->ctx); in perf_rotate_context()
4312 rotate_ctx(&cpuctx->ctx, cpu_event); in perf_rotate_context()
4313 __pmu_ctx_sched_in(&cpuctx->ctx, pmu); in perf_rotate_context()
4317 rotate_ctx(task_epc->ctx, task_event); in perf_rotate_context()
4320 __pmu_ctx_sched_in(task_epc->ctx, pmu); in perf_rotate_context()
4323 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
4340 perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled); in perf_event_task_tick()
4343 ctx = rcu_dereference(current->perf_event_ctxp); in perf_event_task_tick()
4349 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4352 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4355 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4356 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4359 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
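event_enable_on_exec() is the kernel side of attr.enable_on_exec: the counter sits disabled in the child until exec(), which is how tools avoid counting their own fork/exec overhead. A hedged userspace sketch; demo_open_enable_on_exec() is hypothetical and the event choice is arbitrary.

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int demo_open_enable_on_exec(pid_t child)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type		= PERF_TYPE_HARDWARE;
	attr.config		= PERF_COUNT_HW_INSTRUCTIONS;
	attr.size		= sizeof(attr);
	attr.disabled		= 1;	/* stays off ... */
	attr.enable_on_exec	= 1;	/* ... until the child exec()s */

	return (int)syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
}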
4365 * Enable all of a task's events that have been marked enable-on-exec.
4373 struct perf_event *event; in perf_event_enable_on_exec() local
4378 if (WARN_ON_ONCE(current->perf_event_ctxp != ctx)) in perf_event_enable_on_exec()
4381 if (!ctx->nr_events) in perf_event_enable_on_exec()
4388 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4389 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4390 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4394 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4411 static void perf_remove_from_owner(struct perf_event *event);
4412 static void perf_event_exit_event(struct perf_event *event,
4417 * remove-on-exec, and feeds their values back to parent events.
4422 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4426 mutex_lock(&ctx->mutex); in perf_event_remove_on_exec()
4428 if (WARN_ON_ONCE(ctx->task != current)) in perf_event_remove_on_exec()
4431 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4432 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4435 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4436 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4440 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4443 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_event_remove_on_exec()
4446 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_remove_on_exec()
4449 mutex_unlock(&ctx->mutex); in perf_event_remove_on_exec()
4456 struct perf_event *event; member
4461 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4465 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4479 * Cross CPU call to read the hardware event
4484 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4485 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4487 struct pmu *pmu = event->pmu; in __perf_event_read()
4493 * event->count would have been updated to a recent sample in __perf_event_read()
4494 * when the event was scheduled out. in __perf_event_read()
4496 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4499 raw_spin_lock(&ctx->lock); in __perf_event_read()
4500 if (ctx->is_active & EVENT_TIME) { in __perf_event_read()
4502 update_cgrp_time_from_event(event); in __perf_event_read()
4505 perf_event_update_time(event); in __perf_event_read()
4506 if (data->group) in __perf_event_read()
4507 perf_event_update_sibling_time(event); in __perf_event_read()
4509 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4512 if (!data->group) { in __perf_event_read()
4513 pmu->read(event); in __perf_event_read()
4514 data->ret = 0; in __perf_event_read()
4518 pmu->start_txn(pmu, PERF_PMU_TXN_READ); in __perf_event_read()
4520 pmu->read(event); in __perf_event_read()
4522 for_each_sibling_event(sub, event) { in __perf_event_read()
4523 if (sub->state == PERF_EVENT_STATE_ACTIVE) { in __perf_event_read()
4525 * Use sibling's PMU rather than @event's since in __perf_event_read()
4528 sub->pmu->read(sub); in __perf_event_read()
4532 data->ret = pmu->commit_txn(pmu); in __perf_event_read()
4535 raw_spin_unlock(&ctx->lock); in __perf_event_read()
4538 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4540 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4543 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4551 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4552 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4556 * NMI-safe method to read a local event, that is an event that
4558 * - either for the current task, or for this CPU
4559 * - does not have inherit set, for inherited task events
4561 * - must not have a pmu::count method
4563 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4576 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4579 if (event->attr.inherit) { in perf_event_read_local()
4580 ret = -EOPNOTSUPP; in perf_event_read_local()
4584 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4585 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4586 event->hw.target != current) { in perf_event_read_local()
4587 ret = -EINVAL; in perf_event_read_local()
4591 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4592 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4593 event->cpu != smp_processor_id()) { in perf_event_read_local()
4594 ret = -EINVAL; in perf_event_read_local()
4598 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4599 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
4600 ret = -EBUSY; in perf_event_read_local()
4605 * If the event is currently on this CPU, it's either a per-task event, in perf_event_read_local()
4607 * oncpu == -1). in perf_event_read_local()
4609 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
4610 event->pmu->read(event); in perf_event_read_local()
4612 *value = local64_read(&event->count); in perf_event_read_local()
4616 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
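A minimal sketch of calling perf_event_read_local() from kernel code, assuming 'event' satisfies the constraints listed above (local to the current task or this CPU, no inherit); demo_read_local() is a hypothetical wrapper.

#include <linux/perf_event.h>

static u64 demo_read_local(struct perf_event *event)
{
	u64 value = 0, enabled, running;

	if (perf_event_read_local(event, &value, &enabled, &running))
		return 0;	/* not readable from this context */

	return value;
}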
4628 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4630 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4634 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4635 * value in the event structure: in perf_event_read()
4642 * Orders the ->state and ->oncpu loads such that if we see in perf_event_read()
4643 * ACTIVE we must also see the right ->oncpu. in perf_event_read()
4649 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4654 .event = event, in perf_event_read()
4660 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4666 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4667 * scheduled out and that will have updated the event count. in perf_event_read()
4669 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4677 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4680 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_event_read()
4681 state = event->state; in perf_event_read()
4683 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_read()
4691 if (ctx->is_active & EVENT_TIME) { in perf_event_read()
4693 update_cgrp_time_from_event(event); in perf_event_read()
4696 perf_event_update_time(event); in perf_event_read()
4698 perf_event_update_sibling_time(event); in perf_event_read()
4699 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_read()
4710 raw_spin_lock_init(&ctx->lock); in __perf_event_init_context()
4711 mutex_init(&ctx->mutex); in __perf_event_init_context()
4712 INIT_LIST_HEAD(&ctx->pmu_ctx_list); in __perf_event_init_context()
4713 perf_event_groups_init(&ctx->pinned_groups); in __perf_event_init_context()
4714 perf_event_groups_init(&ctx->flexible_groups); in __perf_event_init_context()
4715 INIT_LIST_HEAD(&ctx->event_list); in __perf_event_init_context()
4716 refcount_set(&ctx->refcount, 1); in __perf_event_init_context()
4722 epc->pmu = pmu; in __perf_init_event_pmu_context()
4723 INIT_LIST_HEAD(&epc->pmu_ctx_entry); in __perf_init_event_pmu_context()
4724 INIT_LIST_HEAD(&epc->pinned_active); in __perf_init_event_pmu_context()
4725 INIT_LIST_HEAD(&epc->flexible_active); in __perf_init_event_pmu_context()
4726 atomic_set(&epc->refcount, 1); in __perf_init_event_pmu_context()
4740 ctx->task = get_task_struct(task); in alloc_perf_context()
4760 return ERR_PTR(-ESRCH); in find_lively_task_by_vpid()
4769 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4777 /* Must be root to operate on a CPU event: */ in find_get_context()
4778 err = perf_allow_cpu(&event->attr); in find_get_context()
4782 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
4783 ctx = &cpuctx->ctx; in find_get_context()
4785 raw_spin_lock_irqsave(&ctx->lock, flags); in find_get_context()
4786 ++ctx->pin_count; in find_get_context()
4787 raw_spin_unlock_irqrestore(&ctx->lock, flags); in find_get_context()
4792 err = -EINVAL; in find_get_context()
4797 ++ctx->pin_count; in find_get_context()
4799 raw_spin_unlock_irqrestore(&ctx->lock, flags); in find_get_context()
4805 err = -ENOMEM; in find_get_context()
4810 mutex_lock(&task->perf_event_mutex); in find_get_context()
4815 if (task->flags & PF_EXITING) in find_get_context()
4816 err = -ESRCH; in find_get_context()
4817 else if (task->perf_event_ctxp) in find_get_context()
4818 err = -EAGAIN; in find_get_context()
4821 ++ctx->pin_count; in find_get_context()
4822 rcu_assign_pointer(task->perf_event_ctxp, ctx); in find_get_context()
4824 mutex_unlock(&task->perf_event_mutex); in find_get_context()
4829 if (err == -EAGAIN) in find_get_context()
4843 struct perf_event *event) in find_get_pmu_context() argument
4848 if (!ctx->task) { in find_get_pmu_context()
4856 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
4857 epc = &cpc->epc; in find_get_pmu_context()
4858 raw_spin_lock_irq(&ctx->lock); in find_get_pmu_context()
4859 if (!epc->ctx) { in find_get_pmu_context()
4860 atomic_set(&epc->refcount, 1); in find_get_pmu_context()
4861 epc->embedded = 1; in find_get_pmu_context()
4862 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list); in find_get_pmu_context()
4863 epc->ctx = ctx; in find_get_pmu_context()
4865 WARN_ON_ONCE(epc->ctx != ctx); in find_get_pmu_context()
4866 atomic_inc(&epc->refcount); in find_get_pmu_context()
4868 raw_spin_unlock_irq(&ctx->lock); in find_get_pmu_context()
4874 return ERR_PTR(-ENOMEM); in find_get_pmu_context()
4876 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_pmu_context()
4880 return ERR_PTR(-ENOMEM); in find_get_pmu_context()
4889 * lockdep_assert_held(&ctx->mutex); in find_get_pmu_context()
4892 * child_ctx->mutex. in find_get_pmu_context()
4895 raw_spin_lock_irq(&ctx->lock); in find_get_pmu_context()
4896 list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) { in find_get_pmu_context()
4897 if (epc->pmu == pmu) { in find_get_pmu_context()
4898 WARN_ON_ONCE(epc->ctx != ctx); in find_get_pmu_context()
4899 atomic_inc(&epc->refcount); in find_get_pmu_context()
4903 if (!pos && epc->pmu->type > pmu->type) in find_get_pmu_context()
4911 list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list); in find_get_pmu_context()
4913 list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev); in find_get_pmu_context()
4915 epc->ctx = ctx; in find_get_pmu_context()
4918 if (task_ctx_data && !epc->task_ctx_data) { in find_get_pmu_context()
4919 epc->task_ctx_data = task_ctx_data; in find_get_pmu_context()
4921 ctx->nr_task_data++; in find_get_pmu_context()
4923 raw_spin_unlock_irq(&ctx->lock); in find_get_pmu_context()
4933 WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount)); in get_pmu_ctx()
4940 kfree(epc->task_ctx_data); in free_epc_rcu()
4946 struct perf_event_context *ctx = epc->ctx; in put_pmu_ctx()
4952 * lockdep_assert_held(&ctx->mutex); in put_pmu_ctx()
4954 * can't because of the call-site in _free_event()/put_event() in put_pmu_ctx()
4955 * which isn't always called under ctx->mutex. in put_pmu_ctx()
4957 if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags)) in put_pmu_ctx()
4960 WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry)); in put_pmu_ctx()
4962 list_del_init(&epc->pmu_ctx_entry); in put_pmu_ctx()
4963 epc->ctx = NULL; in put_pmu_ctx()
4965 WARN_ON_ONCE(!list_empty(&epc->pinned_active)); in put_pmu_ctx()
4966 WARN_ON_ONCE(!list_empty(&epc->flexible_active)); in put_pmu_ctx()
4968 raw_spin_unlock_irqrestore(&ctx->lock, flags); in put_pmu_ctx()
4970 if (epc->embedded) in put_pmu_ctx()
4973 call_rcu(&epc->rcu_head, free_epc_rcu); in put_pmu_ctx()
4976 static void perf_event_free_filter(struct perf_event *event);
4980 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
4982 if (event->ns) in free_event_rcu()
4983 put_pid_ns(event->ns); in free_event_rcu()
4984 perf_event_free_filter(event); in free_event_rcu()
4985 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
4988 static void ring_buffer_attach(struct perf_event *event,
4991 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4993 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4995 raw_spin_lock(&pel->lock); in detach_sb_event()
4996 list_del_rcu(&event->sb_list); in detach_sb_event()
4997 raw_spin_unlock(&pel->lock); in detach_sb_event()
5000 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5002 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5004 if (event->parent) in is_sb_event()
5007 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5010 if (attr->mmap || attr->mmap_data || attr->mmap2 || in is_sb_event()
5011 attr->comm || attr->comm_exec || in is_sb_event()
5012 attr->task || attr->ksymbol || in is_sb_event()
5013 attr->context_switch || attr->text_poke || in is_sb_event()
5014 attr->bpf_event) in is_sb_event()
5019 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5021 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5022 detach_sb_event(event); in unaccount_pmu_sb_event()
5047 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5051 if (event->parent) in unaccount_event()
5054 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5056 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5058 if (event->attr.build_id) in unaccount_event()
5060 if (event->attr.comm) in unaccount_event()
5062 if (event->attr.namespaces) in unaccount_event()
5064 if (event->attr.cgroup) in unaccount_event()
5066 if (event->attr.task) in unaccount_event()
5068 if (event->attr.freq) in unaccount_event()
5070 if (event->attr.context_switch) { in unaccount_event()
5074 if (is_cgroup_event(event)) in unaccount_event()
5076 if (has_branch_stack(event)) in unaccount_event()
5078 if (event->attr.ksymbol) in unaccount_event()
5080 if (event->attr.bpf_event) in unaccount_event()
5082 if (event->attr.text_poke) in unaccount_event()
5086 if (!atomic_add_unless(&perf_sched_count, -1, 1)) in unaccount_event()
5090 unaccount_pmu_sb_event(event); in unaccount_event()
5103 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5106 * 1) cpu-wide events in the presence of per-task events,
5107 * 2) per-task events in the presence of cpu-wide events,
5111 * _free_event()), the latter -- before the first perf_install_in_context().
5113 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5115 struct pmu *pmu = event->pmu; in exclusive_event_init()
5121 * Prevent co-existence of per-task and cpu-wide events on the in exclusive_event_init()
5124 * Negative pmu::exclusive_cnt means there are cpu-wide in exclusive_event_init()
5126 * per-task events. in exclusive_event_init()
5128 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
5130 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
5133 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5134 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) in exclusive_event_init()
5135 return -EBUSY; in exclusive_event_init()
5137 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) in exclusive_event_init()
5138 return -EBUSY; in exclusive_event_init()
5144 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5146 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5152 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5153 atomic_dec(&pmu->exclusive_cnt); in exclusive_event_destroy()
5155 atomic_inc(&pmu->exclusive_cnt); in exclusive_event_destroy()
5160 if ((e1->pmu == e2->pmu) && in exclusive_event_match()
5161 (e1->cpu == e2->cpu || in exclusive_event_match()
5162 e1->cpu == -1 || in exclusive_event_match()
5163 e2->cpu == -1)) in exclusive_event_match()
5168 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5172 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5174 lockdep_assert_held(&ctx->mutex); in exclusive_event_installable()
5179 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { in exclusive_event_installable()
5180 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
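The exclusive_cnt trick above can be shown in isolation: one counter is held positive while per-task users exist and negative while cpu-wide users exist, so the two kinds exclude each other. A standalone sketch with hypothetical demo_* names, not part of this file.

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_exclusive_cnt = ATOMIC_INIT(0);

static bool demo_get_per_task(void)
{
	/* Fails once a cpu-wide user has pushed the count negative. */
	return atomic_inc_unless_negative(&demo_exclusive_cnt);
}

static bool demo_get_cpu_wide(void)
{
	/* Fails once a per-task user has pushed the count positive. */
	return atomic_dec_unless_positive(&demo_exclusive_cnt);
}

The matching release paths would simply decrement or increment back toward zero, mirroring exclusive_event_destroy() above.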
5187 static void perf_addr_filters_splice(struct perf_event *event,
5190 static void perf_pending_task_sync(struct perf_event *event) in perf_pending_task_sync() argument
5192 struct callback_head *head = &event->pending_task; in perf_pending_task_sync()
5194 if (!event->pending_work) in perf_pending_task_sync()
5201 event->pending_work = 0; in perf_pending_task_sync()
5202 local_dec(&event->ctx->nr_pending); in perf_pending_task_sync()
5207 * All accesses related to the event are within the same in perf_pending_task_sync()
5208 * non-preemptible section in perf_pending_task(). The RCU in perf_pending_task_sync()
5209 * grace period before the event is freed will make sure all in perf_pending_task_sync()
5212 rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE); in perf_pending_task_sync()
5215 static void _free_event(struct perf_event *event) in _free_event() argument
5217 irq_work_sync(&event->pending_irq); in _free_event()
5218 perf_pending_task_sync(event); in _free_event()
5220 unaccount_event(event); in _free_event()
5222 security_perf_event_free(event); in _free_event()
5224 if (event->rb) { in _free_event()
5226 * Can happen when we close an event with re-directed output. in _free_event()
5231 mutex_lock(&event->mmap_mutex); in _free_event()
5232 ring_buffer_attach(event, NULL); in _free_event()
5233 mutex_unlock(&event->mmap_mutex); in _free_event()
5236 if (is_cgroup_event(event)) in _free_event()
5237 perf_detach_cgroup(event); in _free_event()
5239 if (!event->parent) { in _free_event()
5240 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
5244 perf_event_free_bpf_prog(event); in _free_event()
5245 perf_addr_filters_splice(event, NULL); in _free_event()
5246 kfree(event->addr_filter_ranges); in _free_event()
5248 if (event->destroy) in _free_event()
5249 event->destroy(event); in _free_event()
5252 * Must be after ->destroy(), due to uprobe_perf_close() using in _free_event()
5255 if (event->hw.target) in _free_event()
5256 put_task_struct(event->hw.target); in _free_event()
5258 if (event->pmu_ctx) in _free_event()
5259 put_pmu_ctx(event->pmu_ctx); in _free_event()
5265 if (event->ctx) in _free_event()
5266 put_ctx(event->ctx); in _free_event()
5268 exclusive_event_destroy(event); in _free_event()
5269 module_put(event->pmu->module); in _free_event()
5271 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
5276 * where the event isn't exposed yet and inherited events.
5278 static void free_event(struct perf_event *event) in free_event() argument
5280 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5281 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5282 atomic_long_read(&event->refcount), event)) { in free_event()
5283 /* leak to avoid use-after-free */ in free_event()
5287 _free_event(event); in free_event()
5291 * Remove user event from the owner task.
5293 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5301 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5302 * owner->perf_event_mutex. in perf_remove_from_owner()
5304 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5318 * holding ctx->mutex which would be an inversion wrt. the in perf_remove_from_owner()
5322 * ctx->mutex. in perf_remove_from_owner()
5324 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); in perf_remove_from_owner()
5327 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5330 * event. in perf_remove_from_owner()
5332 if (event->owner) { in perf_remove_from_owner()
5333 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5334 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5336 mutex_unlock(&owner->perf_event_mutex); in perf_remove_from_owner()
5341 static void put_event(struct perf_event *event) in put_event() argument
5343 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5346 _free_event(event); in put_event()
5350 * Kill an event dead; while event::refcount will preserve the event
5354 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5356 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5361 * If we got here through err_alloc: free_event(event); we will not in perf_event_release_kernel()
5365 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5370 if (!is_kernel_event(event)) in perf_event_release_kernel()
5371 perf_remove_from_owner(event); in perf_event_release_kernel()
5373 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5374 WARN_ON_ONCE(ctx->parent_ctx); in perf_event_release_kernel()
5377 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5380 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5387 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5389 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5392 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5393 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5400 ctx = READ_ONCE(child->ctx); in perf_event_release_kernel()
5405 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5414 * can re-acquire child_mutex. in perf_event_release_kernel()
5416 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5417 mutex_lock(&ctx->mutex); in perf_event_release_kernel()
5418 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5425 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5429 list_move(&child->child_list, &free_list); in perf_event_release_kernel()
5434 put_event(event); in perf_event_release_kernel()
5436 var = &ctx->refcount; in perf_event_release_kernel()
5439 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5440 mutex_unlock(&ctx->mutex); in perf_event_release_kernel()
5454 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5457 void *var = &child->ctx->refcount; in perf_event_release_kernel()
5459 list_del(&child->child_list); in perf_event_release_kernel()
5463 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5471 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
5481 perf_event_release_kernel(file->private_data); in perf_release()
5485 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5493 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5495 (void)perf_event_read(event, false); in __perf_event_read_value()
5496 total += perf_event_count(event); in __perf_event_read_value()
5498 *enabled += event->total_time_enabled + in __perf_event_read_value()
5499 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5500 *running += event->total_time_running + in __perf_event_read_value()
5501 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5503 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5506 *enabled += child->total_time_enabled; in __perf_event_read_value()
5507 *running += child->total_time_running; in __perf_event_read_value()
5509 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5514 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5519 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5520 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5521 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
5530 struct perf_event_context *ctx = leader->ctx; in __perf_read_group_add()
5540 raw_spin_lock_irqsave(&ctx->lock, flags); in __perf_read_group_add()
5546 * - leader->ctx->lock pins leader->sibling_list in __perf_read_group_add()
5547 * - parent->child_mutex pins parent->child_list in __perf_read_group_add()
5548 * - parent->ctx->mutex pins parent->sibling_list in __perf_read_group_add()
5550 * Because parent->ctx != leader->ctx (and child_list nests inside in __perf_read_group_add()
5551 * ctx->mutex), group destruction is not atomic between children, also in __perf_read_group_add()
5561 parent = leader->parent; in __perf_read_group_add()
5563 (parent->group_generation != leader->group_generation || in __perf_read_group_add()
5564 parent->nr_siblings != leader->nr_siblings)) { in __perf_read_group_add()
5565 ret = -ECHILD; in __perf_read_group_add()
5570 * Since we co-schedule groups, {enabled,running} times of siblings in __perf_read_group_add()
5575 values[n++] += leader->total_time_enabled + in __perf_read_group_add()
5576 atomic64_read(&leader->child_total_time_enabled); in __perf_read_group_add()
5580 values[n++] += leader->total_time_running + in __perf_read_group_add()
5581 atomic64_read(&leader->child_total_time_running); in __perf_read_group_add()
5591 values[n++] = atomic64_read(&leader->lost_samples); in __perf_read_group_add()
5598 values[n++] = atomic64_read(&sub->lost_samples); in __perf_read_group_add()
5602 raw_spin_unlock_irqrestore(&ctx->lock, flags); in __perf_read_group_add()
5606 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5609 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5610 struct perf_event_context *ctx = leader->ctx; in perf_read_group()
5614 lockdep_assert_held(&ctx->mutex); in perf_read_group()
5616 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5618 return -ENOMEM; in perf_read_group()
5620 values[0] = 1 + leader->nr_siblings; in perf_read_group()
5622 mutex_lock(&leader->child_mutex); in perf_read_group()
5628 list_for_each_entry(child, &leader->child_list, child_list) { in perf_read_group()
5634 mutex_unlock(&leader->child_mutex); in perf_read_group()
5636 ret = event->read_size; in perf_read_group()
5637 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5638 ret = -EFAULT; in perf_read_group()
5642 mutex_unlock(&leader->child_mutex); in perf_read_group()
5648 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5655 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5661 values[n++] = primary_event_id(event); in perf_read_one()
5663 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
5666 return -EFAULT; in perf_read_one()
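The value layout that perf_read_one() fills maps directly onto what a userspace read() returns for a single event. A hedged sketch, assuming the event was opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST so all five words are present; the demo_* names are hypothetical.

#include <stdint.h>
#include <unistd.h>

struct demo_read_format {
	uint64_t value;		/* count, self plus children */
	uint64_t time_enabled;
	uint64_t time_running;
	uint64_t id;
	uint64_t lost;
};

static int demo_read_counter(int fd, struct demo_read_format *rf)
{
	return read(fd, rf, sizeof(*rf)) == sizeof(*rf) ? 0 : -1;
}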
5671 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5675 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5678 mutex_lock(&event->child_mutex); in is_event_hup()
5679 no_children = list_empty(&event->child_list); in is_event_hup()
5680 mutex_unlock(&event->child_mutex); in is_event_hup()
5685 * Read the performance event - simple non-blocking version for now
5688 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5690 u64 read_format = event->attr.read_format; in __perf_read()
5694 * Return end-of-file for a read on an event that is in in __perf_read()
5698 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5701 if (count < event->read_size) in __perf_read()
5702 return -ENOSPC; in __perf_read()
5704 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5706 ret = perf_read_group(event, read_format, buf); in __perf_read()
5708 ret = perf_read_one(event, read_format, buf); in __perf_read()
5716 struct perf_event *event = file->private_data; in perf_read() local
5720 ret = security_perf_event_read(event); in perf_read()
5724 ctx = perf_event_ctx_lock(event); in perf_read()
5725 ret = __perf_read(event, buf, count); in perf_read()
5726 perf_event_ctx_unlock(event, ctx); in perf_read()
5733 struct perf_event *event = file->private_data; in perf_poll() local
5737 poll_wait(file, &event->waitq, wait); in perf_poll()
5739 if (is_event_hup(event)) in perf_poll()
5743 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5746 mutex_lock(&event->mmap_mutex); in perf_poll()
5747 rb = event->rb; in perf_poll()
5749 events = atomic_xchg(&rb->poll, 0); in perf_poll()
5750 mutex_unlock(&event->mmap_mutex); in perf_poll()
5754 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5756 (void)perf_event_read(event, false); in _perf_event_reset()
5757 local64_set(&event->count, 0); in _perf_event_reset()
5758 perf_event_update_userpage(event); in _perf_event_reset()
5761 /* Assume it's not an event with inherit set. */
5762 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5767 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5768 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5769 _perf_event_disable(event); in perf_event_pause()
5770 count = local64_read(&event->count); in perf_event_pause()
5772 local64_set(&event->count, 0); in perf_event_pause()
5773 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5780 * Holding the top-level event's child_mutex means that any
5781 * descendant process that has inherited this event will block
5785 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5790 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5792 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5793 func(event); in perf_event_for_each_child()
5794 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5796 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5799 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5802 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5805 lockdep_assert_held(&ctx->mutex); in perf_event_for_each()
5807 event = event->group_leader; in perf_event_for_each()
5809 perf_event_for_each_child(event, func); in perf_event_for_each()
5810 for_each_sibling_event(sibling, event) in perf_event_for_each()
5814 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5822 if (event->attr.freq) { in __perf_event_period()
5823 event->attr.sample_freq = value; in __perf_event_period()
5825 event->attr.sample_period = value; in __perf_event_period()
5826 event->hw.sample_period = value; in __perf_event_period()
5829 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5831 perf_pmu_disable(event->pmu); in __perf_event_period()
5834 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5836 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5837 event->hw.interrupts = 0; in __perf_event_period()
5838 perf_log_throttle(event, 1); in __perf_event_period()
5840 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5843 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5846 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5847 perf_pmu_enable(event->pmu); in __perf_event_period()
5851 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5853 return event->pmu->check_period(event, value); in perf_event_check_period()
5856 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5858 if (!is_sampling_event(event)) in _perf_event_period()
5859 return -EINVAL; in _perf_event_period()
5862 return -EINVAL; in _perf_event_period()
5864 if (event->attr.freq) { in _perf_event_period()
5866 return -EINVAL; in _perf_event_period()
5868 if (perf_event_check_period(event, value)) in _perf_event_period()
5869 return -EINVAL; in _perf_event_period()
5871 return -EINVAL; in _perf_event_period()
5874 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5879 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5884 ctx = perf_event_ctx_lock(event); in perf_event_period()
5885 ret = _perf_event_period(event, value); in perf_event_period()
5886 perf_event_ctx_unlock(event, ctx); in perf_event_period()
5898 return -EBADF; in perf_fget_light()
5900 if (f.file->f_op != &perf_fops) { in perf_fget_light()
5902 return -EBADF; in perf_fget_light()
5908 static int perf_event_set_output(struct perf_event *event,
5910 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5914 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5931 return _perf_event_refresh(event, arg); in _perf_ioctl()
5938 return -EFAULT; in _perf_ioctl()
5940 return _perf_event_period(event, value); in _perf_ioctl()
5944 u64 id = primary_event_id(event); in _perf_ioctl()
5947 return -EFAULT; in _perf_ioctl()
5954 if (arg != -1) { in _perf_ioctl()
5960 output_event = output.file->private_data; in _perf_ioctl()
5961 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5964 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5970 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5981 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
5994 rb = rcu_dereference(event->rb); in _perf_ioctl()
5995 if (!rb || !rb->nr_pages) { in _perf_ioctl()
5997 return -EINVAL; in _perf_ioctl()
6005 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
6015 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
6018 return -ENOTTY; in _perf_ioctl()
6022 perf_event_for_each(event, func); in _perf_ioctl()
6024 perf_event_for_each_child(event, func); in _perf_ioctl()
6031 struct perf_event *event = file->private_data; in perf_ioctl() local
6036 ret = security_perf_event_write(event); in perf_ioctl()
6040 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6041 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6042 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
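/*
 * A minimal user-space sketch of the ioctl paths dispatched by
 * _perf_ioctl() above. Assumes fd and fd2 come from perf_event_open();
 * error handling elided.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void example_ioctls(int fd, int fd2)
{
	unsigned long long period = 100000, id;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);		/* _perf_event_reset()  */
	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);	/* _perf_event_period() */
	ioctl(fd, PERF_EVENT_IOC_ID, &id);		/* primary_event_id()   */
	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd);	/* share fd's ring buffer */
	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, -1);	/* detach it again      */
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
}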
6056 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */ in perf_compat_ioctl()
6072 struct perf_event *event; in perf_event_task_enable() local
6074 	mutex_lock(&current->perf_event_mutex); in perf_event_task_enable()
6075 	list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6076 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6077 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6078 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6080 	mutex_unlock(&current->perf_event_mutex); in perf_event_task_enable()
6088 struct perf_event *event; in perf_event_task_disable() local
6090 	mutex_lock(&current->perf_event_mutex); in perf_event_task_disable()
6091 	list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6092 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6093 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6094 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
6096 	mutex_unlock(&current->perf_event_mutex); in perf_event_task_disable()
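/*
 * A minimal user-space sketch of the callers of the two helpers above:
 * prctl() toggles every counter owned by the current task. Illustrative
 * only; error handling elided.
 */
#include <sys/prctl.h>
#include <linux/prctl.h>

static void measure_region(void (*workload)(void))
{
	prctl(PR_TASK_PERF_EVENTS_ENABLE);	/* -> perf_event_task_enable()  */
	workload();
	prctl(PR_TASK_PERF_EVENTS_DISABLE);	/* -> perf_event_task_disable() */
}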
6101 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6103 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6106 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6109 return event->pmu->event_idx(event); in perf_event_index()
6112 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6118 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6122 userpg = rb->user_page; in perf_event_init_userpage()
6125 userpg->cap_bit0_is_deprecated = 1; in perf_event_init_userpage()
6126 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); in perf_event_init_userpage()
6127 userpg->data_offset = PAGE_SIZE; in perf_event_init_userpage()
6128 userpg->data_size = perf_data_size(rb); in perf_event_init_userpage()
6135 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6144 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6151 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6157 * based on snapshot values taken when the event in perf_event_update_userpage()
6164 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6166 userpg = rb->user_page; in perf_event_update_userpage()
6172 ++userpg->lock; in perf_event_update_userpage()
6174 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6175 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
6176 if (userpg->index) in perf_event_update_userpage()
6177 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6179 userpg->time_enabled = enabled + in perf_event_update_userpage()
6180 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6182 userpg->time_running = running + in perf_event_update_userpage()
6183 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6185 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
6188 ++userpg->lock; in perf_event_update_userpage()
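/*
 * A minimal sketch of the matching user-space reader for the seqlock-style
 * ->lock protocol written above (the canonical loop is documented in
 * include/uapi/linux/perf_event.h). "pc" is the mmap()ed user page;
 * illustrative only.
 */
#include <linux/perf_event.h>

static long long read_userpage_offset(volatile struct perf_event_mmap_page *pc)
{
	unsigned int seq;
	long long offset;

	do {
		seq = pc->lock;
		__sync_synchronize();	/* pairs with the kernel-side barriers   */
		offset = pc->offset;	/* if pc->index != 0, the rdpmc(index - 1) */
					/* delta would be added here              */
		__sync_synchronize();
	} while (pc->lock != seq);

	return offset;
}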
6197 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
6201 if (vmf->flags & FAULT_FLAG_MKWRITE) { in perf_mmap_fault()
6202 if (vmf->pgoff == 0) in perf_mmap_fault()
6208 rb = rcu_dereference(event->rb); in perf_mmap_fault()
6212 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) in perf_mmap_fault()
6215 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); in perf_mmap_fault()
6216 if (!vmf->page) in perf_mmap_fault()
6219 get_page(vmf->page); in perf_mmap_fault()
6220 vmf->page->mapping = vmf->vma->vm_file->f_mapping; in perf_mmap_fault()
6221 vmf->page->index = vmf->pgoff; in perf_mmap_fault()
6230 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6236 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6238 if (event->rb) { in ring_buffer_attach()
6241 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
6243 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6245 old_rb = event->rb; in ring_buffer_attach()
6246 spin_lock_irqsave(&old_rb->event_lock, flags); in ring_buffer_attach()
6247 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6248 spin_unlock_irqrestore(&old_rb->event_lock, flags); in ring_buffer_attach()
6250 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6251 event->rcu_pending = 1; in ring_buffer_attach()
6255 if (event->rcu_pending) { in ring_buffer_attach()
6256 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6257 event->rcu_pending = 0; in ring_buffer_attach()
6260 spin_lock_irqsave(&rb->event_lock, flags); in ring_buffer_attach()
6261 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6262 spin_unlock_irqrestore(&rb->event_lock, flags); in ring_buffer_attach()
6266 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
6267 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
6272 * mid-air, but then again, whoever does it like this is in ring_buffer_attach()
6275 if (has_aux(event)) in ring_buffer_attach()
6276 perf_event_stop(event, 0); in ring_buffer_attach()
6278 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6287 wake_up_all(&event->waitq); in ring_buffer_attach()
6291 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6295 if (event->parent) in ring_buffer_wakeup()
6296 event = event->parent; in ring_buffer_wakeup()
6299 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6301 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6302 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6307 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6311 if (event->parent) in ring_buffer_get()
6312 event = event->parent; in ring_buffer_get()
6315 rb = rcu_dereference(event->rb); in ring_buffer_get()
6317 if (!refcount_inc_not_zero(&rb->refcount)) in ring_buffer_get()
6327 if (!refcount_dec_and_test(&rb->refcount)) in ring_buffer_put()
6330 WARN_ON_ONCE(!list_empty(&rb->event_list)); in ring_buffer_put()
6332 call_rcu(&rb->rcu_head, rb_free_rcu); in ring_buffer_put()
6337 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6339 atomic_inc(&event->mmap_count); in perf_mmap_open()
6340 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6342 if (vma->vm_pgoff) in perf_mmap_open()
6343 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6345 if (event->pmu->event_mapped) in perf_mmap_open()
6346 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6349 static void perf_pmu_output_stop(struct perf_event *event);
6353 * event, or through other events by use of perf_event_set_output().
6361 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6362 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6363 struct user_struct *mmap_user = rb->mmap_user; in perf_mmap_close()
6364 int mmap_locked = rb->mmap_locked; in perf_mmap_close()
6368 if (event->pmu->event_unmapped) in perf_mmap_close()
6369 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6372 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex in perf_mmap_close()
6375 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && in perf_mmap_close()
6376 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) { in perf_mmap_close()
6383 perf_pmu_output_stop(event); in perf_mmap_close()
6386 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm); in perf_mmap_close()
6387 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); in perf_mmap_close()
6391 WARN_ON_ONCE(refcount_read(&rb->aux_refcount)); in perf_mmap_close()
6393 mutex_unlock(&rb->aux_mutex); in perf_mmap_close()
6396 if (atomic_dec_and_test(&rb->mmap_count)) in perf_mmap_close()
6399 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6402 ring_buffer_attach(event, NULL); in perf_mmap_close()
6403 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6416 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6417 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6419 * This event is en-route to free_event() which will in perf_mmap_close()
6426 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6432 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6437 if (event->rb == rb) in perf_mmap_close()
6438 ring_buffer_attach(event, NULL); in perf_mmap_close()
6440 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6441 put_event(event); in perf_mmap_close()
6452 	 * It could be there are still a few 0-ref events on the list; they'll in perf_mmap_close()
6453 * get cleaned up by free_event() -- they'll also still have their in perf_mmap_close()
6460 atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked, in perf_mmap_close()
6461 &mmap_user->locked_vm); in perf_mmap_close()
6462 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); in perf_mmap_close()
6478 struct perf_event *event = file->private_data; in perf_mmap() local
6490 * Don't allow mmap() of inherited per-task counters. This would in perf_mmap()
6494 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6495 return -EINVAL; in perf_mmap()
6497 if (!(vma->vm_flags & VM_SHARED)) in perf_mmap()
6498 return -EINVAL; in perf_mmap()
6500 ret = security_perf_event_read(event); in perf_mmap()
6504 vma_size = vma->vm_end - vma->vm_start; in perf_mmap()
6506 if (vma->vm_pgoff == 0) { in perf_mmap()
6507 nr_pages = (vma_size / PAGE_SIZE) - 1; in perf_mmap()
6510 * AUX area mapping: if rb->aux_nr_pages != 0, it's already in perf_mmap()
6516 if (!event->rb) in perf_mmap()
6517 return -EINVAL; in perf_mmap()
6521 return -ENOMEM; in perf_mmap()
6523 mutex_lock(&event->mmap_mutex); in perf_mmap()
6524 ret = -EINVAL; in perf_mmap()
6526 rb = event->rb; in perf_mmap()
6530 aux_mutex = &rb->aux_mutex; in perf_mmap()
6533 aux_offset = READ_ONCE(rb->user_page->aux_offset); in perf_mmap()
6534 aux_size = READ_ONCE(rb->user_page->aux_size); in perf_mmap()
6539 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) in perf_mmap()
6543 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) in perf_mmap()
6550 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) in perf_mmap()
6556 if (!atomic_inc_not_zero(&rb->mmap_count)) in perf_mmap()
6560 atomic_inc(&rb->aux_mmap_count); in perf_mmap()
6565 atomic_set(&rb->aux_mmap_count, 1); in perf_mmap()
6572 * If we have rb pages ensure they're a power-of-two number, so we in perf_mmap()
6576 return -EINVAL; in perf_mmap()
6579 return -EINVAL; in perf_mmap()
6581 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6583 mutex_lock(&event->mmap_mutex); in perf_mmap()
6584 if (event->rb) { in perf_mmap()
6585 if (data_page_nr(event->rb) != nr_pages) { in perf_mmap()
6586 ret = -EINVAL; in perf_mmap()
6590 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6593 * event and try again. in perf_mmap()
6595 ring_buffer_attach(event, NULL); in perf_mmap()
6596 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6606 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); in perf_mmap()
6613 user_locked = atomic_long_read(&user->locked_vm); in perf_mmap()
6617 * user->locked_vm > user_lock_limit in perf_mmap()
6628 extra = user_locked - user_lock_limit; in perf_mmap()
6629 user_extra -= extra; in perf_mmap()
6634 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra; in perf_mmap()
6638 ret = -EPERM; in perf_mmap()
6642 WARN_ON(!rb && event->rb); in perf_mmap()
6644 if (vma->vm_flags & VM_WRITE) in perf_mmap()
6649 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6650 event->cpu, flags); in perf_mmap()
6653 ret = -ENOMEM; in perf_mmap()
6657 atomic_set(&rb->mmap_count, 1); in perf_mmap()
6658 rb->mmap_user = get_current_user(); in perf_mmap()
6659 rb->mmap_locked = extra; in perf_mmap()
6661 ring_buffer_attach(event, rb); in perf_mmap()
6663 perf_event_update_time(event); in perf_mmap()
6664 perf_event_init_userpage(event); in perf_mmap()
6665 perf_event_update_userpage(event); in perf_mmap()
6667 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6668 event->attr.aux_watermark, flags); in perf_mmap()
6670 rb->aux_mmap_locked = extra; in perf_mmap()
6675 atomic_long_add(user_extra, &user->locked_vm); in perf_mmap()
6676 atomic64_add(extra, &vma->vm_mm->pinned_vm); in perf_mmap()
6678 atomic_inc(&event->mmap_count); in perf_mmap()
6680 atomic_dec(&rb->mmap_count); in perf_mmap()
6685 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6692 vma->vm_ops = &perf_mmap_vmops; in perf_mmap()
6694 if (event->pmu->event_mapped) in perf_mmap()
6695 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
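/*
 * A minimal user-space sketch of the two mappings perf_mmap() accepts:
 * 1 + 2^n pages at pgoff 0 for the data buffer, plus an optional AUX
 * area at the aux_offset/aux_size the caller publishes in the user page
 * first. Illustrative only; error handling elided.
 */
#include <sys/mman.h>
#include <unistd.h>
#include <linux/perf_event.h>

static struct perf_event_mmap_page *map_ring(int fd, size_t data_pages,
					     size_t aux_pages)
{
	size_t psz = sysconf(_SC_PAGESIZE);
	struct perf_event_mmap_page *pc;

	/* user page + 2^n data pages, mapped at file offset 0 */
	pc = mmap(NULL, (data_pages + 1) * psz, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);

	if (aux_pages) {
		/* tell the kernel where the AUX mapping will live ... */
		pc->aux_offset = (data_pages + 1) * psz;
		pc->aux_size   = aux_pages * psz;
		/* ... then map it at exactly that file offset */
		mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, pc->aux_offset);
	}
	return pc;
}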
6703 struct perf_event *event = filp->private_data; in perf_fasync() local
6707 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6728 * Perf event wakeup
6731 * to user-space before waking everybody up.
6734 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
6737 if (event->parent) in perf_event_fasync()
6738 event = event->parent; in perf_event_fasync()
6739 return &event->fasync; in perf_event_fasync()
6742 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6744 ring_buffer_wakeup(event); in perf_event_wakeup()
6746 if (event->pending_kill) { in perf_event_wakeup()
6747 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6748 event->pending_kill = 0; in perf_event_wakeup()
6752 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6756 * ctx->task or current has changed in the meantime. This can be the in perf_sigtrap()
6759 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6766 if (current->flags & PF_EXITING) in perf_sigtrap()
6769 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6770 event->orig_type, event->attr.sig_data); in perf_sigtrap()
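/*
 * A minimal user-space sketch of the receiving end of perf_sigtrap():
 * with attr.sigtrap = 1 the sampling event raises SIGTRAP in the measured
 * task, carrying attr.sig_data, the event type and the triggering address
 * in siginfo. Illustrative only; availability of the si_perf_* accessors
 * depends on the libc headers.
 */
#include <signal.h>

static volatile unsigned long long last_sig_data;

static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* pending_addr / orig_type / attr.sig_data from send_sig_perf() */
	last_sig_data = info->si_perf_data;
	(void)info->si_addr;
	(void)info->si_perf_type;
}

static void install_sigtrap_handler(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigtrap_handler,
		.sa_flags	= SA_SIGINFO,
	};
	sigaction(SIGTRAP, &sa, NULL);
}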
6774 * Deliver the pending work in-event-context or follow the context.
6776 static void __perf_pending_irq(struct perf_event *event) in __perf_pending_irq() argument
6778 int cpu = READ_ONCE(event->oncpu); in __perf_pending_irq()
6781 	 * If the event isn't running, we're done. event_sched_out() will have in __perf_pending_irq()
6788 * Yay, we hit home and are in the context of the event. in __perf_pending_irq()
6791 if (event->pending_sigtrap) { in __perf_pending_irq()
6792 event->pending_sigtrap = 0; in __perf_pending_irq()
6793 perf_sigtrap(event); in __perf_pending_irq()
6794 local_dec(&event->ctx->nr_pending); in __perf_pending_irq()
6796 if (event->pending_disable) { in __perf_pending_irq()
6797 event->pending_disable = 0; in __perf_pending_irq()
6798 perf_event_disable_local(event); in __perf_pending_irq()
6804 * CPU-A CPU-B in __perf_pending_irq()
6807 * @pending_disable = CPU-A; in __perf_pending_irq()
6810 * sched-out in __perf_pending_irq()
6811 * @pending_disable = -1; in __perf_pending_irq()
6813 * sched-in in __perf_pending_irq()
6815 * @pending_disable = CPU-B; in __perf_pending_irq()
6821 * But the event runs on CPU-B and wants disabling there. in __perf_pending_irq()
6823 irq_work_queue_on(&event->pending_irq, cpu); in __perf_pending_irq()
6828 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
6838 * The wakeup isn't bound to the context of the event -- it can happen in perf_pending_irq()
6839 * irrespective of where the event is. in perf_pending_irq()
6841 if (event->pending_wakeup) { in perf_pending_irq()
6842 event->pending_wakeup = 0; in perf_pending_irq()
6843 perf_event_wakeup(event); in perf_pending_irq()
6846 __perf_pending_irq(event); in perf_pending_irq()
6854 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
6858 * All accesses to the event must belong to the same implicit RCU read-side in perf_pending_task()
6859 * critical section as the ->pending_work reset. See comment in in perf_pending_task()
6869 if (event->pending_work) { in perf_pending_task()
6870 event->pending_work = 0; in perf_pending_task()
6871 perf_sigtrap(event); in perf_pending_task()
6872 local_dec(&event->ctx->nr_pending); in perf_pending_task()
6873 rcuwait_wake_up(&event->pending_work_wait); in perf_pending_task()
6884 DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
6885 DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
6886 DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
6894 static_call_update(__perf_guest_state, cbs->state); in perf_register_guest_info_callbacks()
6895 static_call_update(__perf_guest_get_ip, cbs->get_ip); in perf_register_guest_info_callbacks()
6897 /* Implementing ->handle_intel_pt_intr is optional. */ in perf_register_guest_info_callbacks()
6898 if (cbs->handle_intel_pt_intr) in perf_register_guest_info_callbacks()
6900 cbs->handle_intel_pt_intr); in perf_register_guest_info_callbacks()
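/*
 * A minimal sketch of how a hypervisor module would feed the static calls
 * above; illustrative only, with the callback layout assumed to follow
 * struct perf_guest_info_callbacks.
 */
static unsigned int example_guest_state(void)
{
	return 0;	/* e.g. PERF_GUEST_ACTIVE | PERF_GUEST_USER while in guest */
}

static unsigned long example_guest_get_ip(void)
{
	return 0;	/* guest instruction pointer for samples taken in guest mode */
}

static struct perf_guest_info_callbacks example_guest_cbs = {
	.state	= example_guest_state,
	.get_ip	= example_guest_get_ip,
	/* .handle_intel_pt_intr is optional, see above */
};

/* registered once at module init: perf_register_guest_info_callbacks(&example_guest_cbs); */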
6939 regs_user->abi = perf_reg_abi(current); in perf_sample_regs_user()
6940 regs_user->regs = regs; in perf_sample_regs_user()
6941 } else if (!(current->flags & PF_KTHREAD)) { in perf_sample_regs_user()
6944 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; in perf_sample_regs_user()
6945 regs_user->regs = NULL; in perf_sample_regs_user()
6952 regs_intr->regs = regs; in perf_sample_regs_intr()
6953 regs_intr->abi = perf_reg_abi(current); in perf_sample_regs_intr()
6971 return TASK_SIZE - addr; in perf_ustack_task_size()
6986 * - TASK_SIZE in perf_sample_ustack_size()
6989 * - remaining sample size in perf_sample_ustack_size()
7006 stack_size = USHRT_MAX - header_size - sizeof(u64); in perf_sample_ustack_size()
7029 * - the size requested by user or the best one we can fit in perf_output_sample_ustack()
7032 * - user stack dump data in perf_output_sample_ustack()
7034 * - the actual dumped size in perf_output_sample_ustack()
7043 dyn_size = dump_size - rem; in perf_output_sample_ustack()
7052 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7056 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7059 data->aux_size = 0; in perf_prepare_sample_aux()
7064 if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE)) in perf_prepare_sample_aux()
7067 if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id())) in perf_prepare_sample_aux()
7078 if (READ_ONCE(rb->aux_in_sampling)) { in perf_prepare_sample_aux()
7079 data->aux_size = 0; in perf_prepare_sample_aux()
7082 data->aux_size = ALIGN(size, sizeof(u64)); in perf_prepare_sample_aux()
7087 return data->aux_size; in perf_prepare_sample_aux()
7091 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7099 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler in perf_pmu_snapshot_aux()
7101 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
7103 * doesn't change the event state. in perf_pmu_snapshot_aux()
7112 WRITE_ONCE(rb->aux_in_sampling, 1); in perf_pmu_snapshot_aux()
7115 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7118 WRITE_ONCE(rb->aux_in_sampling, 0); in perf_pmu_snapshot_aux()
7124 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7128 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7133 if (WARN_ON_ONCE(!sampler || !data->aux_size)) in perf_aux_sample_output()
7140 size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size); in perf_aux_sample_output()
7144 * non-zero surplus that it didn't copy), which in its current in perf_aux_sample_output()
7152 * The pad comes from ALIGN()ing data->aux_size up to u64 in in perf_aux_sample_output()
7155 pad = data->aux_size - size; in perf_aux_sample_output()
7169 * A set of common sample data types saved even for non-sample records
7170 * when event->attr.sample_id_all is set.
7177 struct perf_event *event, in __perf_event_header__init_id() argument
7180 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7181 data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL; in __perf_event_header__init_id()
7185 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7186 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7190 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7193 data->id = primary_event_id(event); in __perf_event_header__init_id()
7196 data->stream_id = event->id; in __perf_event_header__init_id()
7199 data->cpu_entry.cpu = raw_smp_processor_id(); in __perf_event_header__init_id()
7200 data->cpu_entry.reserved = 0; in __perf_event_header__init_id()
7206 struct perf_event *event) in perf_event_header__init_id() argument
7208 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7209 header->size += event->id_header_size; in perf_event_header__init_id()
7210 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7217 u64 sample_type = data->type; in __perf_event__output_id_sample()
7220 perf_output_put(handle, data->tid_entry); in __perf_event__output_id_sample()
7223 perf_output_put(handle, data->time); in __perf_event__output_id_sample()
7226 perf_output_put(handle, data->id); in __perf_event__output_id_sample()
7229 perf_output_put(handle, data->stream_id); in __perf_event__output_id_sample()
7232 perf_output_put(handle, data->cpu_entry); in __perf_event__output_id_sample()
7235 perf_output_put(handle, data->id); in __perf_event__output_id_sample()
7238 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7242 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7247 struct perf_event *event, in perf_output_read_one() argument
7250 u64 read_format = event->attr.read_format; in perf_output_read_one()
7254 values[n++] = perf_event_count(event); in perf_output_read_one()
7257 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7261 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7264 values[n++] = primary_event_id(event); in perf_output_read_one()
7266 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7272 struct perf_event *event, in perf_output_read_group() argument
7275 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7276 u64 read_format = event->attr.read_format; in perf_output_read_group()
7287 values[n++] = 1 + leader->nr_siblings; in perf_output_read_group()
7295 if ((leader != event) && in perf_output_read_group()
7296 (leader->state == PERF_EVENT_STATE_ACTIVE)) in perf_output_read_group()
7297 leader->pmu->read(leader); in perf_output_read_group()
7303 values[n++] = atomic64_read(&leader->lost_samples); in perf_output_read_group()
7310 if ((sub != event) && in perf_output_read_group()
7311 (sub->state == PERF_EVENT_STATE_ACTIVE)) in perf_output_read_group()
7312 sub->pmu->read(sub); in perf_output_read_group()
7318 values[n++] = atomic64_read(&sub->lost_samples); in perf_output_read_group()
7337 struct perf_event *event) in perf_output_read() argument
7340 u64 read_format = event->attr.read_format; in perf_output_read()
7344 * based on snapshot values taken when the event in perf_output_read()
7352 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7354 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7355 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7357 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
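/*
 * A sketch of the record layout emitted by perf_output_read_one() and
 * perf_output_read_group() above (it matches what read() returns),
 * assuming every optional read_format bit is set; illustrative only.
 */
struct example_read_one {			/* !PERF_FORMAT_GROUP */
	__u64	value;				/* perf_event_count()		  */
	__u64	time_enabled;			/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64	time_running;			/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	__u64	id;				/* PERF_FORMAT_ID		  */
	__u64	lost;				/* PERF_FORMAT_LOST		  */
};

struct example_read_group {			/* PERF_FORMAT_GROUP */
	__u64	nr;				/* 1 + leader->nr_siblings	  */
	__u64	time_enabled;
	__u64	time_running;
	struct {
		__u64	value;
		__u64	id;			/* PERF_FORMAT_ID		  */
		__u64	lost;			/* PERF_FORMAT_LOST		  */
	} cnt[];				/* leader first, then each sibling */
};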
7363 struct perf_event *event) in perf_output_sample() argument
7365 u64 sample_type = data->type; in perf_output_sample()
7370 perf_output_put(handle, data->id); in perf_output_sample()
7373 perf_output_put(handle, data->ip); in perf_output_sample()
7376 perf_output_put(handle, data->tid_entry); in perf_output_sample()
7379 perf_output_put(handle, data->time); in perf_output_sample()
7382 perf_output_put(handle, data->addr); in perf_output_sample()
7385 perf_output_put(handle, data->id); in perf_output_sample()
7388 perf_output_put(handle, data->stream_id); in perf_output_sample()
7391 perf_output_put(handle, data->cpu_entry); in perf_output_sample()
7394 perf_output_put(handle, data->period); in perf_output_sample()
7397 perf_output_read(handle, event); in perf_output_sample()
7402 size += data->callchain->nr; in perf_output_sample()
7404 __output_copy(handle, data->callchain, size); in perf_output_sample()
7408 struct perf_raw_record *raw = data->raw; in perf_output_sample()
7411 struct perf_raw_frag *frag = &raw->frag; in perf_output_sample()
7413 perf_output_put(handle, raw->size); in perf_output_sample()
7415 if (frag->copy) { in perf_output_sample()
7416 __output_custom(handle, frag->copy, in perf_output_sample()
7417 frag->data, frag->size); in perf_output_sample()
7419 __output_copy(handle, frag->data, in perf_output_sample()
7420 frag->size); in perf_output_sample()
7424 frag = frag->next; in perf_output_sample()
7426 if (frag->pad) in perf_output_sample()
7427 __output_skip(handle, NULL, frag->pad); in perf_output_sample()
7441 if (data->br_stack) { in perf_output_sample()
7444 size = data->br_stack->nr in perf_output_sample()
7447 perf_output_put(handle, data->br_stack->nr); in perf_output_sample()
7448 if (branch_sample_hw_index(event)) in perf_output_sample()
7449 perf_output_put(handle, data->br_stack->hw_idx); in perf_output_sample()
7450 perf_output_copy(handle, data->br_stack->entries, size); in perf_output_sample()
7461 u64 abi = data->regs_user.abi; in perf_output_sample()
7470 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7472 data->regs_user.regs, in perf_output_sample()
7479 data->stack_user_size, in perf_output_sample()
7480 data->regs_user.regs); in perf_output_sample()
7484 perf_output_put(handle, data->weight.full); in perf_output_sample()
7487 perf_output_put(handle, data->data_src.val); in perf_output_sample()
7490 perf_output_put(handle, data->txn); in perf_output_sample()
7493 u64 abi = data->regs_intr.abi; in perf_output_sample()
7501 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7504 data->regs_intr.regs, in perf_output_sample()
7510 perf_output_put(handle, data->phys_addr); in perf_output_sample()
7513 perf_output_put(handle, data->cgroup); in perf_output_sample()
7516 perf_output_put(handle, data->data_page_size); in perf_output_sample()
7519 perf_output_put(handle, data->code_page_size); in perf_output_sample()
7522 perf_output_put(handle, data->aux_size); in perf_output_sample()
7524 if (data->aux_size) in perf_output_sample()
7525 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7528 if (!event->attr.watermark) { in perf_output_sample()
7529 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7532 struct perf_buffer *rb = handle->rb; in perf_output_sample()
7533 int events = local_inc_return(&rb->events); in perf_output_sample()
7536 local_sub(wakeup_events, &rb->events); in perf_output_sample()
7537 local_inc(&rb->wakeup); in perf_output_sample()
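/*
 * The branch above implements the "every N samples" wakeup policy; the
 * byte-based variant is handled in the ring-buffer code. A sketch of the
 * two user-space configurations (illustrative only):
 *
 *	attr.watermark = 0;
 *	attr.wakeup_events = 16;	// wake the poller every 16 samples
 *
 * or:
 *
 *	attr.watermark = 1;
 *	attr.wakeup_watermark = 4096;	// wake once this many bytes are queued
 */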
7560 * Try IRQ-safe get_user_page_fast_only first. in perf_virt_to_phys()
7563 if (current->mm != NULL) { in perf_virt_to_phys()
7648 * Software page-table walkers must disable IRQs, in perf_get_page_size()
7653 mm = current->mm; in perf_get_page_size()
7672 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7674 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7675 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7676 /* Disallow cross-task user callchains. */ in perf_callchain()
7677 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7678 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7695 struct perf_event *event, in perf_prepare_sample() argument
7698 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7712 filtered_sample_type &= ~data->sample_flags; in perf_prepare_sample()
7715 /* Make sure it has the correct data->type for output */ in perf_prepare_sample()
7716 data->type = event->attr.sample_type; in perf_prepare_sample()
7720 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
7723 data->ip = perf_instruction_pointer(regs); in perf_prepare_sample()
7724 data->sample_flags |= PERF_SAMPLE_IP; in perf_prepare_sample()
7728 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
7731 data->raw = NULL; in perf_prepare_sample()
7732 data->dyn_size += sizeof(u64); in perf_prepare_sample()
7733 data->sample_flags |= PERF_SAMPLE_RAW; in perf_prepare_sample()
7737 data->br_stack = NULL; in perf_prepare_sample()
7738 data->dyn_size += sizeof(u64); in perf_prepare_sample()
7739 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; in perf_prepare_sample()
7743 perf_sample_regs_user(&data->regs_user, regs); in perf_prepare_sample()
7750 if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) { in perf_prepare_sample()
7754 if (data->regs_user.regs) { in perf_prepare_sample()
7755 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7759 data->dyn_size += size; in perf_prepare_sample()
7760 data->sample_flags |= PERF_SAMPLE_REGS_USER; in perf_prepare_sample()
7770 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7771 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7775 data->regs_user.regs); in perf_prepare_sample()
7785 data->stack_user_size = stack_size; in perf_prepare_sample()
7786 data->dyn_size += size; in perf_prepare_sample()
7787 data->sample_flags |= PERF_SAMPLE_STACK_USER; in perf_prepare_sample()
7791 data->weight.full = 0; in perf_prepare_sample()
7792 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in perf_prepare_sample()
7796 data->data_src.val = PERF_MEM_NA; in perf_prepare_sample()
7797 data->sample_flags |= PERF_SAMPLE_DATA_SRC; in perf_prepare_sample()
7801 data->txn = 0; in perf_prepare_sample()
7802 data->sample_flags |= PERF_SAMPLE_TRANSACTION; in perf_prepare_sample()
7806 data->addr = 0; in perf_prepare_sample()
7807 data->sample_flags |= PERF_SAMPLE_ADDR; in perf_prepare_sample()
7814 perf_sample_regs_intr(&data->regs_intr, regs); in perf_prepare_sample()
7816 if (data->regs_intr.regs) { in perf_prepare_sample()
7817 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7822 data->dyn_size += size; in perf_prepare_sample()
7823 data->sample_flags |= PERF_SAMPLE_REGS_INTR; in perf_prepare_sample()
7827 data->phys_addr = perf_virt_to_phys(data->addr); in perf_prepare_sample()
7828 data->sample_flags |= PERF_SAMPLE_PHYS_ADDR; in perf_prepare_sample()
7836 cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup; in perf_prepare_sample()
7837 data->cgroup = cgroup_id(cgrp); in perf_prepare_sample()
7838 data->sample_flags |= PERF_SAMPLE_CGROUP; in perf_prepare_sample()
7844 	 * require PERF_SAMPLE_ADDR, the kernel implicitly retrieves the data->addr, in perf_prepare_sample()
7848 data->data_page_size = perf_get_page_size(data->addr); in perf_prepare_sample()
7849 data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE; in perf_prepare_sample()
7853 data->code_page_size = perf_get_page_size(data->ip); in perf_prepare_sample()
7854 data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE; in perf_prepare_sample()
7859 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7869 size = min_t(size_t, U16_MAX - header_size, in perf_prepare_sample()
7870 event->attr.aux_sample_size); in perf_prepare_sample()
7872 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
7875 data->dyn_size += size + sizeof(u64); /* size above */ in perf_prepare_sample()
7876 data->sample_flags |= PERF_SAMPLE_AUX; in perf_prepare_sample()
7882 struct perf_event *event, in perf_prepare_header() argument
7885 header->type = PERF_RECORD_SAMPLE; in perf_prepare_header()
7886 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
7887 header->misc = perf_misc_flags(regs); in perf_prepare_header()
7897 WARN_ON_ONCE(header->size & 7); in perf_prepare_header()
7901 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
7916 perf_prepare_sample(data, event, regs); in __perf_event_output()
7917 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
7919 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
7923 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
7933 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
7937 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
7941 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
7945 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
7949 perf_event_output(struct perf_event *event, in perf_event_output() argument
7953 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
7968 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
7977 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
7979 .pid = perf_event_pid(event, task), in perf_event_read_event()
7980 .tid = perf_event_tid(event, task), in perf_event_read_event()
7984 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
7985 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
7990 perf_output_read(&handle, event); in perf_event_read_event()
7991 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
7996 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8003 struct perf_event *event; in perf_iterate_ctx() local
8005 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
8007 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
8009 if (!event_filter_match(event)) in perf_iterate_ctx()
8013 output(event, data); in perf_iterate_ctx()
8020 struct perf_event *event; in perf_iterate_sb_cpu() local
8022 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
8025 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
8028 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
8031 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
8033 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
8035 output(event, data); in perf_iterate_sb_cpu()
8040 * Iterate all events that need to receive side-band events.
8043 * your event, otherwise it might not get delivered.
8066 ctx = rcu_dereference(current->perf_event_ctxp); in perf_iterate_sb()
8075 * Clear all file-based filters at exec, they'll have to be
8076 * re-instated when/if these objects are mmapped again.
8078 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8080 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8085 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8088 raw_spin_lock_irqsave(&ifh->lock, flags); in perf_event_addr_filters_exec()
8089 list_for_each_entry(filter, &ifh->list, entry) { in perf_event_addr_filters_exec()
8090 if (filter->path.dentry) { in perf_event_addr_filters_exec()
8091 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8092 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8100 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8101 raw_spin_unlock_irqrestore(&ifh->lock, flags); in perf_event_addr_filters_exec()
8104 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8129 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8131 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8133 struct perf_buffer *rb = ro->rb; in __perf_event_output_stop()
8135 .event = event, in __perf_event_output_stop()
8138 if (!has_aux(event)) in __perf_event_output_stop()
8142 parent = event; in __perf_event_output_stop()
8146 * ring-buffer, but it will be the child that's actually using it. in __perf_event_output_stop()
8148 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
8150 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
8151 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
8154 if (rcu_dereference(parent->rb) == rb) in __perf_event_output_stop()
8155 ro->err = __perf_event_stop(&sd); in __perf_event_output_stop()
8160 struct perf_event *event = info; in __perf_pmu_output_stop() local
8163 .rb = event->rb, in __perf_pmu_output_stop()
8167 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); in __perf_pmu_output_stop()
8168 if (cpuctx->task_ctx) in __perf_pmu_output_stop()
8169 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, in __perf_pmu_output_stop()
8176 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8183 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8185 * For per-CPU events, we need to make sure that neither they in perf_pmu_output_stop()
8186 * nor their children are running; for cpu==-1 events it's in perf_pmu_output_stop()
8187 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
8190 cpu = iter->cpu; in perf_pmu_output_stop()
8191 if (cpu == -1) in perf_pmu_output_stop()
8192 cpu = READ_ONCE(iter->oncpu); in perf_pmu_output_stop()
8194 if (cpu == -1) in perf_pmu_output_stop()
8197 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8198 if (err == -EAGAIN) { in perf_pmu_output_stop()
8207 * task tracking -- fork/exit
8227 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8229 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8230 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8231 event->attr.task; in perf_event_task_match()
8234 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8240 struct task_struct *task = task_event->task; in perf_event_task_output()
8241 int ret, size = task_event->event_id.header.size; in perf_event_task_output()
8243 if (!perf_event_task_match(event)) in perf_event_task_output()
8246 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8248 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8249 task_event->event_id.header.size); in perf_event_task_output()
8253 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8254 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8256 if (task_event->event_id.header.type == PERF_RECORD_EXIT) { in perf_event_task_output()
8257 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8258 task->real_parent); in perf_event_task_output()
8259 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8260 task->real_parent); in perf_event_task_output()
8262 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8263 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8266 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8268 perf_output_put(&handle, task_event->event_id); in perf_event_task_output()
8270 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8274 task_event->event_id.header.size = size; in perf_event_task_output()
8333 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8335 return event->attr.comm; in perf_event_comm_match()
8338 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8344 int size = comm_event->event_id.header.size; in perf_event_comm_output()
8347 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8350 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8351 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8352 comm_event->event_id.header.size); in perf_event_comm_output()
8357 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8358 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8360 perf_output_put(&handle, comm_event->event_id); in perf_event_comm_output()
8361 __output_copy(&handle, comm_event->comm, in perf_event_comm_output()
8362 comm_event->comm_size); in perf_event_comm_output()
8364 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8368 comm_event->event_id.header.size = size; in perf_event_comm_output()
8377 strscpy(comm, comm_event->task->comm, sizeof(comm)); in perf_event_comm_event()
8380 comm_event->comm = comm; in perf_event_comm_event()
8381 comm_event->comm_size = size; in perf_event_comm_event()
8383 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; in perf_event_comm_event()
8432 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8434 return event->attr.namespaces; in perf_event_namespaces_match()
8437 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8443 u16 header_size = namespaces_event->event_id.header.size; in perf_event_namespaces_output()
8446 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8449 perf_event_header__init_id(&namespaces_event->event_id.header, in perf_event_namespaces_output()
8450 &sample, event); in perf_event_namespaces_output()
8451 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8452 namespaces_event->event_id.header.size); in perf_event_namespaces_output()
8456 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8457 namespaces_event->task); in perf_event_namespaces_output()
8458 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8459 namespaces_event->task); in perf_event_namespaces_output()
8461 perf_output_put(&handle, namespaces_event->event_id); in perf_event_namespaces_output()
8463 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8467 namespaces_event->event_id.header.size = header_size; in perf_event_namespaces_output()
8480 ns_inode = ns_path.dentry->d_inode; in perf_fill_ns_link_info()
8481 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev); in perf_fill_ns_link_info()
8482 ns_link_info->ino = ns_inode->i_ino; in perf_fill_ns_link_info()
8560 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8562 return event->attr.cgroup; in perf_event_cgroup_match()
8565 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8570 u16 header_size = cgroup_event->event_id.header.size; in perf_event_cgroup_output()
8573 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8576 perf_event_header__init_id(&cgroup_event->event_id.header, in perf_event_cgroup_output()
8577 &sample, event); in perf_event_cgroup_output()
8578 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8579 cgroup_event->event_id.header.size); in perf_event_cgroup_output()
8583 perf_output_put(&handle, cgroup_event->event_id); in perf_event_cgroup_output()
8584 __output_copy(&handle, cgroup_event->path, cgroup_event->path_size); in perf_event_cgroup_output()
8586 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8590 cgroup_event->event_id.header.size = header_size; in perf_event_cgroup_output()
8619 cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64)); in perf_event_cgroup()
8671 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8675 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_match()
8676 int executable = vma->vm_flags & VM_EXEC; in perf_event_mmap_match()
8678 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8679 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8682 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8688 int size = mmap_event->event_id.header.size; in perf_event_mmap_output()
8689 u32 type = mmap_event->event_id.header.type; in perf_event_mmap_output()
8693 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8696 if (event->attr.mmap2) { in perf_event_mmap_output()
8697 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; in perf_event_mmap_output()
8698 mmap_event->event_id.header.size += sizeof(mmap_event->maj); in perf_event_mmap_output()
8699 mmap_event->event_id.header.size += sizeof(mmap_event->min); in perf_event_mmap_output()
8700 mmap_event->event_id.header.size += sizeof(mmap_event->ino); in perf_event_mmap_output()
8701 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); in perf_event_mmap_output()
8702 mmap_event->event_id.header.size += sizeof(mmap_event->prot); in perf_event_mmap_output()
8703 mmap_event->event_id.header.size += sizeof(mmap_event->flags); in perf_event_mmap_output()
8706 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8707 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8708 mmap_event->event_id.header.size); in perf_event_mmap_output()
8712 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8713 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8715 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8717 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8718 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; in perf_event_mmap_output()
8720 perf_output_put(&handle, mmap_event->event_id); in perf_event_mmap_output()
8722 if (event->attr.mmap2) { in perf_event_mmap_output()
8724 u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 }; in perf_event_mmap_output()
8727 __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX); in perf_event_mmap_output()
8729 perf_output_put(&handle, mmap_event->maj); in perf_event_mmap_output()
8730 perf_output_put(&handle, mmap_event->min); in perf_event_mmap_output()
8731 perf_output_put(&handle, mmap_event->ino); in perf_event_mmap_output()
8732 perf_output_put(&handle, mmap_event->ino_generation); in perf_event_mmap_output()
8734 perf_output_put(&handle, mmap_event->prot); in perf_event_mmap_output()
8735 perf_output_put(&handle, mmap_event->flags); in perf_event_mmap_output()
8738 __output_copy(&handle, mmap_event->file_name, in perf_event_mmap_output()
8739 mmap_event->file_size); in perf_event_mmap_output()
8741 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
8745 mmap_event->event_id.header.size = size; in perf_event_mmap_output()
8746 mmap_event->event_id.header.type = type; in perf_event_mmap_output()
8751 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_event()
8752 struct file *file = vma->vm_file; in perf_event_mmap_event()
8761 if (vma->vm_flags & VM_READ) in perf_event_mmap_event()
8763 if (vma->vm_flags & VM_WRITE) in perf_event_mmap_event()
8765 if (vma->vm_flags & VM_EXEC) in perf_event_mmap_event()
8768 if (vma->vm_flags & VM_MAYSHARE) in perf_event_mmap_event()
8773 if (vma->vm_flags & VM_LOCKED) in perf_event_mmap_event()
8792 name = file_path(file, buf, PATH_MAX - sizeof(u64)); in perf_event_mmap_event()
8797 inode = file_inode(vma->vm_file); in perf_event_mmap_event()
8798 dev = inode->i_sb->s_dev; in perf_event_mmap_event()
8799 ino = inode->i_ino; in perf_event_mmap_event()
8800 gen = inode->i_generation; in perf_event_mmap_event()
8806 if (vma->vm_ops && vma->vm_ops->name) in perf_event_mmap_event()
8807 name = (char *) vma->vm_ops->name(vma); in perf_event_mmap_event()
8833 mmap_event->file_name = name; in perf_event_mmap_event()
8834 mmap_event->file_size = size; in perf_event_mmap_event()
8835 mmap_event->maj = maj; in perf_event_mmap_event()
8836 mmap_event->min = min; in perf_event_mmap_event()
8837 mmap_event->ino = ino; in perf_event_mmap_event()
8838 mmap_event->ino_generation = gen; in perf_event_mmap_event()
8839 mmap_event->prot = prot; in perf_event_mmap_event()
8840 mmap_event->flags = flags; in perf_event_mmap_event()
8842 if (!(vma->vm_flags & VM_EXEC)) in perf_event_mmap_event()
8843 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; in perf_event_mmap_event()
8845 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; in perf_event_mmap_event()
8848 build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); in perf_event_mmap_event()
8864 /* d_inode(NULL) won't be equal to any mapped user-space file */ in perf_addr_filter_match()
8865 if (!filter->path.dentry) in perf_addr_filter_match()
8868 if (d_inode(filter->path.dentry) != file_inode(file)) in perf_addr_filter_match()
8871 if (filter->offset > offset + size) in perf_addr_filter_match()
8874 if (filter->offset + filter->size < offset) in perf_addr_filter_match()
8884 unsigned long vma_size = vma->vm_end - vma->vm_start; in perf_addr_filter_vma_adjust()
8885 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; in perf_addr_filter_vma_adjust()
8886 struct file *file = vma->vm_file; in perf_addr_filter_vma_adjust()
8891 if (filter->offset < off) { in perf_addr_filter_vma_adjust()
8892 fr->start = vma->vm_start; in perf_addr_filter_vma_adjust()
8893 fr->size = min(vma_size, filter->size - (off - filter->offset)); in perf_addr_filter_vma_adjust()
8895 fr->start = vma->vm_start + filter->offset - off; in perf_addr_filter_vma_adjust()
8896 fr->size = min(vma->vm_end - fr->start, filter->size); in perf_addr_filter_vma_adjust()
8902 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
8904 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
8910 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
8913 if (!vma->vm_file) in __perf_addr_filters_adjust()
8916 raw_spin_lock_irqsave(&ifh->lock, flags); in __perf_addr_filters_adjust()
8917 list_for_each_entry(filter, &ifh->list, entry) { in __perf_addr_filters_adjust()
8919 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
8926 event->addr_filters_gen++; in __perf_addr_filters_adjust()
8927 raw_spin_unlock_irqrestore(&ifh->lock, flags); in __perf_addr_filters_adjust()
8930 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
8944 if (!(vma->vm_flags & VM_EXEC)) in perf_addr_filters_adjust()
8948 ctx = rcu_dereference(current->perf_event_ctxp); in perf_addr_filters_adjust()
8973 .start = vma->vm_start, in perf_event_mmap()
8974 .len = vma->vm_end - vma->vm_start, in perf_event_mmap()
8975 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, in perf_event_mmap()
8989 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
9011 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
9012 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
9018 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
9026 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9044 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9046 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9052 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9071 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9073 return event->attr.context_switch; in perf_event_switch_match()
9076 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9083 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9086 /* Only CPU-wide events are allowed to see next/prev pid/tid */ in perf_event_switch_output()
9087 if (event->ctx->task) { in perf_event_switch_output()
9088 se->event_id.header.type = PERF_RECORD_SWITCH; in perf_event_switch_output()
9089 se->event_id.header.size = sizeof(se->event_id.header); in perf_event_switch_output()
9091 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; in perf_event_switch_output()
9092 se->event_id.header.size = sizeof(se->event_id); in perf_event_switch_output()
9093 se->event_id.next_prev_pid = in perf_event_switch_output()
9094 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9095 se->event_id.next_prev_tid = in perf_event_switch_output()
9096 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9099 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9101 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9105 if (event->ctx->task) in perf_event_switch_output()
9106 perf_output_put(&handle, se->event_id.header); in perf_event_switch_output()
9108 perf_output_put(&handle, se->event_id); in perf_event_switch_output()
9110 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
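The switch side-band record comes in two flavours, as the branch above shows: per-task events get the slim PERF_RECORD_SWITCH, while CPU-wide events additionally carry next/prev pid and tid in PERF_RECORD_SWITCH_CPU_WIDE. A minimal user-space sketch that requests these records (assuming the usual raw perf_event_open() syscall wrapper, since glibc provides none; the CPU-wide open needs CAP_PERFMON or a permissive perf_event_paranoid):

/* Sketch: ask the kernel for context-switch side-band records.
 * Decoding the mmap'ed ring buffer is omitted for brevity. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* side-band records only */
	attr.context_switch = 1;		/* request PERF_RECORD_SWITCH* */
	attr.sample_id_all = 1;

	/* pid == -1, cpu == 0: CPU-wide, so records include next/prev pid/tid */
	int fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... mmap() the fd and read PERF_RECORD_SWITCH_CPU_WIDE records ... */
	close(fd);
	return 0;
}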
9136 if (!sched_in && task->on_rq) { in perf_event_switch()
9148 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9165 .time = perf_event_clock(event), in perf_log_throttle()
9166 .id = primary_event_id(event), in perf_log_throttle()
9167 .stream_id = event->id, in perf_log_throttle()
9173 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9175 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9181 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9201 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9203 return event->attr.ksymbol; in perf_event_ksymbol_match()
9206 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9213 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9216 perf_event_header__init_id(&ksymbol_event->event_id.header, in perf_event_ksymbol_output()
9217 &sample, event); in perf_event_ksymbol_output()
9218 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9219 ksymbol_event->event_id.header.size); in perf_event_ksymbol_output()
9223 perf_output_put(&handle, ksymbol_event->event_id); in perf_event_ksymbol_output()
9224 __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len); in perf_event_ksymbol_output()
9225 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9291 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9293 return event->attr.bpf_event; in perf_event_bpf_match()
9296 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9303 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9306 perf_event_header__init_id(&bpf_event->event_id.header, in perf_event_bpf_output()
9307 &sample, event); in perf_event_bpf_output()
9308 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9309 bpf_event->event_id.header.size); in perf_event_bpf_output()
9313 perf_output_put(&handle, bpf_event->event_id); in perf_event_bpf_output()
9314 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9326 (u64)(unsigned long)prog->bpf_func, in perf_event_bpf_emit_ksymbols()
9327 prog->jited_len, unregister, in perf_event_bpf_emit_ksymbols()
9328 prog->aux->ksym.name); in perf_event_bpf_emit_ksymbols()
9330 for (i = 1; i < prog->aux->func_cnt; i++) { in perf_event_bpf_emit_ksymbols()
9331 struct bpf_prog *subprog = prog->aux->func[i]; in perf_event_bpf_emit_ksymbols()
9335 (u64)(unsigned long)subprog->bpf_func, in perf_event_bpf_emit_ksymbols()
9336 subprog->jited_len, unregister, in perf_event_bpf_emit_ksymbols()
9337 subprog->aux->ksym.name); in perf_event_bpf_emit_ksymbols()
9373 .id = prog->aux->id, in perf_event_bpf_event()
9379 memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE); in perf_event_bpf_event()
9397 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9399 return event->attr.text_poke; in perf_event_text_poke_match()
9402 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9410 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9413 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9415 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9416 text_poke_event->event_id.header.size); in perf_event_text_poke_output()
9420 perf_output_put(&handle, text_poke_event->event_id); in perf_event_text_poke_output()
9421 perf_output_put(&handle, text_poke_event->old_len); in perf_event_text_poke_output()
9422 perf_output_put(&handle, text_poke_event->new_len); in perf_event_text_poke_output()
9424 __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len); in perf_event_text_poke_output()
9425 __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len); in perf_event_text_poke_output()
9427 if (text_poke_event->pad) in perf_event_text_poke_output()
9428 __output_copy(&handle, &padding, text_poke_event->pad); in perf_event_text_poke_output()
9430 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9446 pad = ALIGN(tot, sizeof(u64)) - tot; in perf_event_text_poke()
9467 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9469 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9472 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9483 if (event->parent) in perf_log_itrace_start()
9484 event = event->parent; in perf_log_itrace_start()
9486 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9487 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9493 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9494 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9496 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9497 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9503 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9508 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9518 if (event->parent) in perf_report_aux_output_id()
9519 event = event->parent; in perf_report_aux_output_id()
9526 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
9527 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
9533 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
9540 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9542 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9547 if (seq != hwc->interrupts_seq) { in __perf_event_account_interrupt()
9548 hwc->interrupts_seq = seq; in __perf_event_account_interrupt()
9549 hwc->interrupts = 1; in __perf_event_account_interrupt()
9551 hwc->interrupts++; in __perf_event_account_interrupt()
9553 hwc->interrupts > max_samples_per_tick)) { in __perf_event_account_interrupt()
9556 hwc->interrupts = MAX_INTERRUPTS; in __perf_event_account_interrupt()
9557 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9562 if (event->attr.freq) { in __perf_event_account_interrupt()
9564 s64 delta = now - hwc->freq_time_stamp; in __perf_event_account_interrupt()
9566 hwc->freq_time_stamp = now; in __perf_event_account_interrupt()
9569 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9575 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9577 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
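When an event is throttled, perf_log_throttle() above emits PERF_RECORD_THROTTLE (and later PERF_RECORD_UNTHROTTLE) into the ring buffer. A sketch of the record body a consumer would see, following the fields written above (time, id, stream_id), with the optional sample_id trailer when sample_id_all is set:

/* Sketch of the (UN)THROTTLE record body as written by perf_log_throttle().
 * The trailing struct sample_id is only present when attr.sample_id_all = 1. */
#include <linux/perf_event.h>
#include <stdint.h>

struct throttle_record {
	struct perf_event_header header;	/* type == PERF_RECORD_THROTTLE or
						 * PERF_RECORD_UNTHROTTLE */
	uint64_t time;				/* perf_event_clock() timestamp */
	uint64_t id;				/* primary_event_id() */
	uint64_t stream_id;			/* event->id */
	/* followed by struct sample_id when sample_id_all */
};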
9580 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
9587 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
9594 * Generic event overflow handling, sampling.
9597 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9601 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9605 * Non-sampling counters might still use the PMI to fold short in __perf_event_overflow()
9608 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9611 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9618 event->pending_kill = POLL_IN; in __perf_event_overflow()
9619 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9621 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9622 perf_event_disable_inatomic(event); in __perf_event_overflow()
9625 if (event->attr.sigtrap) { in __perf_event_overflow()
9629 * it is the first event, on the other hand, we should also not in __perf_event_overflow()
9632 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
9637 if (!event->pending_sigtrap) { in __perf_event_overflow()
9638 event->pending_sigtrap = pending_id; in __perf_event_overflow()
9639 local_inc(&event->ctx->nr_pending); in __perf_event_overflow()
9640 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
9648 * 2. Events that can overflow again before the IRQ- in __perf_event_overflow()
9651 * check 32-bit hash of the current IP. in __perf_event_overflow()
9653 WARN_ON_ONCE(event->pending_sigtrap != pending_id); in __perf_event_overflow()
9656 event->pending_addr = 0; in __perf_event_overflow()
9657 if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR)) in __perf_event_overflow()
9658 event->pending_addr = data->addr; in __perf_event_overflow()
9659 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9662 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9664 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9665 event->pending_wakeup = 1; in __perf_event_overflow()
9666 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9672 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9676 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
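The attr.sigtrap branch above arms a synchronous SIGTRAP towards the measured task on overflow. A rough user-space sketch, assuming a kernel new enough to have attr.sigtrap (which also requires remove_on_exec and a per-task event, as the attr validation further down enforces); the choice of a software page-fault event with sample_period = 1 is only for illustration:

/* Sketch: deliver SIGTRAP to the measured task on every sampled overflow. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <stdio.h>

static volatile sig_atomic_t traps;

static void on_sigtrap(int sig) { traps++; }

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	signal(SIGTRAP, on_sigtrap);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
	attr.sample_period = 1;
	attr.sigtrap = 1;
	attr.remove_on_exec = 1;	/* required together with sigtrap */
	attr.exclude_kernel = 1;

	int fd = perf_event_open(&attr, 0, -1, -1, 0);	/* per-task: pid 0 */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Touch some fresh pages to trigger minor faults -> overflows. */
	char *p = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	for (int i = 0; i < 16; i++)
		p[i * 4096] = 1;

	printf("SIGTRAPs received: %d\n", (int)traps);
	close(fd);
	return 0;
}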
9680 * Generic software event infrastructure
9695 * We directly increment event->count and keep a second value in
9696 * event->hw.period_left to count intervals. This period event
9697 * is kept in the range [-sample_period, 0] so that we can use the
9701 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9703 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9704 u64 period = hwc->last_period; in perf_swevent_set_period()
9708 hwc->last_period = hwc->sample_period; in perf_swevent_set_period()
9710 old = local64_read(&hwc->period_left); in perf_swevent_set_period()
9718 val -= offset; in perf_swevent_set_period()
9719 } while (!local64_try_cmpxchg(&hwc->period_left, &old, val)); in perf_swevent_set_period()
9724 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9728 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9732 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
9734 if (hwc->interrupts == MAX_INTERRUPTS) in perf_swevent_overflow()
9737 for (; overflow; overflow--) { in perf_swevent_overflow()
9738 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
9742 * hwc->interrupts == MAX_INTERRUPTS. in perf_swevent_overflow()
9750 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
9754 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
9756 local64_add(nr, &event->count); in perf_swevent_event()
9761 if (!is_sampling_event(event)) in perf_swevent_event()
9764 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
9765 data->period = nr; in perf_swevent_event()
9766 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9768 data->period = event->hw.last_period; in perf_swevent_event()
9770 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
9771 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9773 if (local64_add_negative(nr, &hwc->period_left)) in perf_swevent_event()
9776 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
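The swevent path above relies on hwc->period_left being kept in [-sample_period, 0] so that a plain sign test (local64_add_negative()) decides whether a period has elapsed. A toy user-space rendition of that arithmetic (illustration only, not kernel code):

/* Toy model of the [-sample_period, 0] counting trick used by swevents. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int64_t sample_period = 4;
	int64_t period_left = -sample_period;	/* what "set_period" establishes */
	int64_t overflows = 0;

	for (int event = 1; event <= 10; event++) {
		period_left += 1;		/* local64_add_negative(nr, ...) */
		if (period_left >= 0) {		/* sign flipped: period elapsed */
			overflows++;
			period_left -= sample_period;	/* re-arm, back in range */
		}
		printf("event %2d: period_left=%lld overflows=%lld\n",
		       event, (long long)period_left, (long long)overflows);
	}
	return 0;
}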
9779 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
9782 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
9786 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
9789 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
9796 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
9802 if (event->attr.type != type) in perf_swevent_match()
9805 if (event->attr.config != event_id) in perf_swevent_match()
9808 if (perf_exclude_event(event, regs)) in perf_swevent_match()
9826 return &hlist->heads[hash]; in __find_swevent_head()
9835 hlist = rcu_dereference(swhash->swevent_hlist); in find_swevent_head_rcu()
9842 /* For the event head insertion and removal in the hlist */
9844 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
9847 u32 event_id = event->attr.config; in find_swevent_head()
9848 u64 type = event->attr.type; in find_swevent_head()
9851 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
9855 hlist = rcu_dereference_protected(swhash->swevent_hlist, in find_swevent_head()
9856 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9869 struct perf_event *event; in do_perf_sw_event() local
9877 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
9878 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
9879 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
9891 return get_recursion_context(swhash->recursion); in perf_swevent_get_recursion_context()
9899 put_recursion_context(swhash->recursion, rctx); in perf_swevent_put_recursion_context()
9929 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
9933 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
9936 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
9939 if (is_sampling_event(event)) { in perf_swevent_add()
9940 hwc->last_period = hwc->sample_period; in perf_swevent_add()
9941 perf_swevent_set_period(event); in perf_swevent_add()
9944 hwc->state = !(flags & PERF_EF_START); in perf_swevent_add()
9946 head = find_swevent_head(swhash, event); in perf_swevent_add()
9948 return -EINVAL; in perf_swevent_add()
9950 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
9951 perf_event_update_userpage(event); in perf_swevent_add()
9956 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
9958 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
9961 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
9963 event->hw.state = 0; in perf_swevent_start()
9966 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
9968 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
9975 return rcu_dereference_protected(swhash->swevent_hlist, in swevent_hlist_deref()
9976 lockdep_is_held(&swhash->hlist_mutex)); in swevent_hlist_deref()
9986 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); in swevent_hlist_release()
9994 mutex_lock(&swhash->hlist_mutex); in swevent_hlist_put_cpu()
9996 if (!--swhash->hlist_refcount) in swevent_hlist_put_cpu()
9999 mutex_unlock(&swhash->hlist_mutex); in swevent_hlist_put_cpu()
10015 mutex_lock(&swhash->hlist_mutex); in swevent_hlist_get_cpu()
10022 err = -ENOMEM; in swevent_hlist_get_cpu()
10025 rcu_assign_pointer(swhash->swevent_hlist, hlist); in swevent_hlist_get_cpu()
10027 swhash->hlist_refcount++; in swevent_hlist_get_cpu()
10029 mutex_unlock(&swhash->hlist_mutex); in swevent_hlist_get_cpu()
10060 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10062 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10064 WARN_ON(event->parent); in sw_perf_event_destroy()
10073 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10075 u64 event_id = event->attr.config; in perf_swevent_init()
10077 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10078 return -ENOENT; in perf_swevent_init()
10083 if (has_branch_stack(event)) in perf_swevent_init()
10084 return -EOPNOTSUPP; in perf_swevent_init()
10088 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10089 return -ENOENT; in perf_swevent_init()
10091 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10092 return -ENOENT; in perf_swevent_init()
10099 return -ENOENT; in perf_swevent_init()
10101 if (!event->parent) { in perf_swevent_init()
10109 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
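perf_swevent_init() above is what backs the plain software counters from user space; note how it punts the two clock configs to their own PMUs by rewriting attr.type and returning -ENOENT so the core retries. A minimal counting-mode sketch (again using a raw syscall wrapper):

/* Sketch: count context switches suffered by this task. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;

	int fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	for (int i = 0; i < 5; i++)
		usleep(10000);		/* each sleep forces a context switch */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("context switches: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}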
10130 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10132 perf_trace_destroy(event); in tp_perf_event_destroy()
10135 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10139 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10140 return -ENOENT; in perf_tp_event_init()
10145 if (has_branch_stack(event)) in perf_tp_event_init()
10146 return -EOPNOTSUPP; in perf_tp_event_init()
10148 err = perf_trace_init(event); in perf_tp_event_init()
10152 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
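perf_tp_event_init() is reached when user space opens a PERF_TYPE_TRACEPOINT event whose config is the tracepoint id exported by tracefs. A sketch; the tracefs mount point and the need for CAP_PERFMON/root vary by system:

/* Sketch: count sched:sched_switch tracepoint hits system-wide on CPU 0.
 * The id file path assumes tracefs mounted at /sys/kernel/tracing. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long id = 0;
	uint64_t count;
	FILE *f = fopen("/sys/kernel/tracing/events/sched/sched_switch/id", "r");

	if (!f || fscanf(f, "%llu", &id) != 1) {
		perror("tracepoint id");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;

	int fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("sched_switch on CPU0: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}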
10168 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10171 void *record = raw->frag.data; in perf_tp_filter_match()
10174 if (event->parent) in perf_tp_filter_match()
10175 event = event->parent; in perf_tp_filter_match()
10177 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10182 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10186 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10189 * If exclude_kernel, only trace user-space tracepoints (uprobes) in perf_tp_event_match()
10191 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10194 if (!perf_tp_filter_match(event, raw)) in perf_tp_event_match()
10212 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10221 struct perf_event *event) in __perf_tp_event_target_task() argument
10225 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10228 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10230 if (perf_tp_event_match(event, raw, regs)) { in __perf_tp_event_target_task()
10232 perf_sample_save_raw_data(data, event, raw); in __perf_tp_event_target_task()
10233 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10245 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10247 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10248 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10249 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10253 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10254 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10255 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10265 struct perf_event *event; in perf_tp_event() local
10276 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
10277 if (perf_tp_event_match(event, &raw, regs)) { in perf_tp_event()
10279 * Here use the same on-stack perf_sample_data, in perf_tp_event()
10280 * some members in data are event-specific and in perf_tp_event()
10281 * need to be re-computed for different swevents. in perf_tp_event()
10282 * Re-initialize data->sample_flags safely to avoid in perf_tp_event()
10283 * the problem that next event skips preparing data in perf_tp_event()
10284 * because data->sample_flags is set. in perf_tp_event()
10287 perf_sample_save_raw_data(&data, event, &raw); in perf_tp_event()
10288 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
10294 * deliver this event there too. in perf_tp_event()
10300 ctx = rcu_dereference(task->perf_event_ctxp); in perf_tp_event()
10304 raw_spin_lock(&ctx->lock); in perf_tp_event()
10306 raw_spin_unlock(&ctx->lock); in perf_tp_event()
10333 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
10355 static int perf_kprobe_event_init(struct perf_event *event);
10367 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10372 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10373 return -ENOENT; in perf_kprobe_event_init()
10376 return -EACCES; in perf_kprobe_event_init()
10381 if (has_branch_stack(event)) in perf_kprobe_event_init()
10382 return -EOPNOTSUPP; in perf_kprobe_event_init()
10384 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10385 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10389 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10396 PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
10414 static int perf_uprobe_event_init(struct perf_event *event);
10426 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
10432 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
10433 return -ENOENT; in perf_uprobe_event_init()
10436 return -EACCES; in perf_uprobe_event_init()
10441 if (has_branch_stack(event)) in perf_uprobe_event_init()
10442 return -EOPNOTSUPP; in perf_uprobe_event_init()
10444 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
10445 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
10446 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
10450 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
10460 perf_pmu_register(&perf_kprobe, "kprobe", -1); in perf_tp_register()
10463 perf_pmu_register(&perf_uprobe, "uprobe", -1); in perf_tp_register()
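perf_tp_register() above also registers the dynamic "kprobe" and "uprobe" PMUs. From user space they are addressed by the type number exported in sysfs, with the probe target passed through the attr (kprobe_func/probe_offset, or uprobe_path/probe_offset). A sketch; the symbol below is only an example and the whole thing needs CAP_PERFMON or root:

/* Sketch: count hits of a kernel function via the "kprobe" PMU. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t count;
	FILE *f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");

	if (!f || fscanf(f, "%u", &type) != 1) {
		perror("kprobe pmu type");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.kprobe_func = (uint64_t)(unsigned long)"do_sys_openat2"; /* example symbol */
	attr.probe_offset = 0;
	/* attr.config bit 0 would request a kretprobe (see format/retprobe). */

	int fd = perf_event_open(&attr, -1, 0, -1, 0);	/* CPU 0, system-wide */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("kprobe hits: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}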
10467 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10469 ftrace_profile_free_filter(event); in perf_event_free_filter()
10473 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10479 .event = event, in bpf_overflow_handler()
10488 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
10490 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
10499 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
10502 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10506 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
10508 return -EINVAL; in perf_event_set_bpf_handler()
10510 if (event->prog) in perf_event_set_bpf_handler()
10511 return -EEXIST; in perf_event_set_bpf_handler()
10513 if (prog->type != BPF_PROG_TYPE_PERF_EVENT) in perf_event_set_bpf_handler()
10514 return -EINVAL; in perf_event_set_bpf_handler()
10516 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
10517 prog->call_get_stack && in perf_event_set_bpf_handler()
10518 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
10519 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
10520 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
10530 return -EPROTO; in perf_event_set_bpf_handler()
10533 event->prog = prog; in perf_event_set_bpf_handler()
10534 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
10535 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
10536 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
10540 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10542 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
10547 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
10548 event->prog = NULL; in perf_event_free_bpf_handler()
10552 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10556 return -EOPNOTSUPP; in perf_event_set_bpf_handler()
10558 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10564 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10567 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10569 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10572 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10576 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10582 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10587 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10588 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10590 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
10591 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
10592 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10593 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10596 return -EINVAL; in perf_event_set_bpf_prog()
10598 if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) || in perf_event_set_bpf_prog()
10599 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) || in perf_event_set_bpf_prog()
10600 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) in perf_event_set_bpf_prog()
10601 return -EINVAL; in perf_event_set_bpf_prog()
10603 if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe) in perf_event_set_bpf_prog()
10605 return -EINVAL; in perf_event_set_bpf_prog()
10608 if (prog->kprobe_override && !is_kprobe) in perf_event_set_bpf_prog()
10609 return -EINVAL; in perf_event_set_bpf_prog()
10612 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10614 if (prog->aux->max_ctx_offset > off) in perf_event_set_bpf_prog()
10615 return -EACCES; in perf_event_set_bpf_prog()
10618 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10621 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10623 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10624 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10627 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
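perf_event_set_bpf_prog() above is where the PERF_EVENT_IOC_SET_BPF ioctl ends up. A fragment of how user space attaches an already-loaded program; obtaining prog_fd via bpf(BPF_PROG_LOAD, ...) or libbpf is outside the scope of this sketch:

/* Fragment: attach a loaded BPF program to a tracepoint/kprobe perf event.
 * perf_fd is the event fd, prog_fd a matching BPF_PROG_TYPE_* program fd. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int attach_bpf(int perf_fd, int prog_fd)
{
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0)
		return -1;
	/* Optionally start the event if it was opened disabled. */
	return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
}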
10636 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10640 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10643 return -ENOENT; in perf_event_set_bpf_prog()
10646 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10657 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); in perf_bp_event()
10659 if (!bp->hw.state && !perf_exclude_event(bp, regs)) in perf_bp_event()
10668 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10670 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10677 INIT_LIST_HEAD(&filter->entry); in perf_addr_filter_new()
10678 list_add_tail(&filter->entry, filters); in perf_addr_filter_new()
10688 path_put(&filter->path); in free_filters_list()
10689 list_del(&filter->entry); in free_filters_list()
10697 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10703 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10707 if (event->parent) in perf_addr_filters_splice()
10710 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10712 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10714 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10716 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10734 if (!vma->vm_file) in perf_addr_filter_apply()
10743 * Update event's address range filters based on the
10746 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10748 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10749 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10756 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10762 if (ifh->nr_file_filters) { in perf_event_addr_filters_apply()
10770 raw_spin_lock_irqsave(&ifh->lock, flags); in perf_event_addr_filters_apply()
10771 list_for_each_entry(filter, &ifh->list, entry) { in perf_event_addr_filters_apply()
10772 if (filter->path.dentry) { in perf_event_addr_filters_apply()
10777 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10778 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10780 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10782 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10783 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10789 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10790 raw_spin_unlock_irqrestore(&ifh->lock, flags); in perf_event_addr_filters_apply()
10792 if (ifh->nr_file_filters) { in perf_event_addr_filters_apply()
10799 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10822 IF_ACT_NONE = -1,
10853 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10861 int ret = -EINVAL; in perf_event_parse_addr_filter()
10865 return -ENOMEM; in perf_event_parse_addr_filter()
10873 ret = -EINVAL; in perf_event_parse_addr_filter()
10880 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
10893 filter->action = actions[token]; in perf_event_parse_addr_filter()
10908 ret = kstrtoul(args[0].from, 0, &filter->offset); in perf_event_parse_addr_filter()
10914 ret = kstrtoul(args[1].from, 0, &filter->size); in perf_event_parse_addr_filter()
10925 ret = -ENOMEM; in perf_event_parse_addr_filter()
10939 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
10943 ret = -EINVAL; in perf_event_parse_addr_filter()
10946 * ACTION "filter" must have a non-zero length region in perf_event_parse_addr_filter()
10949 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER && in perf_event_parse_addr_filter()
10950 !filter->size) in perf_event_parse_addr_filter()
10958 * For now, we only support file-based filters in perf_event_parse_addr_filter()
10959 * in per-task events; doing so for CPU-wide in perf_event_parse_addr_filter()
10965 ret = -EOPNOTSUPP; in perf_event_parse_addr_filter()
10966 if (!event->ctx->task) in perf_event_parse_addr_filter()
10971 &filter->path); in perf_event_parse_addr_filter()
10975 ret = -EINVAL; in perf_event_parse_addr_filter()
10976 if (!filter->path.dentry || in perf_event_parse_addr_filter()
10977 !S_ISREG(d_inode(filter->path.dentry) in perf_event_parse_addr_filter()
10978 ->i_mode)) in perf_event_parse_addr_filter()
10981 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
11010 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
11019 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
11021 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
11022 return -EINVAL; in perf_event_set_addr_filter()
11024 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
11028 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
11033 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
11036 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11044 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
11049 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11051 int ret = -EINVAL; in perf_event_set_filter()
11059 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11060 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11065 * the tracepoint muck will deadlock against ctx->mutex, but in perf_event_set_filter()
11067 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we in perf_event_set_filter()
11070 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
11073 mutex_unlock(&ctx->mutex); in perf_event_set_filter()
11074 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11075 mutex_lock(&ctx->mutex); in perf_event_set_filter()
11078 if (has_addr_filter(event)) in perf_event_set_filter()
11079 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
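Both filter flavours handled by perf_event_set_filter() are driven by the same PERF_EVENT_IOC_SET_FILTER ioctl: tracing events take an ftrace-style predicate, while PMUs with address filters take strings of the form parsed above, e.g. "filter 0x1000/0x2000@/usr/bin/foo". A fragment for the tracepoint case (field name taken from the sched_switch format file):

/* Fragment: restrict an already-opened sched:sched_switch tracepoint event
 * to switches away from pid 1234 (predicate syntax as in tracefs "filter"). */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int set_tp_filter(int perf_fd)
{
	return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "prev_pid == 1234");
}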
11094 struct perf_event *event; in perf_swevent_hrtimer() local
11097 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11099 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
11102 event->pmu->read(event); in perf_swevent_hrtimer()
11104 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11107 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11108 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11109 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11113 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11119 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11121 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11124 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11127 period = local64_read(&hwc->period_left); in perf_swevent_start_hrtimer()
11132 local64_set(&hwc->period_left, 0); in perf_swevent_start_hrtimer()
11134 period = max_t(u64, 10000, hwc->sample_period); in perf_swevent_start_hrtimer()
11136 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), in perf_swevent_start_hrtimer()
11140 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11142 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11144 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
11145 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); in perf_swevent_cancel_hrtimer()
11146 local64_set(&hwc->period_left, ktime_to_ns(remaining)); in perf_swevent_cancel_hrtimer()
11148 hrtimer_cancel(&hwc->hrtimer); in perf_swevent_cancel_hrtimer()
11152 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11154 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11156 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11159 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in perf_swevent_init_hrtimer()
11160 hwc->hrtimer.function = perf_swevent_hrtimer; in perf_swevent_init_hrtimer()
11163 * Since hrtimers have a fixed rate, we can do a static freq->period in perf_swevent_init_hrtimer()
11166 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11167 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11169 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11170 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11171 local64_set(&hwc->period_left, hwc->sample_period); in perf_swevent_init_hrtimer()
11172 hwc->last_period = hwc->sample_period; in perf_swevent_init_hrtimer()
11173 event->attr.freq = 0; in perf_swevent_init_hrtimer()
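Because the hrtimer tick is the sampling source here, a freq request is folded into a fixed period once at init time, as the code above does: period_ns = NSEC_PER_SEC / sample_freq. A tiny worked example:

/* Fixed freq->period mapping used for hrtimer-based software events. */
#include <stdio.h>

int main(void)
{
	const long long NSEC_PER_SEC = 1000000000LL;
	long long freq = 4000;			/* attr.sample_freq = 4000 Hz */

	printf("period = %lld ns\n", NSEC_PER_SEC / freq);	/* 250000 ns */
	return 0;
}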
11178 * Software event: cpu wall time clock
11181 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11187 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11188 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11191 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11193 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11194 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11197 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11199 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11200 cpu_clock_event_update(event); in cpu_clock_event_stop()
11203 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11206 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11207 perf_event_update_userpage(event); in cpu_clock_event_add()
11212 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11214 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11217 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11219 cpu_clock_event_update(event); in cpu_clock_event_read()
11222 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11224 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11225 return -ENOENT; in cpu_clock_event_init()
11227 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11228 return -ENOENT; in cpu_clock_event_init()
11233 if (has_branch_stack(event)) in cpu_clock_event_init()
11234 return -EOPNOTSUPP; in cpu_clock_event_init()
11236 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
11256 * Software event: task time clock
11259 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11264 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11265 delta = now - prev; in task_clock_event_update()
11266 local64_add(delta, &event->count); in task_clock_event_update()
11269 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11271 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11272 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11275 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11277 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11278 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11281 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11284 task_clock_event_start(event, flags); in task_clock_event_add()
11285 perf_event_update_userpage(event); in task_clock_event_add()
11290 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11292 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11295 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11298 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11299 u64 time = event->ctx->time + delta; in task_clock_event_read()
11301 task_clock_event_update(event, time); in task_clock_event_read()
11304 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11306 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
11307 return -ENOENT; in task_clock_event_init()
11309 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
11310 return -ENOENT; in task_clock_event_init()
11315 if (has_branch_stack(event)) in task_clock_event_init()
11316 return -EOPNOTSUPP; in task_clock_event_init()
11318 perf_swevent_init_hrtimer(event); in task_clock_event_init()
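The two clock PMUs above are reached from user space as the software events PERF_COUNT_SW_CPU_CLOCK and PERF_COUNT_SW_TASK_CLOCK (the software PMU forwards those configs here, as seen earlier in perf_swevent_init()). A sketch that opens both for the current task; for a purely CPU-bound loop the two counts come out broadly similar:

/* Sketch: compare cpu-clock and task-clock for the current task. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_sw_clock(uint64_t config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = config;
	return perf_event_open(&attr, 0, -1, -1, 0);
}

int main(void)
{
	uint64_t cpu_ns = 0, task_ns = 0;
	int cpu_fd = open_sw_clock(PERF_COUNT_SW_CPU_CLOCK);
	int task_fd = open_sw_clock(PERF_COUNT_SW_TASK_CLOCK);

	if (cpu_fd < 0 || task_fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	for (volatile unsigned long i = 0; i < 100000000UL; i++)
		;	/* burn some CPU */

	if (read(cpu_fd, &cpu_ns, sizeof(cpu_ns)) != sizeof(cpu_ns) ||
	    read(task_fd, &task_ns, sizeof(task_ns)) != sizeof(task_ns))
		return 1;
	printf("cpu-clock:  %llu ns\n", (unsigned long long)cpu_ns);
	printf("task-clock: %llu ns\n", (unsigned long long)task_ns);
	close(cpu_fd);
	close(task_fd);
	return 0;
}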
11350 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11392 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11399 free_percpu(pmu->cpu_pmu_context); in free_pmu_context()
11411 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); in nr_addr_filters_show()
11422 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->type); in type_show()
11433 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->hrtimer_interval_ms); in perf_event_mux_interval_ms_show()
11451 return -EINVAL; in perf_event_mux_interval_ms_store()
11454 if (timer == pmu->hrtimer_interval_ms) in perf_event_mux_interval_ms_store()
11458 pmu->hrtimer_interval_ms = timer; in perf_event_mux_interval_ms_store()
11464 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu); in perf_event_mux_interval_ms_store()
11465 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in perf_event_mux_interval_ms_store()
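The hrtimer interval updated above is the per-PMU multiplexing period, tunable through sysfs. A tiny sketch that reads and bumps it for one PMU; the "cpu" PMU name is just the common x86 core-PMU example, and writing requires root:

/* Fragment: read and update a PMU's event-multiplexing interval (ms). */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms";
	int ms = 0;
	FILE *f = fopen(path, "r");

	if (f && fscanf(f, "%d", &ms) == 1)
		printf("current mux interval: %d ms\n", ms);
	if (f)
		fclose(f);

	f = fopen(path, "w");
	if (f) {
		fprintf(f, "2\n");	/* rotate event groups every 2 ms */
		fclose(f);
	}
	return 0;
}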
11488 if (n == 2 && !pmu->nr_addr_filters) in pmu_dev_is_visible()
11491 return a->mode; in pmu_dev_is_visible()
11517 int ret = -ENOMEM; in pmu_dev_alloc()
11519 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); in pmu_dev_alloc()
11520 if (!pmu->dev) in pmu_dev_alloc()
11523 pmu->dev->groups = pmu->attr_groups; in pmu_dev_alloc()
11524 device_initialize(pmu->dev); in pmu_dev_alloc()
11526 dev_set_drvdata(pmu->dev, pmu); in pmu_dev_alloc()
11527 pmu->dev->bus = &pmu_bus; in pmu_dev_alloc()
11528 pmu->dev->parent = pmu->parent; in pmu_dev_alloc()
11529 pmu->dev->release = pmu_dev_release; in pmu_dev_alloc()
11531 ret = dev_set_name(pmu->dev, "%s", pmu->name); in pmu_dev_alloc()
11535 ret = device_add(pmu->dev); in pmu_dev_alloc()
11539 if (pmu->attr_update) { in pmu_dev_alloc()
11540 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update); in pmu_dev_alloc()
11549 device_del(pmu->dev); in pmu_dev_alloc()
11552 put_device(pmu->dev); in pmu_dev_alloc()
11564 ret = -ENOMEM; in perf_pmu_register()
11565 pmu->pmu_disable_count = alloc_percpu(int); in perf_pmu_register()
11566 if (!pmu->pmu_disable_count) in perf_pmu_register()
11569 pmu->type = -1; in perf_pmu_register()
11571 ret = -EINVAL; in perf_pmu_register()
11575 pmu->name = name; in perf_pmu_register()
11587 pmu->type = type; in perf_pmu_register()
11589 if (pmu_bus_running && !pmu->dev) { in perf_pmu_register()
11595 ret = -ENOMEM; in perf_pmu_register()
11596 pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context); in perf_pmu_register()
11597 if (!pmu->cpu_pmu_context) in perf_pmu_register()
11603 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu); in perf_pmu_register()
11604 __perf_init_event_pmu_context(&cpc->epc, pmu); in perf_pmu_register()
11608 if (!pmu->start_txn) { in perf_pmu_register()
11609 if (pmu->pmu_enable) { in perf_pmu_register()
11615 pmu->start_txn = perf_pmu_start_txn; in perf_pmu_register()
11616 pmu->commit_txn = perf_pmu_commit_txn; in perf_pmu_register()
11617 pmu->cancel_txn = perf_pmu_cancel_txn; in perf_pmu_register()
11619 pmu->start_txn = perf_pmu_nop_txn; in perf_pmu_register()
11620 pmu->commit_txn = perf_pmu_nop_int; in perf_pmu_register()
11621 pmu->cancel_txn = perf_pmu_nop_void; in perf_pmu_register()
11625 if (!pmu->pmu_enable) { in perf_pmu_register()
11626 pmu->pmu_enable = perf_pmu_nop_void; in perf_pmu_register()
11627 pmu->pmu_disable = perf_pmu_nop_void; in perf_pmu_register()
11630 if (!pmu->check_period) in perf_pmu_register()
11631 pmu->check_period = perf_event_nop_int; in perf_pmu_register()
11633 if (!pmu->event_idx) in perf_pmu_register()
11634 pmu->event_idx = perf_event_idx_default; in perf_pmu_register()
11636 list_add_rcu(&pmu->entry, &pmus); in perf_pmu_register()
11637 atomic_set(&pmu->exclusive_cnt, 0); in perf_pmu_register()
11645 if (pmu->dev && pmu->dev != PMU_NULL_DEV) { in perf_pmu_register()
11646 device_del(pmu->dev); in perf_pmu_register()
11647 put_device(pmu->dev); in perf_pmu_register()
11651 idr_remove(&pmu_idr, pmu->type); in perf_pmu_register()
11654 free_percpu(pmu->pmu_disable_count); in perf_pmu_register()
11662 list_del_rcu(&pmu->entry); in perf_pmu_unregister()
11663 idr_remove(&pmu_idr, pmu->type); in perf_pmu_unregister()
11673 free_percpu(pmu->pmu_disable_count); in perf_pmu_unregister()
11674 if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) { in perf_pmu_unregister()
11675 if (pmu->nr_addr_filters) in perf_pmu_unregister()
11676 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); in perf_pmu_unregister()
11677 device_del(pmu->dev); in perf_pmu_unregister()
11678 put_device(pmu->dev); in perf_pmu_unregister()
11684 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11686 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11687 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11690 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11695 if (!try_module_get(pmu->module)) in perf_try_init_event()
11696 return -ENODEV; in perf_try_init_event()
11699 * A number of pmu->event_init() methods iterate the sibling_list to, in perf_try_init_event()
11701 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
11704 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11706 * This ctx->mutex can nest when we're called through in perf_try_init_event()
11709 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11714 event->pmu = pmu; in perf_try_init_event()
11715 ret = pmu->event_init(event); in perf_try_init_event()
11718 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11721 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && in perf_try_init_event()
11722 has_extended_regs(event)) in perf_try_init_event()
11723 ret = -EOPNOTSUPP; in perf_try_init_event()
11725 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && in perf_try_init_event()
11726 event_has_any_exclude_flag(event)) in perf_try_init_event()
11727 ret = -EINVAL; in perf_try_init_event()
11729 if (ret && event->destroy) in perf_try_init_event()
11730 event->destroy(event); in perf_try_init_event()
11734 module_put(pmu->module); in perf_try_init_event()
11739 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11748 * Save original type before calling pmu->event_init() since certain in perf_init_event()
11749 * pmus overwrites event->attr.type to forward event to another pmu. in perf_init_event()
11751 event->orig_type = event->attr.type; in perf_init_event()
11754 if (event->parent && event->parent->pmu) { in perf_init_event()
11755 pmu = event->parent->pmu; in perf_init_event()
11756 ret = perf_try_init_event(pmu, event); in perf_init_event()
11765 type = event->attr.type; in perf_init_event()
11767 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
11772 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
11781 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
11782 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)) in perf_init_event()
11785 ret = perf_try_init_event(pmu, event); in perf_init_event()
11786 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
11787 type = event->attr.type; in perf_init_event()
11798 ret = perf_try_init_event(pmu, event); in perf_init_event()
11802 if (ret != -ENOENT) { in perf_init_event()
11808 pmu = ERR_PTR(-ENOENT); in perf_init_event()
11815 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
11817 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
11819 raw_spin_lock(&pel->lock); in attach_sb_event()
11820 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
11821 raw_spin_unlock(&pel->lock); in attach_sb_event()
11825 * We keep a list of all !task (and therefore per-cpu) events
11826 * that need to receive side-band records.
11828 * This avoids having to scan all the various PMU per-cpu contexts
11831 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
11833 if (is_sb_event(event)) in account_pmu_sb_event()
11834 attach_sb_event(event); in account_pmu_sb_event()
11858 static void account_event(struct perf_event *event) in account_event() argument
11862 if (event->parent) in account_event()
11865 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
11867 if (event->attr.mmap || event->attr.mmap_data) in account_event()
11869 if (event->attr.build_id) in account_event()
11871 if (event->attr.comm) in account_event()
11873 if (event->attr.namespaces) in account_event()
11875 if (event->attr.cgroup) in account_event()
11877 if (event->attr.task) in account_event()
11879 if (event->attr.freq) in account_event()
11881 if (event->attr.context_switch) { in account_event()
11885 if (has_branch_stack(event)) in account_event()
11887 if (is_cgroup_event(event)) in account_event()
11889 if (event->attr.ksymbol) in account_event()
11891 if (event->attr.bpf_event) in account_event()
11893 if (event->attr.text_poke) in account_event()
11917 * increments to by-pass the mutex. in account_event()
11924 account_pmu_sb_event(event); in account_event()
11928 * Allocate and initialize an event structure
11939 struct perf_event *event; in perf_event_alloc() local
11941 long err = -EINVAL; in perf_event_alloc()
11945 if (!task || cpu != -1) in perf_event_alloc()
11946 return ERR_PTR(-EINVAL); in perf_event_alloc()
11948 if (attr->sigtrap && !task) { in perf_event_alloc()
11950 return ERR_PTR(-EINVAL); in perf_event_alloc()
11953 node = (cpu >= 0) ? cpu_to_node(cpu) : -1; in perf_event_alloc()
11954 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
11956 if (!event) in perf_event_alloc()
11957 return ERR_PTR(-ENOMEM); in perf_event_alloc()
11964 group_leader = event; in perf_event_alloc()
11966 mutex_init(&event->child_mutex); in perf_event_alloc()
11967 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
11969 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
11970 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
11971 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
11972 init_event_group(event); in perf_event_alloc()
11973 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
11974 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
11975 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
11976 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
11979 init_waitqueue_head(&event->waitq); in perf_event_alloc()
11980 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
11981 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
11982 rcuwait_init(&event->pending_work_wait); in perf_event_alloc()
11984 mutex_init(&event->mmap_mutex); in perf_event_alloc()
11985 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
11987 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
11988 event->cpu = cpu; in perf_event_alloc()
11989 event->attr = *attr; in perf_event_alloc()
11990 event->group_leader = group_leader; in perf_event_alloc()
11991 event->pmu = NULL; in perf_event_alloc()
11992 event->oncpu = -1; in perf_event_alloc()
11994 event->parent = parent_event; in perf_event_alloc()
11996 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
11997 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
11999 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
12002 event->event_caps = parent_event->event_caps; in perf_event_alloc()
12005 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
12011 event->hw.target = get_task_struct(task); in perf_event_alloc()
12014 event->clock = &local_clock; in perf_event_alloc()
12016 event->clock = parent_event->clock; in perf_event_alloc()
12019 overflow_handler = parent_event->overflow_handler; in perf_event_alloc()
12020 context = parent_event->overflow_handler_context; in perf_event_alloc()
12023 struct bpf_prog *prog = parent_event->prog; in perf_event_alloc()
12026 event->prog = prog; in perf_event_alloc()
12027 event->orig_overflow_handler = in perf_event_alloc()
12028 parent_event->orig_overflow_handler; in perf_event_alloc()
12034 event->overflow_handler = overflow_handler; in perf_event_alloc()
12035 event->overflow_handler_context = context; in perf_event_alloc()
12036 	} else if (is_write_backward(event)) { in perf_event_alloc()
12037 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
12038 event->overflow_handler_context = NULL; in perf_event_alloc()
12040 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
12041 event->overflow_handler_context = NULL; in perf_event_alloc()
12044 perf_event__state_init(event); in perf_event_alloc()
12048 hwc = &event->hw; in perf_event_alloc()
12049 hwc->sample_period = attr->sample_period; in perf_event_alloc()
12050 if (attr->freq && attr->sample_freq) in perf_event_alloc()
12051 hwc->sample_period = 1; in perf_event_alloc()
12052 hwc->last_period = hwc->sample_period; in perf_event_alloc()
12054 local64_set(&hwc->period_left, hwc->sample_period); in perf_event_alloc()
12060 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) in perf_event_alloc()
12063 if (!has_branch_stack(event)) in perf_event_alloc()
12064 event->attr.branch_sample_type = 0; in perf_event_alloc()
12066 pmu = perf_init_event(event); in perf_event_alloc()
12073 * Disallow uncore-task events. Similarly, disallow uncore-cgroup in perf_event_alloc()
12077 if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) { in perf_event_alloc()
12078 err = -EINVAL; in perf_event_alloc()
12082 if (event->attr.aux_output && in perf_event_alloc()
12083 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { in perf_event_alloc()
12084 err = -EOPNOTSUPP; in perf_event_alloc()
12088 if (cgroup_fd != -1) { in perf_event_alloc()
12089 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
12094 err = exclusive_event_init(event); in perf_event_alloc()
12098 if (has_addr_filter(event)) { in perf_event_alloc()
12099 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
12102 if (!event->addr_filter_ranges) { in perf_event_alloc()
12103 err = -ENOMEM; in perf_event_alloc()
12111 if (event->parent) { in perf_event_alloc()
12112 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
12114 raw_spin_lock_irq(&ifh->lock); in perf_event_alloc()
12115 memcpy(event->addr_filter_ranges, in perf_event_alloc()
12116 event->parent->addr_filter_ranges, in perf_event_alloc()
12117 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range)); in perf_event_alloc()
12118 raw_spin_unlock_irq(&ifh->lock); in perf_event_alloc()
12122 event->addr_filters_gen = 1; in perf_event_alloc()
12125 if (!event->parent) { in perf_event_alloc()
12126 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
12127 err = get_callchain_buffers(attr->sample_max_stack); in perf_event_alloc()
12133 err = security_perf_event_alloc(event); in perf_event_alloc()
12138 account_event(event); in perf_event_alloc()
12140 return event; in perf_event_alloc()
12143 if (!event->parent) { in perf_event_alloc()
12144 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
12148 kfree(event->addr_filter_ranges); in perf_event_alloc()
12151 exclusive_event_destroy(event); in perf_event_alloc()
12154 if (is_cgroup_event(event)) in perf_event_alloc()
12155 perf_detach_cgroup(event); in perf_event_alloc()
12156 if (event->destroy) in perf_event_alloc()
12157 event->destroy(event); in perf_event_alloc()
12158 module_put(pmu->module); in perf_event_alloc()
12160 if (event->hw.target) in perf_event_alloc()
12161 put_task_struct(event->hw.target); in perf_event_alloc()
12162 call_rcu(&event->rcu_head, free_event_rcu); in perf_event_alloc()
12176 ret = get_user(size, &uattr->size); in perf_copy_attr()
12188 if (ret == -E2BIG) in perf_copy_attr()
12193 attr->size = size; in perf_copy_attr()
12195 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) in perf_copy_attr()
12196 return -EINVAL; in perf_copy_attr()
12198 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) in perf_copy_attr()
12199 return -EINVAL; in perf_copy_attr()
12201 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) in perf_copy_attr()
12202 return -EINVAL; in perf_copy_attr()
12204 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { in perf_copy_attr()
12205 u64 mask = attr->branch_sample_type; in perf_copy_attr()
12208 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) in perf_copy_attr()
12209 return -EINVAL; in perf_copy_attr()
12213 return -EINVAL; in perf_copy_attr()
12219 if (!attr->exclude_kernel) in perf_copy_attr()
12222 if (!attr->exclude_user) in perf_copy_attr()
12225 if (!attr->exclude_hv) in perf_copy_attr()
12230 attr->branch_sample_type = mask; in perf_copy_attr()
12240 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { in perf_copy_attr()
12241 ret = perf_reg_validate(attr->sample_regs_user); in perf_copy_attr()
12246 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { in perf_copy_attr()
12248 return -ENOSYS; in perf_copy_attr()
12255 if (attr->sample_stack_user >= USHRT_MAX) in perf_copy_attr()
12256 return -EINVAL; in perf_copy_attr()
12257 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) in perf_copy_attr()
12258 return -EINVAL; in perf_copy_attr()
12261 if (!attr->sample_max_stack) in perf_copy_attr()
12262 attr->sample_max_stack = sysctl_perf_event_max_stack; in perf_copy_attr()
12264 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) in perf_copy_attr()
12265 ret = perf_reg_validate(attr->sample_regs_intr); in perf_copy_attr()
12268 if (attr->sample_type & PERF_SAMPLE_CGROUP) in perf_copy_attr()
12269 return -EINVAL; in perf_copy_attr()
12271 if ((attr->sample_type & PERF_SAMPLE_WEIGHT) && in perf_copy_attr()
12272 (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) in perf_copy_attr()
12273 return -EINVAL; in perf_copy_attr()
12275 if (!attr->inherit && attr->inherit_thread) in perf_copy_attr()
12276 return -EINVAL; in perf_copy_attr()
12278 if (attr->remove_on_exec && attr->enable_on_exec) in perf_copy_attr()
12279 return -EINVAL; in perf_copy_attr()
12281 if (attr->sigtrap && !attr->remove_on_exec) in perf_copy_attr()
12282 return -EINVAL; in perf_copy_attr()
12288 put_user(sizeof(*attr), &uattr->size); in perf_copy_attr()
12289 ret = -E2BIG; in perf_copy_attr()
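For context, perf_copy_attr() above sanitizes the user-supplied perf_event_attr before an event is created: reserved bits must be zero, inherit_thread is only valid together with inherit, sigtrap requires remove_on_exec, and an over-sized attr is answered with -E2BIG plus the kernel's expected size written back. A minimal user-space sketch of an attr that passes these checks (the chosen values are illustrative, not canonical):

#include <string.h>
#include <linux/perf_event.h>

/* Sketch: a perf_event_attr that satisfies perf_copy_attr()'s checks. */
static void fill_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);	/* an over-sized attr gets -E2BIG back */
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->freq = 1;
	attr->sample_freq = 4000;	/* freq mode: hwc->sample_period starts at 1 */
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr->exclude_kernel = 1;
	attr->inherit = 1;
	attr->inherit_thread = 1;	/* only valid together with inherit */
}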
12303 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12306 int ret = -EINVAL; in perf_event_set_output()
12309 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12314 if (event == output_event) in perf_event_set_output()
12318 * Don't allow cross-cpu buffers in perf_event_set_output()
12320 if (output_event->cpu != event->cpu) in perf_event_set_output()
12324 * If it's not a per-cpu rb, it must be the same task. in perf_event_set_output()
12326 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
12332 if (output_event->clock != event->clock) in perf_event_set_output()
12339 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12345 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12346 event->pmu != output_event->pmu) in perf_event_set_output()
12351 * output_event is already on rb->event_list, and the list iteration in perf_event_set_output()
12352 * restarts after every removal, it is guaranteed this new event is in perf_event_set_output()
12354 * observe !rb->mmap_count. in perf_event_set_output()
12356 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12359 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12369 if (!atomic_read(&rb->mmap_count)) { in perf_event_set_output()
12375 ring_buffer_attach(event, rb); in perf_event_set_output()
12379 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
12381 mutex_unlock(&output_event->mmap_mutex); in perf_event_set_output()
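perf_event_set_output() backs the PERF_EVENT_IOC_SET_OUTPUT ioctl. A hedged user-space sketch of redirecting one event's records into another event's ring buffer; both descriptors are assumed to come from perf_event_open() on the same CPU, with the same clock and write direction, otherwise the checks above fail with -EINVAL:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/*
 * Sketch: make 'fd' emit its records into the ring buffer that was
 * mmap()ed for 'leader_fd', so only one buffer needs to be polled.
 */
static int redirect_output(int fd, int leader_fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
}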
12387 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12393 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12398 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
12403 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
12407 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
12411 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
12415 return -EINVAL; in perf_event_set_clock()
12418 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
12419 return -EINVAL; in perf_event_set_clock()
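perf_event_set_clock() is reached when attr.use_clockid is set; each supported clockid is mapped to one of the kernel time accessors listed above, and clocks that are not NMI-safe are refused for PMUs that can fire from NMI context. A small sketch of requesting raw-monotonic timestamps (field names are the UAPI ones; the clock choice is just an example):

#include <time.h>
#include <linux/perf_event.h>

/* Sketch: stamp samples with CLOCK_MONOTONIC_RAW time. */
static void use_raw_clock(struct perf_event_attr *attr)
{
	attr->use_clockid = 1;
	attr->clockid = CLOCK_MONOTONIC_RAW;
	attr->sample_type |= PERF_SAMPLE_TIME;
}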
12430 if (attr->sigtrap) { in perf_check_permission()
12436 is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL); in perf_check_permission()
12456 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12461 * @group_fd: group leader event fd
12462 * @flags: perf event open flags
12470 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12481 int cgroup_fd = -1; in SYSCALL_DEFINE5()
12485 return -EINVAL; in SYSCALL_DEFINE5()
12504 return -EACCES; in SYSCALL_DEFINE5()
12509 return -EINVAL; in SYSCALL_DEFINE5()
12512 return -EINVAL; in SYSCALL_DEFINE5()
12535 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) in SYSCALL_DEFINE5()
12536 return -EINVAL; in SYSCALL_DEFINE5()
12545 if (group_fd != -1) { in SYSCALL_DEFINE5()
12549 group_leader = group.file->private_data; in SYSCALL_DEFINE5()
12556 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { in SYSCALL_DEFINE5()
12565 group_leader->attr.inherit != attr.inherit) { in SYSCALL_DEFINE5()
12566 err = -EINVAL; in SYSCALL_DEFINE5()
12573 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12575 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12576 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12580 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12581 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12582 err = -EOPNOTSUPP; in SYSCALL_DEFINE5()
12591 pmu = event->pmu; in SYSCALL_DEFINE5()
12594 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12599 if (pmu->task_ctx_nr == perf_sw_context) in SYSCALL_DEFINE5()
12600 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12603 err = down_read_interruptible(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12609 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
12613 err = -EACCES; in SYSCALL_DEFINE5()
12621 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
12627 mutex_lock(&ctx->mutex); in SYSCALL_DEFINE5()
12629 if (ctx->task == TASK_TOMBSTONE) { in SYSCALL_DEFINE5()
12630 err = -ESRCH; in SYSCALL_DEFINE5()
12636 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12641 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
12643 if (!cpuctx->online) { in SYSCALL_DEFINE5()
12644 err = -ENODEV; in SYSCALL_DEFINE5()
12650 err = -EINVAL; in SYSCALL_DEFINE5()
12654 * becoming part of another group-sibling): in SYSCALL_DEFINE5()
12656 if (group_leader->group_leader != group_leader) in SYSCALL_DEFINE5()
12660 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12668 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
12674 if (group_leader->ctx != ctx) in SYSCALL_DEFINE5()
12683 if (is_software_event(event) && in SYSCALL_DEFINE5()
12686 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
12696 pmu = group_leader->pmu_ctx->pmu; in SYSCALL_DEFINE5()
12697 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
12699 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { in SYSCALL_DEFINE5()
12702 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
12710 group_leader->pmu_ctx->pmu != pmu) in SYSCALL_DEFINE5()
12718 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
12723 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
12726 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
12731 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12732 err = -E2BIG; in SYSCALL_DEFINE5()
12736 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
12737 err = -EINVAL; in SYSCALL_DEFINE5()
12743 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
12745 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12746 err = -EBUSY; in SYSCALL_DEFINE5()
12750 WARN_ON_ONCE(ctx->parent_ctx); in SYSCALL_DEFINE5()
12752 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
12766 put_pmu_ctx(group_leader->pmu_ctx); in SYSCALL_DEFINE5()
12770 put_pmu_ctx(sibling->pmu_ctx); in SYSCALL_DEFINE5()
12777 * (through the sibling list, which is still intact), we can in SYSCALL_DEFINE5()
12780 * By installing siblings first we NO-OP because they're not in SYSCALL_DEFINE5()
12784 sibling->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
12787 perf_install_in_context(ctx, sibling, sibling->cpu); in SYSCALL_DEFINE5()
12792 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12795 group_leader->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
12798 perf_install_in_context(ctx, group_leader, group_leader->cpu); in SYSCALL_DEFINE5()
12804 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
12807 perf_event__header_size(event); in SYSCALL_DEFINE5()
12808 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
12810 event->owner = current; in SYSCALL_DEFINE5()
12812 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12815 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
12818 up_read(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12822 mutex_lock(&current->perf_event_mutex); in SYSCALL_DEFINE5()
12823 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
12824 mutex_unlock(&current->perf_event_mutex); in SYSCALL_DEFINE5()
12828 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
12837 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
12838 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
12840 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
12845 up_read(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12847 free_event(event); in SYSCALL_DEFINE5()
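Since glibc provides no wrapper for sys_perf_event_open(), user space invokes it via syscall(2). A minimal sketch of opening a counter on the calling thread, any CPU, with no group leader (error handling omitted):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Sketch: raw wrapper around the syscall defined above. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* e.g.: int fd = perf_event_open(&attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC); */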
12864 * @overflow_handler: callback to trigger when we hit the event
12875 struct perf_event *event; in perf_event_create_kernel_counter() local
12883 if (attr->aux_output) in perf_event_create_kernel_counter()
12884 return ERR_PTR(-EINVAL); in perf_event_create_kernel_counter()
12886 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12887 overflow_handler, context, -1); in perf_event_create_kernel_counter()
12888 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
12889 err = PTR_ERR(event); in perf_event_create_kernel_counter()
12894 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
12895 pmu = event->pmu; in perf_event_create_kernel_counter()
12897 if (pmu->task_ctx_nr == perf_sw_context) in perf_event_create_kernel_counter()
12898 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
12903 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
12909 WARN_ON_ONCE(ctx->parent_ctx); in perf_event_create_kernel_counter()
12910 mutex_lock(&ctx->mutex); in perf_event_create_kernel_counter()
12911 if (ctx->task == TASK_TOMBSTONE) { in perf_event_create_kernel_counter()
12912 err = -ESRCH; in perf_event_create_kernel_counter()
12916 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
12921 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
12925 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
12932 if (!cpuctx->online) { in perf_event_create_kernel_counter()
12933 err = -ENODEV; in perf_event_create_kernel_counter()
12938 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12939 err = -EBUSY; in perf_event_create_kernel_counter()
12943 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12945 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
12947 return event; in perf_event_create_kernel_counter()
12951 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
12953 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
12957 free_event(event); in perf_event_create_kernel_counter()
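perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall, used by code such as the hard-lockup watchdog. A hedged sketch of opening a per-CPU cycle counter from kernel code; open_cycle_counter() is a hypothetical helper, not an existing API:

#include <linux/perf_event.h>

/* Sketch: count CPU cycles on @cpu.  A NULL task makes it a per-CPU
 * event; a NULL overflow handler means the count is only ever read. */
static struct perf_event *open_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
		.pinned	= 1,
	};

	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}

The returned event can later be read with perf_event_read_value() and torn down with perf_event_release_kernel().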
12968 struct perf_event *event, *sibling; in __perf_pmu_remove() local
12970 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
12971 perf_remove_from_context(event, 0); in __perf_pmu_remove()
12972 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
12973 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
12975 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
12977 put_pmu_ctx(sibling->pmu_ctx); in __perf_pmu_remove()
12978 list_add(&sibling->migrate_entry, events); in __perf_pmu_remove()
12985 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
12988 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
12992 event->cpu = cpu; in __perf_pmu_install_event()
12993 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
12994 event->pmu_ctx = epc; in __perf_pmu_install_event()
12996 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
12997 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
12998 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
13001 * Now that event->ctx is updated and visible, put the old ctx. in __perf_pmu_install_event()
13009 struct perf_event *event, *tmp; in __perf_pmu_install() local
13012 * Re-instate events in 2 passes. in __perf_pmu_install()
13019 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13020 if (event->group_leader == event) in __perf_pmu_install()
13023 list_del(&event->migrate_entry); in __perf_pmu_install()
13024 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13031 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13032 list_del(&event->migrate_entry); in __perf_pmu_install()
13033 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13043 * Since per-cpu context is persistent, no need to grab an extra in perf_pmu_migrate_context()
13046 src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
13047 dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
13053 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); in perf_pmu_migrate_context()
13055 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events); in perf_pmu_migrate_context()
13056 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events); in perf_pmu_migrate_context()
13060 * Wait for the events to quiesce before re-instating them. in perf_pmu_migrate_context()
13067 mutex_unlock(&dst_ctx->mutex); in perf_pmu_migrate_context()
13068 mutex_unlock(&src_ctx->mutex); in perf_pmu_migrate_context()
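perf_pmu_migrate_context() is exported for uncore-style PMUs whose events are bound to one CPU per package; when that CPU goes offline, the driver moves the events to a surviving CPU. A hedged sketch of the usual hotplug-offline pattern; example_pmu and the callback name are hypothetical:

#include <linux/cpumask.h>
#include <linux/perf_event.h>

static struct pmu example_pmu;	/* hypothetical driver PMU */

/* Sketch: on offline of 'cpu', hand its events to another online CPU. */
static int example_uncore_offline(unsigned int cpu)
{
	unsigned int target = cpumask_any_but(cpu_online_mask, cpu);

	if (target < nr_cpu_ids)
		perf_pmu_migrate_context(&example_pmu, cpu, target);
	return 0;
}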
13074 struct perf_event *parent_event = child_event->parent; in sync_child_event()
13077 if (child_event->attr.inherit_stat) { in sync_child_event()
13078 struct task_struct *task = child_event->ctx->task; in sync_child_event()
13089 atomic64_add(child_val, &parent_event->child_count); in sync_child_event()
13090 atomic64_add(child_event->total_time_enabled, in sync_child_event()
13091 &parent_event->child_total_time_enabled); in sync_child_event()
13092 atomic64_add(child_event->total_time_running, in sync_child_event()
13093 &parent_event->child_total_time_running); in sync_child_event()
13097 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
13099 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
13116 mutex_lock(&parent_event->child_mutex); in perf_event_exit_event()
13119 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
13121 raw_spin_lock_irq(&ctx->lock); in perf_event_exit_event()
13122 if (event->state > PERF_EVENT_STATE_EXIT) in perf_event_exit_event()
13123 perf_event_set_state(event, PERF_EVENT_STATE_EXIT); in perf_event_exit_event()
13124 raw_spin_unlock_irq(&ctx->lock); in perf_event_exit_event()
13130 mutex_unlock(&parent_event->child_mutex); in perf_event_exit_event()
13135 free_event(event); in perf_event_exit_event()
13143 perf_event_wakeup(event); in perf_event_exit_event()
13158 * In order to reduce the amount of trickiness in ctx tear-down, we hold in perf_event_exit_task_context()
13167 mutex_lock(&child_ctx->mutex); in perf_event_exit_task_context()
13170 * In a single ctx::lock section, de-schedule the events and detach the in perf_event_exit_task_context()
13174 raw_spin_lock_irq(&child_ctx->lock); in perf_event_exit_task_context()
13178 * Now that the context is inactive, destroy the task <-> ctx relation in perf_event_exit_task_context()
13181 RCU_INIT_POINTER(child->perf_event_ctxp, NULL); in perf_event_exit_task_context()
13183 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); in perf_event_exit_task_context()
13187 raw_spin_unlock_irq(&child_ctx->lock); in perf_event_exit_task_context()
13199 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) in perf_event_exit_task_context()
13202 mutex_unlock(&child_ctx->mutex); in perf_event_exit_task_context()
13208 * When a child task exits, feed back event values to parent events.
13215 struct perf_event *event, *tmp; in perf_event_exit_task() local
13217 mutex_lock(&child->perf_event_mutex); in perf_event_exit_task()
13218 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
13220 list_del_init(&event->owner_entry); in perf_event_exit_task()
13225 * we need to serialize on the owner->perf_event_mutex. in perf_event_exit_task()
13227 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
13229 mutex_unlock(&child->perf_event_mutex); in perf_event_exit_task()
13236 * child contexts and sets child->perf_event_ctxp to NULL. in perf_event_exit_task()
13242 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13245 struct perf_event *parent = event->parent; in perf_free_event()
13250 mutex_lock(&parent->child_mutex); in perf_free_event()
13251 list_del_init(&event->child_list); in perf_free_event()
13252 mutex_unlock(&parent->child_mutex); in perf_free_event()
13256 raw_spin_lock_irq(&ctx->lock); in perf_free_event()
13257 perf_group_detach(event); in perf_free_event()
13258 list_del_event(event, ctx); in perf_free_event()
13259 raw_spin_unlock_irq(&ctx->lock); in perf_free_event()
13260 free_event(event); in perf_free_event()
13273 struct perf_event *event, *tmp; in perf_event_free_task() local
13275 ctx = rcu_access_pointer(task->perf_event_ctxp); in perf_event_free_task()
13279 mutex_lock(&ctx->mutex); in perf_event_free_task()
13280 raw_spin_lock_irq(&ctx->lock); in perf_event_free_task()
13282 * Destroy the task <-> ctx relation and mark the context dead. in perf_event_free_task()
13287 RCU_INIT_POINTER(task->perf_event_ctxp, NULL); in perf_event_free_task()
13288 WRITE_ONCE(ctx->task, TASK_TOMBSTONE); in perf_event_free_task()
13290 raw_spin_unlock_irq(&ctx->lock); in perf_event_free_task()
13293 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13294 perf_free_event(event, ctx); in perf_event_free_task()
13296 mutex_unlock(&ctx->mutex); in perf_event_free_task()
13307 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
13308 * use-after-free. in perf_event_free_task()
13312 wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1); in perf_event_free_task()
13318 WARN_ON_ONCE(task->perf_event_ctxp); in perf_event_delayed_put()
13325 return ERR_PTR(-EBADF); in perf_event_get()
13327 if (file->f_op != &perf_fops) { in perf_event_get()
13329 return ERR_PTR(-EBADF); in perf_event_get()
13337 if (file->f_op != &perf_fops) in perf_get_event()
13338 return ERR_PTR(-EINVAL); in perf_get_event()
13340 return file->private_data; in perf_get_event()
13343 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13345 if (!event) in perf_event_attrs()
13346 return ERR_PTR(-EINVAL); in perf_event_attrs()
13348 return &event->attr; in perf_event_attrs()
13354 return -EACCES; in perf_allow_kernel()
13361 * Inherit an event from parent task to child task.
13364 * - valid pointer on success
13365 * - NULL for orphaned events
13366 * - IS_ERR() on error
13376 enum perf_event_state parent_state = parent_event->state; in inherit_event()
13387 if (parent_event->parent) in inherit_event()
13388 parent_event = parent_event->parent; in inherit_event()
13390 child_event = perf_event_alloc(&parent_event->attr, in inherit_event()
13391 parent_event->cpu, in inherit_event()
13394 NULL, NULL, -1); in inherit_event()
13398 pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event); in inherit_event()
13403 child_event->pmu_ctx = pmu_ctx; in inherit_event()
13406 * is_orphaned_event() and list_add_tail(&parent_event->child_list) in inherit_event()
13411 mutex_lock(&parent_event->child_mutex); in inherit_event()
13413 !atomic_long_inc_not_zero(&parent_event->refcount)) { in inherit_event()
13414 mutex_unlock(&parent_event->child_mutex); in inherit_event()
13423 * Make the child state follow the state of the parent event, in inherit_event()
13428 child_event->state = PERF_EVENT_STATE_INACTIVE; in inherit_event()
13430 child_event->state = PERF_EVENT_STATE_OFF; in inherit_event()
13432 if (parent_event->attr.freq) { in inherit_event()
13433 u64 sample_period = parent_event->hw.sample_period; in inherit_event()
13434 struct hw_perf_event *hwc = &child_event->hw; in inherit_event()
13436 hwc->sample_period = sample_period; in inherit_event()
13437 hwc->last_period = sample_period; in inherit_event()
13439 local64_set(&hwc->period_left, sample_period); in inherit_event()
13442 child_event->ctx = child_ctx; in inherit_event()
13443 child_event->overflow_handler = parent_event->overflow_handler; in inherit_event()
13444 child_event->overflow_handler_context in inherit_event()
13445 = parent_event->overflow_handler_context; in inherit_event()
13456 raw_spin_lock_irqsave(&child_ctx->lock, flags); in inherit_event()
13458 child_event->attach_state |= PERF_ATTACH_CHILD; in inherit_event()
13459 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); in inherit_event()
13462 * Link this into the parent event's child list in inherit_event()
13464 list_add_tail(&child_event->child_list, &parent_event->child_list); in inherit_event()
13465 mutex_unlock(&parent_event->child_mutex); in inherit_event()
13471 * Inherits an event group.
13477 * - 0 on success
13478 * - <0 on error
13505 if (sub->aux_event == parent_event && child_ctr && in inherit_group()
13507 return -EINVAL; in inherit_group()
13510 leader->group_generation = parent_event->group_generation; in inherit_group()
13515 * Creates the child task context and tries to inherit the event-group.
13518 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13522 * - 0 on success
13523 * - <0 on error
13526 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13534 if (!event->attr.inherit || in inherit_task_group()
13535 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13537 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13542 child_ctx = child->perf_event_ctxp; in inherit_task_group()
13552 return -ENOMEM; in inherit_task_group()
13554 child->perf_event_ctxp = child_ctx; in inherit_task_group()
13557 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
13571 struct perf_event *event; in perf_event_init_context() local
13577 if (likely(!parent->perf_event_ctxp)) in perf_event_init_context()
13590 * it non-NULL earlier, the only reason for it to become NULL in perf_event_init_context()
13596 * Lock the parent list. No need to lock the child - not PID in perf_event_init_context()
13599 mutex_lock(&parent_ctx->mutex); in perf_event_init_context()
13602 * We don't have to disable NMIs - we are only looking at in perf_event_init_context()
13605 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13606 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13613 * We can't hold ctx->lock when iterating the ->flexible_group list due in perf_event_init_context()
13617 raw_spin_lock_irqsave(&parent_ctx->lock, flags); in perf_event_init_context()
13618 parent_ctx->rotate_disable = 1; in perf_event_init_context()
13619 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); in perf_event_init_context()
13621 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13622 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13628 raw_spin_lock_irqsave(&parent_ctx->lock, flags); in perf_event_init_context()
13629 parent_ctx->rotate_disable = 0; in perf_event_init_context()
13631 child_ctx = child->perf_event_ctxp; in perf_event_init_context()
13639 * parent_ctx->lock prevents it from being uncloned. in perf_event_init_context()
13641 cloned_ctx = parent_ctx->parent_ctx; in perf_event_init_context()
13643 child_ctx->parent_ctx = cloned_ctx; in perf_event_init_context()
13644 child_ctx->parent_gen = parent_ctx->parent_gen; in perf_event_init_context()
13646 child_ctx->parent_ctx = parent_ctx; in perf_event_init_context()
13647 child_ctx->parent_gen = parent_ctx->generation; in perf_event_init_context()
13649 get_ctx(child_ctx->parent_ctx); in perf_event_init_context()
13652 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); in perf_event_init_context()
13654 mutex_unlock(&parent_ctx->mutex); in perf_event_init_context()
13669 child->perf_event_ctxp = NULL; in perf_event_init_task()
13670 mutex_init(&child->perf_event_mutex); in perf_event_init_task()
13671 INIT_LIST_HEAD(&child->perf_event_list); in perf_event_init_task()
13692 mutex_init(&swhash->hlist_mutex); in perf_event_init_all_cpus()
13700 __perf_event_init_context(&cpuctx->ctx); in perf_event_init_all_cpus()
13701 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_event_init_all_cpus()
13702 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_event_init_all_cpus()
13703 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); in perf_event_init_all_cpus()
13704 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); in perf_event_init_all_cpus()
13705 cpuctx->heap = cpuctx->heap_default; in perf_event_init_all_cpus()
13713 mutex_lock(&swhash->hlist_mutex); in perf_swevent_init_cpu()
13714 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) { in perf_swevent_init_cpu()
13719 rcu_assign_pointer(swhash->swevent_hlist, hlist); in perf_swevent_init_cpu()
13721 mutex_unlock(&swhash->hlist_mutex); in perf_swevent_init_cpu()
13729 struct perf_event *event; in __perf_event_exit_context() local
13731 raw_spin_lock(&ctx->lock); in __perf_event_exit_context()
13733 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13734 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()
13735 raw_spin_unlock(&ctx->lock); in __perf_event_exit_context()
13743 // XXX simplify cpuctx->online in perf_event_exit_cpu_context()
13746 ctx = &cpuctx->ctx; in perf_event_exit_cpu_context()
13748 mutex_lock(&ctx->mutex); in perf_event_exit_cpu_context()
13750 cpuctx->online = 0; in perf_event_exit_cpu_context()
13751 mutex_unlock(&ctx->mutex); in perf_event_exit_cpu_context()
13771 ctx = &cpuctx->ctx; in perf_event_init_cpu()
13773 mutex_lock(&ctx->mutex); in perf_event_init_cpu()
13774 cpuctx->online = 1; in perf_event_init_cpu()
13775 mutex_unlock(&ctx->mutex); in perf_event_init_cpu()
13816 perf_pmu_register(&perf_cpu_clock, "cpu_clock", -1); in perf_event_init()
13817 perf_pmu_register(&perf_task_clock, "task_clock", -1); in perf_event_init()
13841 if (pmu_attr->event_str) in perf_event_sysfs_show()
13842 return sprintf(page, "%s\n", pmu_attr->event_str); in perf_event_sysfs_show()
13860 if (pmu->dev) in perf_event_sysfs_init()
13864 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); in perf_event_sysfs_init()
13884 return ERR_PTR(-ENOMEM); in perf_cgroup_css_alloc()
13886 jc->info = alloc_percpu(struct perf_cgroup_info); in perf_cgroup_css_alloc()
13887 if (!jc->info) { in perf_cgroup_css_alloc()
13889 return ERR_PTR(-ENOMEM); in perf_cgroup_css_alloc()
13892 return &jc->css; in perf_cgroup_css_alloc()
13899 free_percpu(jc->info); in perf_cgroup_css_free()
13905 perf_event_cgroup(css->cgroup); in perf_cgroup_css_online()