Lines Matching full:event

176 static bool is_kernel_event(struct perf_event *event)
178 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
198 * - removing the last event from a task ctx; this is relatively straight
201 * - adding the first event to a task ctx; this is tricky because we cannot
212 struct perf_event *event;
220 struct perf_event *event = efs->event;
221 struct perf_event_context *ctx = event->ctx;
256 efs->func(event, cpuctx, ctx, efs->data);
263 static void event_function_call(struct perf_event *event, event_f func, void *data)
265 struct perf_event_context *ctx = event->ctx;
269 .event = event,
274 if (!event->parent) {
276 * If this is a !child event, we must hold ctx::mutex to
277 * stabilize the event->ctx relation. See
284 cpu_function_call(event->cpu, event_function, &efs);
310 func(event, NULL, ctx, data);
320 static void event_function_local(struct perf_event *event, event_f func, void *data)
322 struct perf_event_context *ctx = event->ctx;
359 func(event, cpuctx, ctx, data);
417 * perf event paranoia level:
429 * max perf event sample rate
579 static u64 perf_event_time(struct perf_event *event);
588 static inline u64 perf_event_clock(struct perf_event *event)
590 return event->clock();
594 * State based event timekeeping...
596 * The basic idea is to use event->state to determine which (if any) time
601 * Event groups make things a little more complicated, but not terribly so. The
616 __perf_effective_state(struct perf_event *event)
618 struct perf_event *leader = event->group_leader;
623 return event->state;
627 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
629 enum perf_event_state state = __perf_effective_state(event);
630 u64 delta = now - event->tstamp;
632 *enabled = event->total_time_enabled;
636 *running = event->total_time_running;
641 static void perf_event_update_time(struct perf_event *event)
643 u64 now = perf_event_time(event);
645 __perf_update_times(event, now, &event->total_time_enabled,
646 &event->total_time_running);
647 event->tstamp = now;
659 perf_event_set_state(struct perf_event *event, enum perf_event_state state)
661 if (event->state == state)
664 perf_event_update_time(event);
669 if ((event->state < 0) ^ (state < 0))
670 perf_event_update_sibling_time(event);
672 WRITE_ONCE(event->state, state);
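
The timekeeping fragments above only show the lines that match "event"; as a reading aid, here is a minimal standalone sketch of the idea they implement (accumulate enabled time while the effective state is at least INACTIVE, running time while ACTIVE, and bank the elapsed delta on every state change), using simplified hypothetical types rather than the real struct perf_event:

    #include <stdint.h>

    /* Simplified stand-ins for the kernel's perf_event_state values. */
    enum ev_state { EV_OFF = -1, EV_INACTIVE = 0, EV_ACTIVE = 1 };

    struct ev {                      /* hypothetical, not the kernel struct */
        enum ev_state state;
        uint64_t tstamp;             /* when the current state was entered  */
        uint64_t total_enabled;      /* time spent at >= INACTIVE           */
        uint64_t total_running;      /* time spent ACTIVE                   */
    };

    /* Fold the time since the last state change into the totals. */
    static void ev_update_time(struct ev *e, uint64_t now)
    {
        uint64_t delta = now - e->tstamp;

        if (e->state >= EV_INACTIVE)
            e->total_enabled += delta;
        if (e->state >= EV_ACTIVE)
            e->total_running += delta;
        e->tstamp = now;
    }

    /* A state change first banks the elapsed time, then switches state. */
    static void ev_set_state(struct ev *e, enum ev_state state, uint64_t now)
    {
        if (e->state == state)
            return;
        ev_update_time(e, now);
        e->state = state;
    }
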
720 perf_cgroup_match(struct perf_event *event)
724 /* @event doesn't care about cgroup */
725 if (!event->cgrp)
733 * Cgroup scoping is recursive. An event enabled for a cgroup is
735 * cgroup is a descendant of @event's (the test covers identity
739 event->cgrp->css.cgroup);
742 static inline void perf_detach_cgroup(struct perf_event *event)
744 css_put(&event->cgrp->css);
745 event->cgrp = NULL;
748 static inline int is_cgroup_event(struct perf_event *event)
750 return event->cgrp != NULL;
753 static inline u64 perf_cgroup_event_time(struct perf_event *event)
757 t = per_cpu_ptr(event->cgrp->info, event->cpu);
761 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
765 t = per_cpu_ptr(event->cgrp->info, event->cpu);
803 static inline void update_cgrp_time_from_event(struct perf_event *event)
811 if (!is_cgroup_event(event))
814 info = this_cpu_ptr(event->cgrp->info);
857 * cpuctx->cgrp is set when the first cgroup event is enabled,
858 * and is cleared when the last cgroup event is disabled.
890 static int perf_cgroup_ensure_storage(struct perf_event *event,
931 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
950 ret = perf_cgroup_ensure_storage(event, css);
955 event->cgrp = cgrp;
963 perf_detach_cgroup(event);
972 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
976 if (!is_cgroup_event(event))
979 event->pmu_ctx->nr_cgroups++;
994 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
998 if (!is_cgroup_event(event))
1001 event->pmu_ctx->nr_cgroups--;
1018 perf_cgroup_match(struct perf_event *event)
1023 static inline void perf_detach_cgroup(struct perf_event *event)
1026 static inline int is_cgroup_event(struct perf_event *event)
1031 static inline void update_cgrp_time_from_event(struct perf_event *event)
1040 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1052 static inline u64 perf_cgroup_event_time(struct perf_event *event)
1057 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
1063 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1068 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
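
The cgroup plumbing above (perf_cgroup_connect() and friends) is reached from userspace via perf_event_open(2) with PERF_FLAG_PID_CGROUP, where the "pid" argument is a file descriptor for the cgroup directory. A hedged sketch, with the cgroup path and event choice as placeholders; cgroup events are opened per CPU, matching the "system-wide mode only" comment further down:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int cgrp_fd, ev_fd;

        /* The fd of the cgroup directory acts as the "pid" argument. */
        cgrp_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY);   /* placeholder path */
        if (cgrp_fd < 0) { perror("open cgroup"); return 1; }

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* cgroup events are per-CPU: cpu must be >= 0, here CPU 0 only. */
        ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 0, -1,
                        PERF_FLAG_PID_CGROUP);
        if (ev_fd < 0) { perror("perf_event_open"); return 1; }

        /* ... read(ev_fd, ...) as with any other counter ... */
        close(ev_fd);
        close(cgrp_fd);
        return 0;
    }
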
1236 * because the sys_perf_event_open() case will install a new event and break
1247 * quiesce the event, after which we can install it in the new location. This
1248 * means that only external vectors (perf_fops, prctl) can perturb the event
1252 * However; because event->ctx can change while we're waiting to acquire
1272 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1278 ctx = READ_ONCE(event->ctx);
1286 if (event->ctx != ctx) {
1296 perf_event_ctx_lock(struct perf_event *event)
1298 return perf_event_ctx_lock_nested(event, 0);
1301 static void perf_event_ctx_unlock(struct perf_event *event,
1327 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1334 if (event->parent)
1335 event = event->parent;
1337 nr = __task_pid_nr_ns(p, type, event->ns);
1344 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1346 return perf_event_pid_type(event, p, PIDTYPE_TGID);
1349 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1351 return perf_event_pid_type(event, p, PIDTYPE_PID);
1355 * If we inherit events we want to return the parent event id
1358 static u64 primary_event_id(struct perf_event *event)
1360 u64 id = event->id;
1362 if (event->parent)
1363 id = event->parent->id;
1483 static u64 perf_event_time(struct perf_event *event)
1485 struct perf_event_context *ctx = event->ctx;
1490 if (is_cgroup_event(event))
1491 return perf_cgroup_event_time(event);
1496 static u64 perf_event_time_now(struct perf_event *event, u64 now)
1498 struct perf_event_context *ctx = event->ctx;
1503 if (is_cgroup_event(event))
1504 return perf_cgroup_event_time_now(event, now);
1513 static enum event_type_t get_event_type(struct perf_event *event)
1515 struct perf_event_context *ctx = event->ctx;
1524 if (event->group_leader != event)
1525 event = event->group_leader;
1527 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1535 * Helper function to initialize event group nodes.
1537 static void init_event_group(struct perf_event *event)
1539 RB_CLEAR_NODE(&event->group_node);
1540 event->group_index = 0;
1545 * based on event attrs bits.
1548 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
1550 if (event->attr.pinned)
1565 static inline struct cgroup *event_cgroup(const struct perf_event *event)
1570 if (event->cgrp)
1571 cgroup = event->cgrp->css.cgroup;
1578 * Compare function for event groups;
1673 * Insert @event into @groups' tree; using
1674 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1679 struct perf_event *event)
1681 event->group_index = ++groups->index;
1683 rb_add(&event->group_node, &groups->tree, __group_less);
1687 * Helper function to insert event into the pinned or flexible groups.
1690 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1694 groups = get_event_groups(event, ctx);
1695 perf_event_groups_insert(groups, event);
1703 struct perf_event *event)
1705 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1708 rb_erase(&event->group_node, &groups->tree);
1709 init_event_group(event);
1713 * Helper function to delete event from its groups.
1716 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1720 groups = get_event_groups(event, ctx);
1721 perf_event_groups_delete(groups, event);
1725 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1746 perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
1749 .cpu = event->cpu,
1751 .cgroup = event_cgroup(event),
1755 next = rb_next_match(&key, &event->group_node, __group_cmp);
1762 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
1763 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1764 event; event = perf_event_groups_next(event, pmu))
1769 #define perf_event_groups_for_each(event, groups) \
1770 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1771 typeof(*event), group_node); event; \
1772 event = rb_entry_safe(rb_next(&event->group_node), \
1773 typeof(*event), group_node))
1776 * Add an event to the lists for its context.
1780 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1784 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1785 event->attach_state |= PERF_ATTACH_CONTEXT;
1787 event->tstamp = perf_event_time(event);
1790 * If we're a stand alone event or group leader, we go to the context
1794 if (event->group_leader == event) {
1795 event->group_caps = event->event_caps;
1796 add_event_to_groups(event, ctx);
1799 list_add_rcu(&event->event_entry, &ctx->event_list);
1801 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
1803 if (event->attr.inherit_stat)
1806 if (event->state > PERF_EVENT_STATE_OFF)
1807 perf_cgroup_event_enable(event, ctx);
1810 event->pmu_ctx->nr_events++;
1814 * Initialize event state based on the perf_event_attr::disabled.
1816 static inline void perf_event__state_init(struct perf_event *event)
1818 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1852 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1870 size += event->read_size;
1890 event->header_size = size;
1897 static void perf_event__header_size(struct perf_event *event)
1899 event->read_size =
1900 __perf_event_read_size(event->attr.read_format,
1901 event->group_leader->nr_siblings);
1902 __perf_event_header_size(event, event->attr.sample_type);
1905 static void perf_event__id_header_size(struct perf_event *event)
1908 u64 sample_type = event->attr.sample_type;
1929 event->id_header_size = size;
1933 * Check that adding an event to the group does not result in anybody
1934 * overflowing the 64k event limit imposed by the output buffer.
1936 * Specifically, check that the read_size for the event does not exceed 16k,
1938 * depends on per-event read_format, also (re)check the existing events.
1943 static bool perf_event_validate_size(struct perf_event *event)
1945 struct perf_event *sibling, *group_leader = event->group_leader;
1947 if (__perf_event_read_size(event->attr.read_format,
1962 if (event == group_leader)
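
The 16k/64k limits above are checks on the size of the read() record for a given read_format; a small sketch of that arithmetic, assuming the record layout documented in perf_event_open(2) (it should mirror __perf_event_read_size(), whose body does not appear among the matches):

    #include <stdint.h>
    #include <linux/perf_event.h>

    /* Bytes returned by read() for one event, given its read_format and,
     * for PERF_FORMAT_GROUP, the number of siblings in its group. */
    static uint64_t read_size(uint64_t read_format, int nr_siblings)
    {
        uint64_t entry = sizeof(uint64_t);    /* value */
        uint64_t size = 0;
        int nr = 1;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
            size += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
            size += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_ID)
            entry += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_LOST)
            entry += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_GROUP) {
            nr += nr_siblings;
            size += sizeof(uint64_t);         /* nr */
        }
        return size + nr * entry;
    }
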
1974 static void perf_group_attach(struct perf_event *event)
1976 struct perf_event *group_leader = event->group_leader, *pos;
1978 lockdep_assert_held(&event->ctx->lock);
1984 if (event->attach_state & PERF_ATTACH_GROUP)
1987 event->attach_state |= PERF_ATTACH_GROUP;
1989 if (group_leader == event)
1992 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1994 group_leader->group_caps &= event->event_caps;
1996 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
2007 * Remove an event from the lists for its context.
2011 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
2013 WARN_ON_ONCE(event->ctx != ctx);
2019 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
2022 event->attach_state &= ~PERF_ATTACH_CONTEXT;
2025 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
2027 if (event->attr.inherit_stat)
2030 list_del_rcu(&event->event_entry);
2032 if (event->group_leader == event)
2033 del_event_from_groups(event, ctx);
2036 * If event was in error state, then keep it
2040 * of the event
2042 if (event->state > PERF_EVENT_STATE_OFF) {
2043 perf_cgroup_event_disable(event, ctx);
2044 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2048 event->pmu_ctx->nr_events--;
2052 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
2057 if (!event->pmu->aux_output_match)
2060 return event->pmu->aux_output_match(aux_event);
2063 static void put_event(struct perf_event *event);
2064 static void event_sched_out(struct perf_event *event,
2067 static void perf_put_aux_event(struct perf_event *event)
2069 struct perf_event_context *ctx = event->ctx;
2073 * If event uses aux_event tear down the link
2075 if (event->aux_event) {
2076 iter = event->aux_event;
2077 event->aux_event = NULL;
2083 * If the event is an aux_event, tear down all links to
2086 for_each_sibling_event(iter, event->group_leader) {
2087 if (iter->aux_event != event)
2091 put_event(event);
2099 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2103 static bool perf_need_aux_event(struct perf_event *event)
2105 return !!event->attr.aux_output || !!event->attr.aux_sample_size;
2108 static int perf_get_aux_event(struct perf_event *event,
2112 * Our group leader must be an aux event if we want to be
2113 * an aux_output. This way, the aux event will precede its
2123 if (event->attr.aux_output && event->attr.aux_sample_size)
2126 if (event->attr.aux_output &&
2127 !perf_aux_output_match(event, group_leader))
2130 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
2137 * Link aux_outputs to their aux event; this is undone in
2142 event->aux_event = group_leader;
2147 static inline struct list_head *get_event_list(struct perf_event *event)
2149 return event->attr.pinned ? &event->pmu_ctx->pinned_active :
2150 &event->pmu_ctx->flexible_active;
2159 static inline void perf_remove_sibling_event(struct perf_event *event)
2161 event_sched_out(event, event->ctx);
2162 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2165 static void perf_group_detach(struct perf_event *event)
2167 struct perf_event *leader = event->group_leader;
2169 struct perf_event_context *ctx = event->ctx;
2176 if (!(event->attach_state & PERF_ATTACH_GROUP))
2179 event->attach_state &= ~PERF_ATTACH_GROUP;
2181 perf_put_aux_event(event);
2186 if (leader != event) {
2187 list_del_init(&event->sibling_list);
2188 event->group_leader->nr_siblings--;
2189 event->group_leader->group_generation++;
2194 * If this was a group event with sibling events then
2198 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
2207 sibling->group_caps = event->group_caps;
2210 add_event_to_groups(sibling, event->ctx);
2216 WARN_ON_ONCE(sibling->ctx != event->ctx);
2228 static void perf_child_detach(struct perf_event *event)
2230 struct perf_event *parent_event = event->parent;
2232 if (!(event->attach_state & PERF_ATTACH_CHILD))
2235 event->attach_state &= ~PERF_ATTACH_CHILD;
2242 sync_child_event(event);
2243 list_del_init(&event->child_list);
2246 static bool is_orphaned_event(struct perf_event *event)
2248 return event->state == PERF_EVENT_STATE_DEAD;
2252 event_filter_match(struct perf_event *event)
2254 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
2255 perf_cgroup_match(event);
2259 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
2261 struct perf_event_pmu_context *epc = event->pmu_ctx;
2267 WARN_ON_ONCE(event->ctx != ctx);
2270 if (event->state != PERF_EVENT_STATE_ACTIVE)
2278 list_del_init(&event->active_list);
2280 perf_pmu_disable(event->pmu);
2282 event->pmu->del(event, 0);
2283 event->oncpu = -1;
2285 if (event->pending_disable) {
2286 event->pending_disable = 0;
2287 perf_cgroup_event_disable(event, ctx);
2291 if (event->pending_sigtrap) {
2292 event->pending_sigtrap = 0;
2294 !event->pending_work &&
2295 !task_work_add(current, &event->pending_task, TWA_RESUME)) {
2296 event->pending_work = 1;
2298 local_dec(&event->ctx->nr_pending);
2302 perf_event_set_state(event, state);
2304 if (!is_software_event(event))
2306 if (event->attr.freq && event->attr.sample_freq)
2308 if (event->attr.exclusive || !cpc->active_oncpu)
2311 perf_pmu_enable(event->pmu);
2317 struct perf_event *event;
2329 for_each_sibling_event(event, group_event)
2330 event_sched_out(event, ctx);
2338 * Cross CPU call to remove a performance event
2340 * We disable the event on the hardware level first. After that we
2344 __perf_remove_from_context(struct perf_event *event,
2349 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
2362 event->pending_disable = 1;
2363 event_sched_out(event, ctx);
2365 perf_group_detach(event);
2367 perf_child_detach(event);
2368 list_del_event(event, ctx);
2370 event->state = PERF_EVENT_STATE_DEAD;
2397 * Remove the event from a task's (or a CPU's) list of events.
2399 * If event->ctx is a cloned context, callers must make sure that
2400 * every task struct that event->ctx->task could possibly point to
2406 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
2408 struct perf_event_context *ctx = event->ctx;
2419 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
2426 event_function_call(event, __perf_remove_from_context, (void *)flags);
2430 * Cross CPU call to disable a performance event
2432 static void __perf_event_disable(struct perf_event *event,
2437 if (event->state < PERF_EVENT_STATE_INACTIVE)
2442 update_cgrp_time_from_event(event);
2445 perf_pmu_disable(event->pmu_ctx->pmu);
2447 if (event == event->group_leader)
2448 group_sched_out(event, ctx);
2450 event_sched_out(event, ctx);
2452 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2453 perf_cgroup_event_disable(event, ctx);
2455 perf_pmu_enable(event->pmu_ctx->pmu);
2459 * Disable an event.
2461 * If event->ctx is a cloned context, callers must make sure that
2462 * every task struct that event->ctx->task could possibly point to
2465 * hold the top-level event's child_mutex, so any descendant that
2468 * When called from perf_pending_irq it's OK because event->ctx
2472 static void _perf_event_disable(struct perf_event *event)
2474 struct perf_event_context *ctx = event->ctx;
2477 if (event->state <= PERF_EVENT_STATE_OFF) {
2483 event_function_call(event, __perf_event_disable, NULL);
2486 void perf_event_disable_local(struct perf_event *event)
2488 event_function_local(event, __perf_event_disable, NULL);
2495 void perf_event_disable(struct perf_event *event)
2499 ctx = perf_event_ctx_lock(event);
2500 _perf_event_disable(event);
2501 perf_event_ctx_unlock(event, ctx);
2505 void perf_event_disable_inatomic(struct perf_event *event)
2507 event->pending_disable = 1;
2508 irq_work_queue(&event->pending_irq);
2513 static void perf_log_throttle(struct perf_event *event, int enable);
2514 static void perf_log_itrace_start(struct perf_event *event);
2517 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
2519 struct perf_event_pmu_context *epc = event->pmu_ctx;
2523 WARN_ON_ONCE(event->ctx != ctx);
2527 if (event->state <= PERF_EVENT_STATE_OFF)
2530 WRITE_ONCE(event->oncpu, smp_processor_id());
2532 * Order event::oncpu write to happen before the ACTIVE state is
2537 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
2544 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2545 perf_log_throttle(event, 1);
2546 event->hw.interrupts = 0;
2549 perf_pmu_disable(event->pmu);
2551 perf_log_itrace_start(event);
2553 if (event->pmu->add(event, PERF_EF_START)) {
2554 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2555 event->oncpu = -1;
2560 if (!is_software_event(event))
2562 if (event->attr.freq && event->attr.sample_freq)
2565 if (event->attr.exclusive)
2569 perf_pmu_enable(event->pmu);
2577 struct perf_event *event, *partial_group = NULL;
2591 for_each_sibling_event(event, group_event) {
2592 if (event_sched_in(event, ctx)) {
2593 partial_group = event;
2605 * The events up to the failed event are scheduled out normally.
2607 for_each_sibling_event(event, group_event) {
2608 if (event == partial_group)
2611 event_sched_out(event, ctx);
2621 * Work out whether we can put this event group on the CPU now.
2623 static int group_can_go_on(struct perf_event *event, int can_add_hw)
2625 struct perf_event_pmu_context *epc = event->pmu_ctx;
2631 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2643 if (event->attr.exclusive && !list_empty(get_event_list(event)))
2652 static void add_event_to_ctx(struct perf_event *event,
2655 list_add_event(event, ctx);
2656 perf_group_attach(event);
2692 * time an event is added, only do it for the groups of equal priority and
2701 * event to the context or enabling an existing event in the context. We can
2755 * Cross CPU call to install and enable a performance event
2762 struct perf_event *event = info;
2763 struct perf_event_context *ctx = event->ctx;
2794 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
2796 * If the current cgroup doesn't match the event's
2801 event->cgrp->css.cgroup);
2807 add_event_to_ctx(event, ctx);
2808 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2810 add_event_to_ctx(event, ctx);
2819 static bool exclusive_event_installable(struct perf_event *event,
2823 * Attach a performance event to a context.
2829 struct perf_event *event,
2836 WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
2838 if (event->cpu != -1)
2839 WARN_ON_ONCE(event->cpu != cpu);
2842 * Ensures that if we can observe event->ctx, both the event and ctx
2845 smp_store_release(&event->ctx, ctx);
2849 * without IPI. Except when this is the first event for the context, in
2853 * event will issue the IPI and reprogram the hardware.
2855 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
2856 ctx->nr_events && !is_cgroup_event(event)) {
2862 add_event_to_ctx(event, ctx);
2868 cpu_function_call(cpu, __perf_install_in_context, event);
2910 if (!task_function_call(task, __perf_install_in_context, event))
2926 * thus we can safely install the event.
2932 add_event_to_ctx(event, ctx);
2937 * Cross CPU call to enable a performance event
2939 static void __perf_event_enable(struct perf_event *event,
2944 struct perf_event *leader = event->group_leader;
2947 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2948 event->state <= PERF_EVENT_STATE_ERROR)
2954 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2955 perf_cgroup_event_enable(event, ctx);
2960 if (!event_filter_match(event)) {
2966 * If the event is in a group and isn't the group leader,
2969 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2978 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2982 * Enable an event.
2984 * If event->ctx is a cloned context, callers must make sure that
2985 * every task struct that event->ctx->task could possibly point to
2990 static void _perf_event_enable(struct perf_event *event)
2992 struct perf_event_context *ctx = event->ctx;
2995 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2996 event->state < PERF_EVENT_STATE_ERROR) {
3003 * If the event is in error state, clear that first.
3005 * That way, if we see the event in error state below, we know that it
3009 if (event->state == PERF_EVENT_STATE_ERROR) {
3013 if (event->event_caps & PERF_EV_CAP_SIBLING &&
3014 event->group_leader == event)
3017 event->state = PERF_EVENT_STATE_OFF;
3021 event_function_call(event, __perf_event_enable, NULL);
3027 void perf_event_enable(struct perf_event *event)
3031 ctx = perf_event_ctx_lock(event);
3032 _perf_event_enable(event);
3033 perf_event_ctx_unlock(event, ctx);
3038 struct perf_event *event;
3045 struct perf_event *event = sd->event;
3048 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3056 * so we need to check again lest we try to stop another CPU's event.
3058 if (READ_ONCE(event->oncpu) != smp_processor_id())
3061 event->pmu->stop(event, PERF_EF_UPDATE);
3069 * Since this is happening on an event-local CPU, no trace is lost
3073 event->pmu->start(event, 0);
3078 static int perf_event_stop(struct perf_event *event, int restart)
3081 .event = event,
3087 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3094 * We only want to restart ACTIVE events, so if the event goes
3095 * inactive here (event->oncpu==-1), there's nothing more to do;
3098 ret = cpu_function_call(READ_ONCE(event->oncpu),
3111 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3112 * (p2) when an event is scheduled in (pmu::add), it calls
3116 * If (p1) happens while the event is active, we restart it to force (p2).
3127 void perf_event_addr_filters_sync(struct perf_event *event)
3129 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
3131 if (!has_addr_filter(event))
3135 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
3136 event->pmu->addr_filters_sync(event);
3137 event->hw.addr_filters_gen = event->addr_filters_gen;
3143 static int _perf_event_refresh(struct perf_event *event, int refresh)
3148 if (event->attr.inherit || !is_sampling_event(event))
3151 atomic_add(refresh, &event->event_limit);
3152 _perf_event_enable(event);
3160 int perf_event_refresh(struct perf_event *event, int refresh)
3165 ctx = perf_event_ctx_lock(event);
3166 ret = _perf_event_refresh(event, refresh);
3167 perf_event_ctx_unlock(event, ctx);
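
_perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH ioctl, which arms a disabled sampling event for a fixed number of overflows. A hedged userspace sketch using SIGIO notification; the event type and period are placeholders:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t overflows;

    static void on_sigio(int sig) { overflows++; }

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.sample_period = 1000000;   /* placeholder period */
        attr.disabled = 1;
        attr.wakeup_events = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            return 1;

        signal(SIGIO, on_sigio);
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
        fcntl(fd, F_SETOWN, getpid());

        /* Enable for three overflows, then the kernel disables the event again. */
        ioctl(fd, PERF_EVENT_IOC_REFRESH, 3);
        /* ... workload ... */
        close(fd);
        return 0;
    }
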
3189 * Copy event-type-independent attributes that may be modified.
3197 static int perf_event_modify_attr(struct perf_event *event,
3204 if (event->attr.type != attr->type)
3207 switch (event->attr.type) {
3216 WARN_ON_ONCE(event->ctx->parent_ctx);
3218 mutex_lock(&event->child_mutex);
3220 * Event-type-independent attributes must be copied before event-type
3224 perf_event_modify_copy_attr(&event->attr, attr);
3225 err = func(event, attr);
3228 list_for_each_entry(child, &event->child_list, child_list) {
3235 mutex_unlock(&event->child_mutex);
3243 struct perf_event *event, *tmp;
3259 list_for_each_entry_safe(event, tmp,
3262 group_sched_out(event, ctx);
3266 list_for_each_entry_safe(event, tmp,
3269 group_sched_out(event, ctx);
3380 static void __perf_event_sync_stat(struct perf_event *event,
3385 if (!event->attr.inherit_stat)
3389 * Update the event value, we cannot use perf_event_read()
3392 * we know the event must be on the current CPU, therefore we
3395 if (event->state == PERF_EVENT_STATE_ACTIVE)
3396 event->pmu->read(event);
3398 perf_event_update_time(event);
3401 * In order to keep per-task stats reliable we need to flip the event
3405 value = local64_xchg(&event->count, value);
3408 swap(event->total_time_enabled, next_event->total_time_enabled);
3409 swap(event->total_time_running, next_event->total_time_running);
3414 perf_event_update_userpage(event);
3421 struct perf_event *event, *next_event;
3428 event = list_first_entry(&ctx->event_list,
3434 while (&event->event_entry != &ctx->event_list &&
3437 __perf_event_sync_stat(event, next_event);
3439 event = list_next_entry(event, event_entry);
3615 * This callback is relevant even to per-cpu events; for example multi event
3661 * We stop each event and update the event value in event->count.
3664 * sets the disabled bit in the control field of event _before_
3665 * accessing the event control register. If a NMI hits, then it will
3666 * not restart the event.
3682 * cgroup events are system-wide mode only
3708 static void __heap_add(struct min_heap *heap, struct perf_event *event)
3712 if (event) {
3713 itrs[heap->nr] = event;
3740 /* Space for per CPU and/or any CPU event iterators. */
3804 * Because the userpage is strictly per-event (there is no concept of context,
3810 static inline bool event_update_userpage(struct perf_event *event)
3812 if (likely(!atomic_read(&event->mmap_count)))
3815 perf_event_update_time(event);
3816 perf_event_update_userpage(event);
3823 struct perf_event *event;
3828 for_each_sibling_event(event, group_event)
3829 event_update_userpage(event);
3832 static int merge_sched_in(struct perf_event *event, void *data)
3834 struct perf_event_context *ctx = event->ctx;
3837 if (event->state <= PERF_EVENT_STATE_OFF)
3840 if (!event_filter_match(event))
3843 if (group_can_go_on(event, *can_add_hw)) {
3844 if (!group_sched_in(event, ctx))
3845 list_add_tail(&event->active_list, get_event_list(event));
3848 if (event->state == PERF_EVENT_STATE_INACTIVE) {
3850 if (event->attr.pinned) {
3851 perf_cgroup_event_disable(event, ctx);
3852 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
3856 event->pmu_ctx->rotate_necessary = 1;
3857 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
3859 group_update_userpage(event);
4003 * We restore the event value and then enable it.
4006 * sets the enabled bit in the control field of event _before_
4007 * accessing the event control register. If a NMI hits, then it will
4008 * keep the event running.
4022 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
4024 u64 frequency = event->attr.sample_freq;
4098 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
4100 struct hw_perf_event *hwc = &event->hw;
4104 period = perf_calculate_period(event, nsec, count);
4122 event->pmu->stop(event, PERF_EF_UPDATE);
4127 event->pmu->start(event, PERF_EF_RELOAD);
4139 struct perf_event *event;
4154 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4155 if (event->state != PERF_EVENT_STATE_ACTIVE)
4159 if (!event_filter_match(event))
4162 perf_pmu_disable(event->pmu);
4164 hwc = &event->hw;
4168 perf_log_throttle(event, 1);
4169 event->pmu->start(event, 0);
4172 if (!event->attr.freq || !event->attr.sample_freq)
4176 * stop the event and update event->count
4178 event->pmu->stop(event, PERF_EF_UPDATE);
4180 now = local64_read(&event->count);
4185 * restart the event
4187 * we have stopped the event so tell that
4192 perf_adjust_period(event, period, delta, false);
4194 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
4196 perf_pmu_enable(event->pmu);
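
perf_adjust_period() and the unthrottling loop above implement frequency mode; requesting it from userspace is just two perf_event_attr fields (the 4000 Hz target is a placeholder):

    #include <linux/perf_event.h>
    #include <string.h>

    /* Ask the kernel to auto-tune the period toward ~4000 samples/sec,
     * instead of using a fixed sample_period. */
    static void request_freq_mode(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_HARDWARE;
        attr->config = PERF_COUNT_HW_CPU_CYCLES;
        attr->freq = 1;                 /* interpret sample_freq, not sample_period */
        attr->sample_freq = 4000;       /* target samples per second (placeholder)  */
    }
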
4203 * Move @event to the tail of the @ctx's eligible events.
4205 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
4214 perf_event_groups_delete(&ctx->flexible_groups, event);
4215 perf_event_groups_insert(&ctx->flexible_groups, event);
4218 /* pick an event from the flexible_groups to rotate */
4222 struct perf_event *event;
4229 /* pick the first active flexible event */
4230 event = list_first_entry_or_null(&pmu_ctx->flexible_active,
4232 if (event)
4235 /* if no active flexible event, pick the first event */
4243 event = __node_2_pe(node);
4250 event = __node_2_pe(node);
4257 event = __node_2_pe(node);
4266 return event;
4279 * events, thus the event count values are stable.
4349 static int event_enable_on_exec(struct perf_event *event,
4352 if (!event->attr.enable_on_exec)
4355 event->attr.enable_on_exec = 0;
4356 if (event->state >= PERF_EVENT_STATE_INACTIVE)
4359 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
4373 struct perf_event *event;
4388 list_for_each_entry(event, &ctx->event_list, event_entry) {
4389 enabled |= event_enable_on_exec(event, ctx);
4390 event_type |= get_event_type(event);
4394 * Unclone and reschedule this context if we enabled any event.
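
event_enable_on_exec() services perf_event_attr::enable_on_exec; a brief sketch of the usual "count only the exec'ed child" pattern, where the counter is opened disabled on a forked child and starts counting at its execve() (the helper name is made up for illustration):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    /* Open a counter on @child_pid that stays off until the child execs. */
    static int open_on_exec_counter(pid_t child_pid)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;              /* don't count the fork/exec setup */
        attr.enable_on_exec = 1;        /* flips to enabled at execve()    */
        attr.inherit = 1;               /* follow the child's threads      */

        return syscall(__NR_perf_event_open, &attr, child_pid, -1, -1, 0);
    }
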
4411 static void perf_remove_from_owner(struct perf_event *event);
4412 static void perf_event_exit_event(struct perf_event *event,
4422 struct perf_event *event, *next;
4431 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
4432 if (!event->attr.remove_on_exec)
4435 if (!is_kernel_event(event))
4436 perf_remove_from_owner(event);
4440 perf_event_exit_event(event, ctx);
4456 struct perf_event *event;
4461 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
4465 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
4479 * Cross CPU call to read the hardware event
4484 struct perf_event *sub, *event = data->event;
4485 struct perf_event_context *ctx = event->ctx;
4487 struct pmu *pmu = event->pmu;
4493 * event->count would have been updated to a recent sample
4494 * when the event was scheduled out.
4502 update_cgrp_time_from_event(event);
4505 perf_event_update_time(event);
4507 perf_event_update_sibling_time(event);
4509 if (event->state != PERF_EVENT_STATE_ACTIVE)
4513 pmu->read(event);
4520 pmu->read(event);
4522 for_each_sibling_event(sub, event) {
4525 * Use sibling's PMU rather than @event's since
4538 static inline u64 perf_event_count(struct perf_event *event)
4540 return local64_read(&event->count) + atomic64_read(&event->child_count);
4543 static void calc_timer_values(struct perf_event *event,
4551 ctx_time = perf_event_time_now(event, *now);
4552 __perf_update_times(event, ctx_time, enabled, running);
4556 * NMI-safe method to read a local event, that is an event that
4563 int perf_event_read_local(struct perf_event *event, u64 *value,
4576 * It must not be an event with inherit set, we cannot read
4579 if (event->attr.inherit) {
4584 /* If this is a per-task event, it must be for current */
4585 if ((event->attach_state & PERF_ATTACH_TASK) &&
4586 event->hw.target != current) {
4591 /* If this is a per-CPU event, it must be for this CPU */
4592 if (!(event->attach_state & PERF_ATTACH_TASK) &&
4593 event->cpu != smp_processor_id()) {
4598 /* If this is a pinned event it must be running on this CPU */
4599 if (event->attr.pinned && event->oncpu != smp_processor_id()) {
4605 * If the event is currently on this CPU, it's either a per-task event,
4609 if (event->oncpu == smp_processor_id())
4610 event->pmu->read(event);
4612 *value = local64_read(&event->count);
4616 calc_timer_values(event, &__now, &__enabled, &__running);
4628 static int perf_event_read(struct perf_event *event, bool group)
4630 enum perf_event_state state = READ_ONCE(event->state);
4634 * If event is enabled and currently active on a CPU, update the
4635 * value in the event structure:
4649 event_cpu = READ_ONCE(event->oncpu);
4654 .event = event,
4660 event_cpu = __perf_event_read_cpu(event, event_cpu);
4666 * If event_cpu isn't a valid CPU it means the event got
4667 * scheduled out and that will have updated the event count.
4669 * Therefore, either way, we'll have an up-to-date event count
4677 struct perf_event_context *ctx = event->ctx;
4681 state = event->state;
4693 update_cgrp_time_from_event(event);
4696 perf_event_update_time(event);
4698 perf_event_update_sibling_time(event);
4769 find_get_context(struct task_struct *task, struct perf_event *event)
4777 /* Must be root to operate on a CPU event: */
4778 err = perf_allow_cpu(&event->attr);
4782 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
4843 struct perf_event *event)
4856 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
4876 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
4976 static void perf_event_free_filter(struct perf_event *event);
4980 struct perf_event *event = container_of(head, typeof(*event), rcu_head);
4982 if (event->ns)
4983 put_pid_ns(event->ns);
4984 perf_event_free_filter(event);
4985 kmem_cache_free(perf_event_cache, event);
4988 static void ring_buffer_attach(struct perf_event *event,
4991 static void detach_sb_event(struct perf_event *event)
4993 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
4996 list_del_rcu(&event->sb_list);
5000 static bool is_sb_event(struct perf_event *event)
5002 struct perf_event_attr *attr = &event->attr;
5004 if (event->parent)
5007 if (event->attach_state & PERF_ATTACH_TASK)
5019 static void unaccount_pmu_sb_event(struct perf_event *event)
5021 if (is_sb_event(event))
5022 detach_sb_event(event);
5047 static void unaccount_event(struct perf_event *event)
5051 if (event->parent)
5054 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
5056 if (event->attr.mmap || event->attr.mmap_data)
5058 if (event->attr.build_id)
5060 if (event->attr.comm)
5062 if (event->attr.namespaces)
5064 if (event->attr.cgroup)
5066 if (event->attr.task)
5068 if (event->attr.freq)
5070 if (event->attr.context_switch) {
5074 if (is_cgroup_event(event))
5076 if (has_branch_stack(event))
5078 if (event->attr.ksymbol)
5080 if (event->attr.bpf_event)
5082 if (event->attr.text_poke)
5090 unaccount_pmu_sb_event(event);
5103 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5113 static int exclusive_event_init(struct perf_event *event)
5115 struct pmu *pmu = event->pmu;
5128 * Since this is called in perf_event_alloc() path, event::ctx
5130 * to mean "per-task event", because unlike other attach states it
5133 if (event->attach_state & PERF_ATTACH_TASK) {
5144 static void exclusive_event_destroy(struct perf_event *event)
5146 struct pmu *pmu = event->pmu;
5152 if (event->attach_state & PERF_ATTACH_TASK)
5168 static bool exclusive_event_installable(struct perf_event *event,
5172 struct pmu *pmu = event->pmu;
5180 if (exclusive_event_match(iter_event, event))
5187 static void perf_addr_filters_splice(struct perf_event *event,
5190 static void perf_pending_task_sync(struct perf_event *event)
5192 struct callback_head *head = &event->pending_task;
5194 if (!event->pending_work)
5201 event->pending_work = 0;
5202 local_dec(&event->ctx->nr_pending);
5207 * All accesses related to the event are within the same
5209 * grace period before the event is freed will make sure all
5212 rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
5215 static void _free_event(struct perf_event *event)
5217 irq_work_sync(&event->pending_irq);
5218 perf_pending_task_sync(event);
5220 unaccount_event(event);
5222 security_perf_event_free(event);
5224 if (event->rb) {
5226 * Can happen when we close an event with re-directed output.
5231 mutex_lock(&event->mmap_mutex);
5232 ring_buffer_attach(event, NULL);
5233 mutex_unlock(&event->mmap_mutex);
5236 if (is_cgroup_event(event))
5237 perf_detach_cgroup(event);
5239 if (!event->parent) {
5240 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
5244 perf_event_free_bpf_prog(event);
5245 perf_addr_filters_splice(event, NULL);
5246 kfree(event->addr_filter_ranges);
5248 if (event->destroy)
5249 event->destroy(event);
5255 if (event->hw.target)
5256 put_task_struct(event->hw.target);
5258 if (event->pmu_ctx)
5259 put_pmu_ctx(event->pmu_ctx);
5265 if (event->ctx)
5266 put_ctx(event->ctx);
5268 exclusive_event_destroy(event);
5269 module_put(event->pmu->module);
5271 call_rcu(&event->rcu_head, free_event_rcu);
5276 * where the event isn't exposed yet and inherited events.
5278 static void free_event(struct perf_event *event)
5280 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
5281 "unexpected event refcount: %ld; ptr=%p\n",
5282 atomic_long_read(&event->refcount), event)) {
5287 _free_event(event);
5291 * Remove user event from the owner task.
5293 static void perf_remove_from_owner(struct perf_event *event)
5301 * indeed free this event, otherwise we need to serialize on
5304 owner = READ_ONCE(event->owner);
5327 * We have to re-check the event->owner field, if it is cleared
5330 * event.
5332 if (event->owner) {
5333 list_del_init(&event->owner_entry);
5334 smp_store_release(&event->owner, NULL);
5341 static void put_event(struct perf_event *event)
5343 if (!atomic_long_dec_and_test(&event->refcount))
5346 _free_event(event);
5350 * Kill an event dead; while event::refcount will preserve the event
5354 int perf_event_release_kernel(struct perf_event *event)
5356 struct perf_event_context *ctx = event->ctx;
5361 * If we got here through err_alloc: free_event(event); we will not
5365 WARN_ON_ONCE(event->attach_state &
5370 if (!is_kernel_event(event))
5371 perf_remove_from_owner(event);
5373 ctx = perf_event_ctx_lock(event);
5377 * Mark this event as STATE_DEAD, there is no external reference to it
5380 * Anybody acquiring event->child_mutex after the below loop _must_
5387 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
5389 perf_event_ctx_unlock(event, ctx);
5392 mutex_lock(&event->child_mutex);
5393 list_for_each_entry(child, &event->child_list, child_list) {
5405 * Since the event cannot get freed while we hold the
5416 mutex_unlock(&event->child_mutex);
5418 mutex_lock(&event->child_mutex);
5425 tmp = list_first_entry_or_null(&event->child_list,
5434 put_event(event);
5439 mutex_unlock(&event->child_mutex);
5454 mutex_unlock(&event->child_mutex);
5463 * Wake any perf_event_free_task() waiting for this event to be
5471 put_event(event); /* Must be the 'last' reference */
5485 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5493 mutex_lock(&event->child_mutex);
5495 (void)perf_event_read(event, false);
5496 total += perf_event_count(event);
5498 *enabled += event->total_time_enabled +
5499 atomic64_read(&event->child_total_time_enabled);
5500 *running += event->total_time_running +
5501 atomic64_read(&event->child_total_time_running);
5503 list_for_each_entry(child, &event->child_list, child_list) {
5509 mutex_unlock(&event->child_mutex);
5514 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5519 ctx = perf_event_ctx_lock(event);
5520 count = __perf_event_read_value(event, enabled, running);
5521 perf_event_ctx_unlock(event, ctx);
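
perf_event_read_value() is also the exported entry point for in-kernel users; a hedged kernel-module style sketch (error handling trimmed, event choice arbitrary) pairing it with perf_event_create_kernel_counter():

    #include <linux/perf_event.h>
    #include <linux/err.h>

    /* Count cycles on CPU 0 from inside the kernel; just an illustration
     * of the exported API used above, not the only way to do this. */
    static struct perf_event *cycles_event;

    static int start_cycles_counter(void)
    {
        struct perf_event_attr attr = {
            .type   = PERF_TYPE_HARDWARE,
            .config = PERF_COUNT_HW_CPU_CYCLES,
            .size   = sizeof(attr),
        };

        cycles_event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
                                                        NULL /* all tasks */,
                                                        NULL, NULL);
        return IS_ERR(cycles_event) ? PTR_ERR(cycles_event) : 0;
    }

    static u64 read_cycles_counter(void)
    {
        u64 enabled, running;

        /* Sums the local count plus child counts, like the read() path;
         * tear down later with perf_event_release_kernel(). */
        return perf_event_read_value(cycles_event, &enabled, &running);
    }
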
5606 static int perf_read_group(struct perf_event *event,
5609 struct perf_event *leader = event->group_leader, *child;
5616 values = kzalloc(event->read_size, GFP_KERNEL);
5636 ret = event->read_size;
5637 if (copy_to_user(buf, values, event->read_size))
5648 static int perf_read_one(struct perf_event *event,
5655 values[n++] = __perf_event_read_value(event, &enabled, &running);
5661 values[n++] = primary_event_id(event);
5663 values[n++] = atomic64_read(&event->lost_samples);
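
perf_read_group()/perf_read_one() emit the layouts described under read_format in perf_event_open(2); a sketch of opening and reading a two-event group from userspace, with the time fields commonly used for multiplex scaling (event choices are arbitrary):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    struct group_read {
        uint64_t nr;                    /* number of events in the group */
        uint64_t time_enabled;
        uint64_t time_running;
        struct { uint64_t value, id; } cnt[2];
    };

    static int open_counter(uint64_t config, int group_fd)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = config;
        attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
                           PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING;
        return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
    }

    int main(void)
    {
        struct group_read gr;
        int leader, member;

        leader = open_counter(PERF_COUNT_HW_CPU_CYCLES, -1);
        member = open_counter(PERF_COUNT_HW_INSTRUCTIONS, leader);
        if (leader < 0 || member < 0)
            return 1;

        /* ... workload ... */

        if (read(leader, &gr, sizeof(gr)) < 0)
            return 1;
        for (uint64_t i = 0; i < gr.nr; i++)
            printf("id %llu: %llu (enabled %llu, running %llu)\n",
                   (unsigned long long)gr.cnt[i].id,
                   (unsigned long long)gr.cnt[i].value,
                   (unsigned long long)gr.time_enabled,
                   (unsigned long long)gr.time_running);
        return 0;
    }
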
5671 static bool is_event_hup(struct perf_event *event)
5675 if (event->state > PERF_EVENT_STATE_EXIT)
5678 mutex_lock(&event->child_mutex);
5679 no_children = list_empty(&event->child_list);
5680 mutex_unlock(&event->child_mutex);
5685 * Read the performance event - simple non-blocking version for now
5688 __perf_read(struct perf_event *event, char __user *buf, size_t count)
5690 u64 read_format = event->attr.read_format;
5694 * Return end-of-file for a read on an event that is in
5698 if (event->state == PERF_EVENT_STATE_ERROR)
5701 if (count < event->read_size)
5704 WARN_ON_ONCE(event->ctx->parent_ctx);
5706 ret = perf_read_group(event, read_format, buf);
5708 ret = perf_read_one(event, read_format, buf);
5716 struct perf_event *event = file->private_data;
5720 ret = security_perf_event_read(event);
5724 ctx = perf_event_ctx_lock(event);
5725 ret = __perf_read(event, buf, count);
5726 perf_event_ctx_unlock(event, ctx);
5733 struct perf_event *event = file->private_data;
5737 poll_wait(file, &event->waitq, wait);
5739 if (is_event_hup(event))
5743 * Pin the event->rb by taking event->mmap_mutex; otherwise
5746 mutex_lock(&event->mmap_mutex);
5747 rb = event->rb;
5750 mutex_unlock(&event->mmap_mutex);
5754 static void _perf_event_reset(struct perf_event *event)
5756 (void)perf_event_read(event, false);
5757 local64_set(&event->count, 0);
5758 perf_event_update_userpage(event);
5761 /* Assume it's not an event with inherit set. */
5762 u64 perf_event_pause(struct perf_event *event, bool reset)
5767 ctx = perf_event_ctx_lock(event);
5768 WARN_ON_ONCE(event->attr.inherit);
5769 _perf_event_disable(event);
5770 count = local64_read(&event->count);
5772 local64_set(&event->count, 0);
5773 perf_event_ctx_unlock(event, ctx);
5780 * Holding the top-level event's child_mutex means that any
5781 * descendant process that has inherited this event will block
5785 static void perf_event_for_each_child(struct perf_event *event,
5790 WARN_ON_ONCE(event->ctx->parent_ctx);
5792 mutex_lock(&event->child_mutex);
5793 func(event);
5794 list_for_each_entry(child, &event->child_list, child_list)
5796 mutex_unlock(&event->child_mutex);
5799 static void perf_event_for_each(struct perf_event *event,
5802 struct perf_event_context *ctx = event->ctx;
5807 event = event->group_leader;
5809 perf_event_for_each_child(event, func);
5810 for_each_sibling_event(sibling, event)
5814 static void __perf_event_period(struct perf_event *event,
5822 if (event->attr.freq) {
5823 event->attr.sample_freq = value;
5825 event->attr.sample_period = value;
5826 event->hw.sample_period = value;
5829 active = (event->state == PERF_EVENT_STATE_ACTIVE);
5831 perf_pmu_disable(event->pmu);
5834 * trying to unthrottle while we already re-started the event.
5836 if (event->hw.interrupts == MAX_INTERRUPTS) {
5837 event->hw.interrupts = 0;
5838 perf_log_throttle(event, 1);
5840 event->pmu->stop(event, PERF_EF_UPDATE);
5843 local64_set(&event->hw.period_left, 0);
5846 event->pmu->start(event, PERF_EF_RELOAD);
5847 perf_pmu_enable(event->pmu);
5851 static int perf_event_check_period(struct perf_event *event, u64 value)
5853 return event->pmu->check_period(event, value);
5856 static int _perf_event_period(struct perf_event *event, u64 value)
5858 if (!is_sampling_event(event))
5864 if (event->attr.freq) {
5868 if (perf_event_check_period(event, value))
5874 event_function_call(event, __perf_event_period, &value);
5879 int perf_event_period(struct perf_event *event, u64 value)
5884 ctx = perf_event_ctx_lock(event);
5885 ret = _perf_event_period(event, value);
5886 perf_event_ctx_unlock(event, ctx);
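
__perf_event_period() is what the PERF_EVENT_IOC_PERIOD ioctl ends up calling; from userspace the update is a single ioctl on an already-open sampling event (the helper below is illustrative):

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    /* Change the sampling period (or, if attr.freq was set, the frequency)
     * of an existing event without closing and reopening it. */
    static int set_sample_period(int fd, uint64_t new_period)
    {
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);
    }
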
5908 static int perf_event_set_output(struct perf_event *event,
5910 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5914 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
5931 return _perf_event_refresh(event, arg);
5940 return _perf_event_period(event, value);
5944 u64 id = primary_event_id(event);
5961 ret = perf_event_set_output(event, output_event);
5964 ret = perf_event_set_output(event, NULL);
5970 return perf_event_set_filter(event, (void __user *)arg);
5981 err = perf_event_set_bpf_prog(event, prog, 0);
5994 rb = rcu_dereference(event->rb);
6005 return perf_event_query_prog_array(event, (void __user *)arg);
6015 return perf_event_modify_attr(event, &new_attr);
6022 perf_event_for_each(event, func);
6024 perf_event_for_each_child(event, func);
6031 struct perf_event *event = file->private_data;
6036 ret = security_perf_event_write(event);
6040 ctx = perf_event_ctx_lock(event);
6041 ret = _perf_ioctl(event, cmd, arg);
6042 perf_event_ctx_unlock(event, ctx);
6072 struct perf_event *event;
6075 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6076 ctx = perf_event_ctx_lock(event);
6077 perf_event_for_each_child(event, _perf_event_enable);
6078 perf_event_ctx_unlock(event, ctx);
6088 struct perf_event *event;
6091 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6092 ctx = perf_event_ctx_lock(event);
6093 perf_event_for_each_child(event, _perf_event_disable);
6094 perf_event_ctx_unlock(event, ctx);
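
perf_event_task_enable()/perf_event_task_disable() sit behind prctl(2), while _perf_ioctl() above handles the per-fd variants; a short sketch of both:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/prctl.h>

    /* Pause or resume every counter this task created (walks
     * current->perf_event_list, as in the loops above). */
    static void pause_all_own_counters(int pause)
    {
        prctl(pause ? PR_TASK_PERF_EVENTS_DISABLE
                    : PR_TASK_PERF_EVENTS_ENABLE);
    }

    /* Or target a single event by file descriptor. */
    static int pause_one_counter(int fd, int pause)
    {
        return ioctl(fd, pause ? PERF_EVENT_IOC_DISABLE
                               : PERF_EVENT_IOC_ENABLE, 0);
    }
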
6101 static int perf_event_index(struct perf_event *event)
6103 if (event->hw.state & PERF_HES_STOPPED)
6106 if (event->state != PERF_EVENT_STATE_ACTIVE)
6109 return event->pmu->event_idx(event);
6112 static void perf_event_init_userpage(struct perf_event *event)
6118 rb = rcu_dereference(event->rb);
6135 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
6144 void perf_event_update_userpage(struct perf_event *event)
6151 rb = rcu_dereference(event->rb);
6157 * based on snapshot values taken when the event
6164 calc_timer_values(event, &now, &enabled, &running);
6174 userpg->index = perf_event_index(event);
6175 userpg->offset = perf_event_count(event);
6177 userpg->offset -= local64_read(&event->hw.prev_count);
6180 atomic64_read(&event->child_total_time_enabled);
6183 atomic64_read(&event->child_total_time_running);
6185 arch_perf_update_userpage(event, userpg, now);
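
perf_event_update_userpage() maintains the struct perf_event_mmap_page that userspace sees in the first mmap'ed page; reading it uses a seqlock-style retry loop, sketched here for the offset and time fields only (the architecture-specific rdpmc fast path is left out, and the helper names are illustrative):

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map the control page of an already-open perf event fd. */
    static struct perf_event_mmap_page *map_user_page(int fd)
    {
        long psz = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, psz, PROT_READ, MAP_SHARED, fd, 0);

        return p == MAP_FAILED ? NULL : p;
    }

    /* Take a consistent snapshot of the self-monitoring fields. */
    static void read_user_page(volatile struct perf_event_mmap_page *pc,
                               int64_t *offset, uint64_t *enabled,
                               uint64_t *running)
    {
        uint32_t seq;

        do {
            seq = pc->lock;
            __sync_synchronize();        /* pairs with the kernel's barriers */
            *offset  = pc->offset;       /* base the counter value builds on */
            *enabled = pc->time_enabled;
            *running = pc->time_running;
            __sync_synchronize();
        } while (pc->lock != seq);
    }
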
6197 struct perf_event *event = vmf->vma->vm_file->private_data;
6208 rb = rcu_dereference(event->rb);
6230 static void ring_buffer_attach(struct perf_event *event,
6236 WARN_ON_ONCE(event->parent);
6238 if (event->rb) {
6241 * event->rb_entry and wait/clear when adding event->rb_entry.
6243 WARN_ON_ONCE(event->rcu_pending);
6245 old_rb = event->rb;
6247 list_del_rcu(&event->rb_entry);
6250 event->rcu_batches = get_state_synchronize_rcu();
6251 event->rcu_pending = 1;
6255 if (event->rcu_pending) {
6256 cond_synchronize_rcu(event->rcu_batches);
6257 event->rcu_pending = 0;
6261 list_add_rcu(&event->rb_entry, &rb->event_list);
6266 * Avoid racing with perf_mmap_close(AUX): stop the event
6267 * before swizzling the event::rb pointer; if it's getting
6275 if (has_aux(event))
6276 perf_event_stop(event, 0);
6278 rcu_assign_pointer(event->rb, rb);
6287 wake_up_all(&event->waitq);
6291 static void ring_buffer_wakeup(struct perf_event *event)
6295 if (event->parent)
6296 event = event->parent;
6299 rb = rcu_dereference(event->rb);
6301 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
6302 wake_up_all(&event->waitq);
6307 struct perf_buffer *ring_buffer_get(struct perf_event *event)
6311 if (event->parent)
6312 event = event->parent;
6315 rb = rcu_dereference(event->rb);
6337 struct perf_event *event = vma->vm_file->private_data;
6339 atomic_inc(&event->mmap_count);
6340 atomic_inc(&event->rb->mmap_count);
6343 atomic_inc(&event->rb->aux_mmap_count);
6345 if (event->pmu->event_mapped)
6346 event->pmu->event_mapped(event, vma->vm_mm);
6349 static void perf_pmu_output_stop(struct perf_event *event);
6353 * event, or through other events by use of perf_event_set_output().
6361 struct perf_event *event = vma->vm_file->private_data;
6362 struct perf_buffer *rb = ring_buffer_get(event);
6368 if (event->pmu->event_unmapped)
6369 event->pmu->event_unmapped(event, vma->vm_mm);
6383 perf_pmu_output_stop(event);
6399 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
6402 ring_buffer_attach(event, NULL);
6403 mutex_unlock(&event->mmap_mutex);
6416 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
6417 if (!atomic_long_inc_not_zero(&event->refcount)) {
6419 * This event is en-route to free_event() which will
6426 mutex_lock(&event->mmap_mutex);
6432 * If we find a different rb; ignore this event, a next
6437 if (event->rb == rb)
6438 ring_buffer_attach(event, NULL);
6440 mutex_unlock(&event->mmap_mutex);
6441 put_event(event);
6478 struct perf_event *event = file->private_data;
6494 if (event->cpu == -1 && event->attr.inherit)
6500 ret = security_perf_event_read(event);
6516 if (!event->rb)
6523 mutex_lock(&event->mmap_mutex);
6526 rb = event->rb;
6581 WARN_ON_ONCE(event->ctx->parent_ctx);
6583 mutex_lock(&event->mmap_mutex);
6584 if (event->rb) {
6585 if (data_page_nr(event->rb) != nr_pages) {
6590 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
6593 * event and try again.
6595 ring_buffer_attach(event, NULL);
6596 mutex_unlock(&event->mmap_mutex);
6642 WARN_ON(!rb && event->rb);
6649 event->attr.watermark ? event->attr.wakeup_watermark : 0,
6650 event->cpu, flags);
6661 ring_buffer_attach(event, rb);
6663 perf_event_update_time(event);
6664 perf_event_init_userpage(event);
6665 perf_event_update_userpage(event);
6667 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
6668 event->attr.aux_watermark, flags);
6678 atomic_inc(&event->mmap_count);
6685 mutex_unlock(&event->mmap_mutex);
6694 if (event->pmu->event_mapped)
6695 event->pmu->event_mapped(event, vma->vm_mm);
6703 struct perf_event *event = filp->private_data;
6707 retval = fasync_helper(fd, filp, on, &event->fasync);
6728 * Perf event wakeup
6734 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
6737 if (event->parent)
6738 event = event->parent;
6739 return &event->fasync;
6742 void perf_event_wakeup(struct perf_event *event)
6744 ring_buffer_wakeup(event);
6746 if (event->pending_kill) {
6747 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
6748 event->pending_kill = 0;
6752 static void perf_sigtrap(struct perf_event *event)
6759 if (WARN_ON_ONCE(event->ctx->task != current))
6769 send_sig_perf((void __user *)event->pending_addr,
6770 event->orig_type, event->attr.sig_data);
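
perf_sigtrap() delivers the synchronous SIGTRAP requested via perf_event_attr::sigtrap; a hedged userspace sketch (a plain sampling event stands in for the more typical breakpoint use, sig_data is an arbitrary cookie, and the si_perf_data field needs reasonably recent kernel and libc headers):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t traps;
    static volatile unsigned long long last_cookie;

    static void on_trap(int sig, siginfo_t *info, void *uctx)
    {
        traps++;
        last_cookie = info->si_perf_data;   /* carries attr.sig_data */
    }

    int main(void)
    {
        struct perf_event_attr attr;
        struct sigaction sa;
        int fd;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = on_trap;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGTRAP, &sa, NULL);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.sample_period = 10000000;   /* placeholder period */
        attr.sigtrap = 1;                /* SIGTRAP on each overflow       */
        attr.remove_on_exec = 1;         /* required together with sigtrap */
        attr.sig_data = 0xfeedULL;       /* arbitrary cookie               */

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }

        /* ... workload; on_trap() runs synchronously in this task ... */
        sleep(1);
        printf("%d traps, last cookie %llu\n", (int)traps, last_cookie);
        close(fd);
        return 0;
    }
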
6774 * Deliver the pending work in-event-context or follow the context.
6776 static void __perf_pending_irq(struct perf_event *event)
6778 int cpu = READ_ONCE(event->oncpu);
6781 * If the event isn't running, we're done. event_sched_out() will have
6788 * Yay, we hit home and are in the context of the event.
6791 if (event->pending_sigtrap) {
6792 event->pending_sigtrap = 0;
6793 perf_sigtrap(event);
6794 local_dec(&event->ctx->nr_pending);
6796 if (event->pending_disable) {
6797 event->pending_disable = 0;
6798 perf_event_disable_local(event);
6821 * But the event runs on CPU-B and wants disabling there.
6823 irq_work_queue_on(&event->pending_irq, cpu);
6828 struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
6838 * The wakeup isn't bound to the context of the event -- it can happen
6839 * irrespective of where the event is.
6841 if (event->pending_wakeup) {
6842 event->pending_wakeup = 0;
6843 perf_event_wakeup(event);
6846 __perf_pending_irq(event);
6854 struct perf_event *event = container_of(head, struct perf_event, pending_task);
6858 * All accesses to the event must belong to the same implicit RCU read-side
6869 if (event->pending_work) {
6870 event->pending_work = 0;
6871 perf_sigtrap(event);
6872 local_dec(&event->ctx->nr_pending);
6873 rcuwait_wake_up(&event->pending_work_wait);
7052 static unsigned long perf_prepare_sample_aux(struct perf_event *event,
7056 struct perf_event *sampler = event->aux_event;
7091 struct perf_event *event,
7101 * the IRQ ones, that is, for example, re-starting an event that's just
7103 * doesn't change the event state.
7115 ret = event->pmu->snapshot_aux(event, handle, size);
7124 static void perf_aux_sample_output(struct perf_event *event,
7128 struct perf_event *sampler = event->aux_event;
7170 * when event->attr.sample_id_all is set.
7177 struct perf_event *event,
7180 data->type = event->attr.sample_type;
7185 data->tid_entry.pid = perf_event_pid(event, current);
7186 data->tid_entry.tid = perf_event_tid(event, current);
7190 data->time = perf_event_clock(event);
7193 data->id = primary_event_id(event);
7196 data->stream_id = event->id;
7206 struct perf_event *event)
7208 if (event->attr.sample_id_all) {
7209 header->size += event->id_header_size;
7210 __perf_event_header__init_id(data, event, event->attr.sample_type);
7238 void perf_event__output_id_sample(struct perf_event *event,
7242 if (event->attr.sample_id_all)
7247 struct perf_event *event,
7250 u64 read_format = event->attr.read_format;
7254 values[n++] = perf_event_count(event);
7257 atomic64_read(&event->child_total_time_enabled);
7261 atomic64_read(&event->child_total_time_running);
7264 values[n++] = primary_event_id(event);
7266 values[n++] = atomic64_read(&event->lost_samples);
7272 struct perf_event *event,
7275 struct perf_event *leader = event->group_leader, *sub;
7276 u64 read_format = event->attr.read_format;
7295 if ((leader != event) &&
7310 if ((sub != event) &&
7337 struct perf_event *event)
7340 u64 read_format = event->attr.read_format;
7344 * based on snapshot values taken when the event
7352 calc_timer_values(event, &now, &enabled, &running);
7354 if (event->attr.read_format & PERF_FORMAT_GROUP)
7355 perf_output_read_group(handle, event, enabled, running);
7357 perf_output_read_one(handle, event, enabled, running);
7363 struct perf_event *event)
7397 perf_output_read(handle, event);
7448 if (branch_sample_hw_index(event))
7470 u64 mask = event->attr.sample_regs_user;
7501 u64 mask = event->attr.sample_regs_intr;
7525 perf_aux_sample_output(event, handle, data);
7528 if (!event->attr.watermark) {
7529 int wakeup_events = event->attr.wakeup_events;
7672 perf_callchain(struct perf_event *event, struct pt_regs *regs)
7674 bool kernel = !event->attr.exclude_callchain_kernel;
7675 bool user = !event->attr.exclude_callchain_user;
7677 bool crosstask = event->ctx->task && event->ctx->task != current;
7678 const u32 max_stack = event->attr.sample_max_stack;
7695 struct perf_event *event,
7698 u64 sample_type = event->attr.sample_type;
7716 data->type = event->attr.sample_type;
7720 __perf_event_header__init_id(data, event, filtered_sample_type);
7728 perf_sample_save_callchain(data, event, regs);
7755 u64 mask = event->attr.sample_regs_user;
7770 u16 stack_size = event->attr.sample_stack_user;
7771 u16 header_size = perf_sample_data_size(data, event);
7817 u64 mask = event->attr.sample_regs_intr;
7859 u16 header_size = perf_sample_data_size(data, event);
7870 event->attr.aux_sample_size);
7872 size = perf_prepare_sample_aux(event, data, size);
7882 struct perf_event *event,
7886 header->size = perf_sample_data_size(data, event);
7901 __perf_event_output(struct perf_event *event,
7916 perf_prepare_sample(data, event, regs);
7917 perf_prepare_header(&header, data, event, regs);
7919 err = output_begin(&handle, data, event, header.size);
7923 perf_output_sample(&handle, &header, data, event);
7933 perf_event_output_forward(struct perf_event *event,
7937 __perf_event_output(event, data, regs, perf_output_begin_forward);
7941 perf_event_output_backward(struct perf_event *event,
7945 __perf_event_output(event, data, regs, perf_output_begin_backward);
7949 perf_event_output(struct perf_event *event,
7953 return __perf_event_output(event, data, regs, perf_output_begin);
7968 perf_event_read_event(struct perf_event *event,
7977 .size = sizeof(read_event) + event->read_size,
7979 .pid = perf_event_pid(event, task),
7980 .tid = perf_event_tid(event, task),
7984 perf_event_header__init_id(&read_event.header, &sample, event);
7985 ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
7990 perf_output_read(&handle, event);
7991 perf_event__output_id_sample(event, &handle, &sample);
7996 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8003 struct perf_event *event;
8005 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
8007 if (event->state < PERF_EVENT_STATE_INACTIVE)
8009 if (!event_filter_match(event))
8013 output(event, data);
8020 struct perf_event *event;
8022 list_for_each_entry_rcu(event, &pel->list, sb_list) {
8025 * if we observe event->ctx, both event and ctx will be
8028 if (!smp_load_acquire(&event->ctx))
8031 if (event->state < PERF_EVENT_STATE_INACTIVE)
8033 if (!event_filter_match(event))
8035 output(event, data);
8043 * your event, otherwise it might not get delivered.
8078 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
8080 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8085 if (!has_addr_filter(event))
8091 event->addr_filter_ranges[count].start = 0;
8092 event->addr_filter_ranges[count].size = 0;
8100 event->addr_filters_gen++;
8104 perf_event_stop(event, 1);
8129 static void __perf_event_output_stop(struct perf_event *event, void *data)
8131 struct perf_event *parent = event->parent;
8135 .event = event,
8138 if (!has_aux(event))
8142 parent = event;
8148 * We are using event::rb to determine if the event should be stopped,
8150 * which will make us skip the event that actually needs to be stopped.
8151 * So ring_buffer_attach() has to stop an aux event before re-assigning
8160 struct perf_event *event = info;
8163 .rb = event->rb,
8176 static void perf_pmu_output_stop(struct perf_event *event)
8183 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
8187 * sufficient to stop the event itself if it's active, since
8197 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
8227 static int perf_event_task_match(struct perf_event *event)
8229 return event->attr.comm || event->attr.mmap ||
8230 event->attr.mmap2 || event->attr.mmap_data ||
8231 event->attr.task;
8234 static void perf_event_task_output(struct perf_event *event,
8243 if (!perf_event_task_match(event))
8246 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
8248 ret = perf_output_begin(&handle, &sample, event,
8253 task_event->event_id.pid = perf_event_pid(event, task);
8254 task_event->event_id.tid = perf_event_tid(event, task);
8257 task_event->event_id.ppid = perf_event_pid(event,
8259 task_event->event_id.ptid = perf_event_pid(event,
8262 task_event->event_id.ppid = perf_event_pid(event, current);
8263 task_event->event_id.ptid = perf_event_tid(event, current);
8266 task_event->event_id.time = perf_event_clock(event);
8270 perf_event__output_id_sample(event, &handle, &sample);
8333 static int perf_event_comm_match(struct perf_event *event)
8335 return event->attr.comm;
8338 static void perf_event_comm_output(struct perf_event *event,
8347 if (!perf_event_comm_match(event))
8350 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
8351 ret = perf_output_begin(&handle, &sample, event,
8357 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
8358 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
8364 perf_event__output_id_sample(event, &handle, &sample);
8432 static int perf_event_namespaces_match(struct perf_event *event)
8434 return event->attr.namespaces;
8437 static void perf_event_namespaces_output(struct perf_event *event,
8446 if (!perf_event_namespaces_match(event))
8450 &sample, event);
8451 ret = perf_output_begin(&handle, &sample, event,
8456 namespaces_event->event_id.pid = perf_event_pid(event,
8458 namespaces_event->event_id.tid = perf_event_tid(event,
8463 perf_event__output_id_sample(event, &handle, &sample);
8560 static int perf_event_cgroup_match(struct perf_event *event)
8562 return event->attr.cgroup;
8565 static void perf_event_cgroup_output(struct perf_event *event, void *data)
8573 if (!perf_event_cgroup_match(event))
8577 &sample, event);
8578 ret = perf_output_begin(&handle, &sample, event,
8586 perf_event__output_id_sample(event, &handle, &sample);
8671 static int perf_event_mmap_match(struct perf_event *event,
8678 return (!executable && event->attr.mmap_data) ||
8679 (executable && (event->attr.mmap || event->attr.mmap2));
8682 static void perf_event_mmap_output(struct perf_event *event,
8693 if (!perf_event_mmap_match(event, data))
8696 if (event->attr.mmap2) {
8706 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
8707 ret = perf_output_begin(&handle, &sample, event,
8712 mmap_event->event_id.pid = perf_event_pid(event, current);
8713 mmap_event->event_id.tid = perf_event_tid(event, current);
8715 use_build_id = event->attr.build_id && mmap_event->build_id_size;
8717 if (event->attr.mmap2 && use_build_id)
8722 if (event->attr.mmap2) {
8741 perf_event__output_id_sample(event, &handle, &sample);
8902 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
8904 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8910 if (!has_addr_filter(event))
8919 &event->addr_filter_ranges[count]))
8926 event->addr_filters_gen++;
8930 perf_event_stop(event, 1);
8989 void perf_event_aux_event(struct perf_event *event, unsigned long head,
9011 perf_event_header__init_id(&rec.header, &sample, event);
9012 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9018 perf_event__output_id_sample(event, &handle, &sample);
9026 void perf_log_lost_samples(struct perf_event *event, u64 lost)
9044 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
9046 ret = perf_output_begin(&handle, &sample, event,
9052 perf_event__output_id_sample(event, &handle, &sample);
9071 static int perf_event_switch_match(struct perf_event *event)
9073 return event->attr.context_switch;
9076 static void perf_event_switch_output(struct perf_event *event, void *data)
9083 if (!perf_event_switch_match(event))
9087 if (event->ctx->task) {
9094 perf_event_pid(event, se->next_prev);
9096 perf_event_tid(event, se->next_prev);
9099 perf_event_header__init_id(&se->event_id.header, &sample, event);
9101 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
9105 if (event->ctx->task)
9110 perf_event__output_id_sample(event, &handle, &sample);
9148 static void perf_log_throttle(struct perf_event *event, int enable)
9165 .time = perf_event_clock(event),
9166 .id = primary_event_id(event),
9167 .stream_id = event->id,
9173 perf_event_header__init_id(&throttle_event.header, &sample, event);
9175 ret = perf_output_begin(&handle, &sample, event,
9181 perf_event__output_id_sample(event, &handle, &sample);
9201 static int perf_event_ksymbol_match(struct perf_event *event)
9203 return event->attr.ksymbol;
9206 static void perf_event_ksymbol_output(struct perf_event *event, void *data)
9213 if (!perf_event_ksymbol_match(event))
9217 &sample, event);
9218 ret = perf_output_begin(&handle, &sample, event,
9225 perf_event__output_id_sample(event, &handle, &sample);
9291 static int perf_event_bpf_match(struct perf_event *event)
9293 return event->attr.bpf_event;
9296 static void perf_event_bpf_output(struct perf_event *event, void *data)
9303 if (!perf_event_bpf_match(event))
9307 &sample, event);
9308 ret = perf_output_begin(&handle, &sample, event,
9314 perf_event__output_id_sample(event, &handle, &sample);
9397 static int perf_event_text_poke_match(struct perf_event *event)
9399 return event->attr.text_poke;
9402 static void perf_event_text_poke_output(struct perf_event *event, void *data)
9410 if (!perf_event_text_poke_match(event))
9413 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
9415 ret = perf_output_begin(&handle, &sample, event,
9430 perf_event__output_id_sample(event, &handle, &sample);
9467 void perf_event_itrace_started(struct perf_event *event)
9469 event->attach_state |= PERF_ATTACH_ITRACE;
9472 static void perf_log_itrace_start(struct perf_event *event)
9483 if (event->parent)
9484 event = event->parent;
9486 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
9487 event->attach_state & PERF_ATTACH_ITRACE)
9493 rec.pid = perf_event_pid(event, current);
9494 rec.tid = perf_event_tid(event, current);
9496 perf_event_header__init_id(&rec.header, &sample, event);
9497 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9503 perf_event__output_id_sample(event, &handle, &sample);
9508 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
9518 if (event->parent)
9519 event = event->parent;
9526 perf_event_header__init_id(&rec.header, &sample, event);
9527 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9533 perf_event__output_id_sample(event, &handle, &sample);
9540 __perf_event_account_interrupt(struct perf_event *event, int throttle)
9542 struct hw_perf_event *hwc = &event->hw;
9557 perf_log_throttle(event, 0);
9562 if (event->attr.freq) {
9569 perf_adjust_period(event, delta, hwc->last_period, true);
9575 int perf_event_account_interrupt(struct perf_event *event)
9577 return __perf_event_account_interrupt(event, 1);
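A hedged sketch (not kernel code) of the two sampling modes __perf_event_account_interrupt() has to cope with: attr.freq selects the frequency-driven path that is re-tuned via perf_adjust_period(), while a fixed period bypasses it. The constants are illustrative.

#include <linux/perf_event.h>

static void use_frequency_sampling(struct perf_event_attr *attr)
{
        attr->freq = 1;                 /* interpret the union as a frequency */
        attr->sample_freq = 1000;       /* aim for ~1000 samples per second   */
}

static void use_fixed_period(struct perf_event_attr *attr)
{
        attr->freq = 0;
        attr->sample_period = 100000;   /* one sample per 100000 event counts */
}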
9580 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
9587 if (event->attr.exclude_kernel && !user_mode(regs))
9594 * Generic event overflow handling, sampling.
9597 static int __perf_event_overflow(struct perf_event *event,
9601 int events = atomic_read(&event->event_limit);
9608 if (unlikely(!is_sampling_event(event)))
9611 ret = __perf_event_account_interrupt(event, throttle);
9618 event->pending_kill = POLL_IN;
9619 if (events && atomic_dec_and_test(&event->event_limit)) {
9621 event->pending_kill = POLL_HUP;
9622 perf_event_disable_inatomic(event);
9625 if (event->attr.sigtrap) {
9629 * it is the first event, on the other hand, we should also not
9632 bool valid_sample = sample_is_allowed(event, regs);
9637 if (!event->pending_sigtrap) {
9638 event->pending_sigtrap = pending_id;
9639 local_inc(&event->ctx->nr_pending);
9640 } else if (event->attr.exclude_kernel && valid_sample) {
9653 WARN_ON_ONCE(event->pending_sigtrap != pending_id);
9656 event->pending_addr = 0;
9658 event->pending_addr = data->addr;
9659 irq_work_queue(&event->pending_irq);
9662 READ_ONCE(event->overflow_handler)(event, data, regs);
9664 if (*perf_event_fasync(event) && event->pending_kill) {
9665 event->pending_wakeup = 1;
9666 irq_work_queue(&event->pending_irq);
9672 int perf_event_overflow(struct perf_event *event,
9676 return __perf_event_overflow(event, 1, data, regs);
9680 * Generic software event infrastructure
9695 * We directly increment event->count and keep a second value in
9696 * event->hw.period_left to count intervals. This period event
9701 u64 perf_swevent_set_period(struct perf_event *event)
9703 struct hw_perf_event *hwc = &event->hw;
9724 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
9728 struct hw_perf_event *hwc = &event->hw;
9732 overflow = perf_swevent_set_period(event);
9738 if (__perf_event_overflow(event, throttle,
9750 static void perf_swevent_event(struct perf_event *event, u64 nr,
9754 struct hw_perf_event *hwc = &event->hw;
9756 local64_add(nr, &event->count);
9761 if (!is_sampling_event(event))
9764 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
9766 return perf_swevent_overflow(event, 1, data, regs);
9768 data->period = event->hw.last_period;
9770 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
9771 return perf_swevent_overflow(event, 1, data, regs);
9776 perf_swevent_overflow(event, 0, data, regs);
9779 static int perf_exclude_event(struct perf_event *event,
9782 if (event->hw.state & PERF_HES_STOPPED)
9786 if (event->attr.exclude_user && user_mode(regs))
9789 if (event->attr.exclude_kernel && !user_mode(regs))
9796 static int perf_swevent_match(struct perf_event *event,
9802 if (event->attr.type != type)
9805 if (event->attr.config != event_id)
9808 if (perf_exclude_event(event, regs))
9842 /* For the event head insertion and removal in the hlist */
9844 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
9847 u32 event_id = event->attr.config;
9848 u64 type = event->attr.type;
9851 * Event scheduling is always serialized against hlist allocation
9856 lockdep_is_held(&event->ctx->lock));
9869 struct perf_event *event;
9877 hlist_for_each_entry_rcu(event, head, hlist_entry) {
9878 if (perf_swevent_match(event, type, event_id, data, regs))
9879 perf_swevent_event(event, nr, data, regs);
9929 static void perf_swevent_read(struct perf_event *event)
9933 static int perf_swevent_add(struct perf_event *event, int flags)
9936 struct hw_perf_event *hwc = &event->hw;
9939 if (is_sampling_event(event)) {
9941 perf_swevent_set_period(event);
9946 head = find_swevent_head(swhash, event);
9950 hlist_add_head_rcu(&event->hlist_entry, head);
9951 perf_event_update_userpage(event);
9956 static void perf_swevent_del(struct perf_event *event, int flags)
9958 hlist_del_rcu(&event->hlist_entry);
9961 static void perf_swevent_start(struct perf_event *event, int flags)
9963 event->hw.state = 0;
9966 static void perf_swevent_stop(struct perf_event *event, int flags)
9968 event->hw.state = PERF_HES_STOPPED;
10060 static void sw_perf_event_destroy(struct perf_event *event)
10062 u64 event_id = event->attr.config;
10064 WARN_ON(event->parent);
10073 static int perf_swevent_init(struct perf_event *event)
10075 u64 event_id = event->attr.config;
10077 if (event->attr.type != PERF_TYPE_SOFTWARE)
10083 if (has_branch_stack(event))
10088 event->attr.type = perf_cpu_clock.type;
10091 event->attr.type = perf_task_clock.type;
10101 if (!event->parent) {
10109 event->destroy = sw_perf_event_destroy;
10130 static void tp_perf_event_destroy(struct perf_event *event)
10132 perf_trace_destroy(event);
10135 static int perf_tp_event_init(struct perf_event *event)
10139 if (event->attr.type != PERF_TYPE_TRACEPOINT)
10145 if (has_branch_stack(event))
10148 err = perf_trace_init(event);
10152 event->destroy = tp_perf_event_destroy;
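A hedged user-space sketch (not part of the kernel source) of how a tracepoint event reaching perf_tp_event_init() above is typically opened: the tracepoint id is read from tracefs and becomes attr.config. The tracefs mount point and the sched_switch tracepoint are assumptions for illustration.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_tracepoint_event(void)
{
        struct perf_event_attr attr;
        unsigned long long id = 0;
        FILE *f;

        /* may be /sys/kernel/debug/tracing on older setups */
        f = fopen("/sys/kernel/tracing/events/sched/sched_switch/id", "r");
        if (!f)
                return -1;
        if (fscanf(f, "%llu", &id) != 1) {
                fclose(f);
                return -1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;               /* tracepoint id becomes attr.config */
        attr.sample_period = 1;         /* record every hit */
        attr.sample_type = PERF_SAMPLE_RAW;

        /* pid == 0, cpu == -1: trace the calling thread on any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}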
10168 static int perf_tp_filter_match(struct perf_event *event,
10174 if (event->parent)
10175 event = event->parent;
10177 if (likely(!event->filter) || filter_match_preds(event->filter, record))
10182 static int perf_tp_event_match(struct perf_event *event,
10186 if (event->hw.state & PERF_HES_STOPPED)
10191 if (event->attr.exclude_kernel && !user_mode(regs))
10194 if (!perf_tp_filter_match(event, raw))
10212 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
10221 struct perf_event *event)
10225 if (event->attr.config != entry->type)
10228 if (event->attr.sigtrap)
10230 if (perf_tp_event_match(event, raw, regs)) {
10232 perf_sample_save_raw_data(data, event, raw);
10233 perf_swevent_event(event, count, data, regs);
10245 struct perf_event *event, *sibling;
10247 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
10248 __perf_tp_event_target_task(count, record, regs, data, raw, event);
10249 for_each_sibling_event(sibling, event)
10253 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
10254 __perf_tp_event_target_task(count, record, regs, data, raw, event);
10255 for_each_sibling_event(sibling, event)
10265 struct perf_event *event;
10276 hlist_for_each_entry_rcu(event, head, hlist_entry) {
10277 if (perf_tp_event_match(event, &raw, regs)) {
10280 * some members in data are event-specific and
10283 * the problem that next event skips preparing data
10287 perf_sample_save_raw_data(&data, event, &raw);
10288 perf_swevent_event(event, count, &data, regs);
10294 * deliver this event there too.
10355 static int perf_kprobe_event_init(struct perf_event *event);
10367 static int perf_kprobe_event_init(struct perf_event *event)
10372 if (event->attr.type != perf_kprobe.type)
10381 if (has_branch_stack(event))
10384 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
10385 err = perf_kprobe_init(event, is_retprobe);
10389 event->destroy = perf_kprobe_destroy;
10414 static int perf_uprobe_event_init(struct perf_event *event);
10426 static int perf_uprobe_event_init(struct perf_event *event)
10432 if (event->attr.type != perf_uprobe.type)
10441 if (has_branch_stack(event))
10444 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
10445 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10446 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
10450 event->destroy = perf_uprobe_destroy;
10467 static void perf_event_free_filter(struct perf_event *event)
10469 ftrace_profile_free_filter(event);
10473 static void bpf_overflow_handler(struct perf_event *event,
10479 .event = event,
10488 prog = READ_ONCE(event->prog);
10490 perf_prepare_sample(data, event, regs);
10499 event->orig_overflow_handler(event, data, regs);
10502 static int perf_event_set_bpf_handler(struct perf_event *event,
10506 if (event->overflow_handler_context)
10510 if (event->prog)
10516 if (event->attr.precise_ip &&
10518 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
10519 event->attr.exclude_callchain_kernel ||
10520 event->attr.exclude_callchain_user)) {
10533 event->prog = prog;
10534 event->bpf_cookie = bpf_cookie;
10535 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
10536 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
10540 static void perf_event_free_bpf_handler(struct perf_event *event)
10542 struct bpf_prog *prog = event->prog;
10547 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
10548 event->prog = NULL;
10552 static int perf_event_set_bpf_handler(struct perf_event *event,
10558 static void perf_event_free_bpf_handler(struct perf_event *event)
10564  * returns true if the event is a tracepoint, or a kprobe/uprobe created
10567 static inline bool perf_event_is_tracing(struct perf_event *event)
10569 if (event->pmu == &perf_tracepoint)
10572 if (event->pmu == &perf_kprobe)
10576 if (event->pmu == &perf_uprobe)
10582 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
10587 if (!perf_event_is_tracing(event))
10588 return perf_event_set_bpf_handler(event, prog, bpf_cookie);
10590 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
10591 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
10592 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
10593 is_syscall_tp = is_syscall_trace_event(event->tp_event);
10612 int off = trace_event_get_offsets(event->tp_event);
10618 return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
10621 void perf_event_free_bpf_prog(struct perf_event *event)
10623 if (!perf_event_is_tracing(event)) {
10624 perf_event_free_bpf_handler(event);
10627 perf_event_detach_bpf_prog(event);
10636 static void perf_event_free_filter(struct perf_event *event)
10640 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
10646 void perf_event_free_bpf_prog(struct perf_event *event)
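A hedged user-space sketch (not kernel code) of the path into perf_event_set_bpf_prog() above: an already-loaded BPF program fd is handed to the event via ioctl. bpf_prog_fd is assumed to come from a prior bpf(BPF_PROG_LOAD, ...) call with a program type that matches the event class.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int attach_bpf_to_event(int perf_fd, int bpf_prog_fd)
{
        if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd))
                return -1;
        /* start the event once the program is attached */
        return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
}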
10668 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
10670 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
10697 static void perf_addr_filters_splice(struct perf_event *event,
10703 if (!has_addr_filter(event))
10707 if (event->parent)
10710 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
10712 list_splice_init(&event->addr_filters.list, &list);
10714 list_splice(head, &event->addr_filters.list);
10716 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
10743 * Update event's address range filters based on the
10746 static void perf_event_addr_filters_apply(struct perf_event *event)
10748 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
10749 struct task_struct *task = READ_ONCE(event->ctx->task);
10756 * We may observe TASK_TOMBSTONE, which means that the event tear-down
10777 event->addr_filter_ranges[count].start = 0;
10778 event->addr_filter_ranges[count].size = 0;
10780 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
10782 event->addr_filter_ranges[count].start = filter->offset;
10783 event->addr_filter_ranges[count].size = filter->size;
10789 event->addr_filters_gen++;
10799 perf_event_stop(event, 1);
10853 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
10880 filter = perf_addr_filter_new(event, filters);
10939 * Make sure that it doesn't contradict itself or the event's
10966 if (!event->ctx->task)
10981 event->addr_filters.nr_file_filters++;
11010 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
11019 lockdep_assert_held(&event->ctx->mutex);
11021 if (WARN_ON_ONCE(event->parent))
11024 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
11028 ret = event->pmu->addr_filters_validate(&filters);
11033 perf_addr_filters_splice(event, &filters);
11036 perf_event_for_each_child(event, perf_event_addr_filters_apply);
11044 event->addr_filters.nr_file_filters = 0;
11049 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
11059 if (perf_event_is_tracing(event)) {
11060 struct perf_event_context *ctx = event->ctx;
11070  * This can result in the event getting moved to a different ctx,
11074 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
11078 if (has_addr_filter(event))
11079 ret = perf_event_set_addr_filter(event, filter_str);
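Both filter flavours handled by perf_event_set_filter() above arrive through the same ioctl. A hedged user-space sketch follows; the filter strings are illustrative assumptions (a sched_switch field filter and a file-relative address range), not taken from this file.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int set_tracepoint_filter(int perf_fd)
{
        /* event-trace filter on a PERF_TYPE_TRACEPOINT event */
        return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "prev_pid != 0");
}

static int set_address_filter(int perf_fd)
{
        /* address range filter on a PMU that exposes nr_addr_filters */
        return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER,
                     "filter 0x1000/0x2000@/usr/bin/app");
}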
11094 struct perf_event *event;
11097 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
11099 if (event->state != PERF_EVENT_STATE_ACTIVE)
11102 event->pmu->read(event);
11104 perf_sample_data_init(&data, 0, event->hw.last_period);
11107 if (regs && !perf_exclude_event(event, regs)) {
11108 if (!(event->attr.exclude_idle && is_idle_task(current)))
11109 if (__perf_event_overflow(event, 1, &data, regs))
11113 period = max_t(u64, 10000, event->hw.sample_period);
11119 static void perf_swevent_start_hrtimer(struct perf_event *event)
11121 struct hw_perf_event *hwc = &event->hw;
11124 if (!is_sampling_event(event))
11140 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
11142 struct hw_perf_event *hwc = &event->hw;
11144 if (is_sampling_event(event)) {
11152 static void perf_swevent_init_hrtimer(struct perf_event *event)
11154 struct hw_perf_event *hwc = &event->hw;
11156 if (!is_sampling_event(event))
11166 if (event->attr.freq) {
11167 long freq = event->attr.sample_freq;
11169 event->attr.sample_period = NSEC_PER_SEC / freq;
11170 hwc->sample_period = event->attr.sample_period;
11173 event->attr.freq = 0;
11178 * Software event: cpu wall time clock
11181 static void cpu_clock_event_update(struct perf_event *event)
11187 prev = local64_xchg(&event->hw.prev_count, now);
11188 local64_add(now - prev, &event->count);
11191 static void cpu_clock_event_start(struct perf_event *event, int flags)
11193 local64_set(&event->hw.prev_count, local_clock());
11194 perf_swevent_start_hrtimer(event);
11197 static void cpu_clock_event_stop(struct perf_event *event, int flags)
11199 perf_swevent_cancel_hrtimer(event);
11200 cpu_clock_event_update(event);
11203 static int cpu_clock_event_add(struct perf_event *event, int flags)
11206 cpu_clock_event_start(event, flags);
11207 perf_event_update_userpage(event);
11212 static void cpu_clock_event_del(struct perf_event *event, int flags)
11214 cpu_clock_event_stop(event, flags);
11217 static void cpu_clock_event_read(struct perf_event *event)
11219 cpu_clock_event_update(event);
11222 static int cpu_clock_event_init(struct perf_event *event)
11224 if (event->attr.type != perf_cpu_clock.type)
11227 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
11233 if (has_branch_stack(event))
11236 perf_swevent_init_hrtimer(event);
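A minimal user-space sketch (not part of this file) of a counting, non-sampling cpu-clock event serviced by the cpu_clock_event_*() callbacks above; since no sample_period is set, the hrtimer path is never armed. Opening a CPU-wide event may require CAP_PERFMON or a permissive perf_event_paranoid setting.

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t ns;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        /* pid == -1, cpu == 0: count wall-clock time on CPU 0, any task */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        sleep(1);
        if (read(fd, &ns, sizeof(ns)) == sizeof(ns))
                printf("cpu-clock advanced by %llu ns\n", (unsigned long long)ns);
        close(fd);
        return 0;
}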
11256 * Software event: task time clock
11259 static void task_clock_event_update(struct perf_event *event, u64 now)
11264 prev = local64_xchg(&event->hw.prev_count, now);
11266 local64_add(delta, &event->count);
11269 static void task_clock_event_start(struct perf_event *event, int flags)
11271 local64_set(&event->hw.prev_count, event->ctx->time);
11272 perf_swevent_start_hrtimer(event);
11275 static void task_clock_event_stop(struct perf_event *event, int flags)
11277 perf_swevent_cancel_hrtimer(event);
11278 task_clock_event_update(event, event->ctx->time);
11281 static int task_clock_event_add(struct perf_event *event, int flags)
11284 task_clock_event_start(event, flags);
11285 perf_event_update_userpage(event);
11290 static void task_clock_event_del(struct perf_event *event, int flags)
11292 task_clock_event_stop(event, PERF_EF_UPDATE);
11295 static void task_clock_event_read(struct perf_event *event)
11298 u64 delta = now - event->ctx->timestamp;
11299 u64 time = event->ctx->time + delta;
11301 task_clock_event_update(event, time);
11304 static int task_clock_event_init(struct perf_event *event)
11306 if (event->attr.type != perf_task_clock.type)
11309 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
11315 if (has_branch_stack(event))
11318 perf_swevent_init_hrtimer(event);
11350 static int perf_event_nop_int(struct perf_event *event, u64 value)
11392 static int perf_event_idx_default(struct perf_event *event)
11684 static inline bool has_extended_regs(struct perf_event *event)
11686 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
11687 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
11690 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
11701 * if this is a sibling event, acquire the ctx->mutex to protect
11704 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
11709 ctx = perf_event_ctx_lock_nested(event->group_leader,
11714 event->pmu = pmu;
11715 ret = pmu->event_init(event);
11718 perf_event_ctx_unlock(event->group_leader, ctx);
11722 has_extended_regs(event))
11726 event_has_any_exclude_flag(event))
11729 if (ret && event->destroy)
11730 event->destroy(event);
11739 static struct pmu *perf_init_event(struct perf_event *event)
11749  * pmus overwrite event->attr.type to forward the event to another pmu.
11751 event->orig_type = event->attr.type;
11754 if (event->parent && event->parent->pmu) {
11755 pmu = event->parent->pmu;
11756 ret = perf_try_init_event(pmu, event);
11765 type = event->attr.type;
11767 type = event->attr.config >> PERF_PMU_TYPE_SHIFT;
11772 event->attr.config &= PERF_HW_EVENT_MASK;
11781 if (event->attr.type != type && type != PERF_TYPE_RAW &&
11785 ret = perf_try_init_event(pmu, event);
11786 if (ret == -ENOENT && event->attr.type != type && !extended_type) {
11787 type = event->attr.type;
11798 ret = perf_try_init_event(pmu, event);
11815 static void attach_sb_event(struct perf_event *event)
11817 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
11820 list_add_rcu(&event->sb_list, &pel->list);
11831 static void account_pmu_sb_event(struct perf_event *event)
11833 if (is_sb_event(event))
11834 attach_sb_event(event);
11858 static void account_event(struct perf_event *event)
11862 if (event->parent)
11865 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
11867 if (event->attr.mmap || event->attr.mmap_data)
11869 if (event->attr.build_id)
11871 if (event->attr.comm)
11873 if (event->attr.namespaces)
11875 if (event->attr.cgroup)
11877 if (event->attr.task)
11879 if (event->attr.freq)
11881 if (event->attr.context_switch) {
11885 if (has_branch_stack(event))
11887 if (is_cgroup_event(event))
11889 if (event->attr.ksymbol)
11891 if (event->attr.bpf_event)
11893 if (event->attr.text_poke)
11924 account_pmu_sb_event(event);
11928 * Allocate and initialize an event structure
11939 struct perf_event *event;
11954 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
11956 if (!event)
11964 group_leader = event;
11966 mutex_init(&event->child_mutex);
11967 INIT_LIST_HEAD(&event->child_list);
11969 INIT_LIST_HEAD(&event->event_entry);
11970 INIT_LIST_HEAD(&event->sibling_list);
11971 INIT_LIST_HEAD(&event->active_list);
11972 init_event_group(event);
11973 INIT_LIST_HEAD(&event->rb_entry);
11974 INIT_LIST_HEAD(&event->active_entry);
11975 INIT_LIST_HEAD(&event->addr_filters.list);
11976 INIT_HLIST_NODE(&event->hlist_entry);
11979 init_waitqueue_head(&event->waitq);
11980 init_irq_work(&event->pending_irq, perf_pending_irq);
11981 init_task_work(&event->pending_task, perf_pending_task);
11982 rcuwait_init(&event->pending_work_wait);
11984 mutex_init(&event->mmap_mutex);
11985 raw_spin_lock_init(&event->addr_filters.lock);
11987 atomic_long_set(&event->refcount, 1);
11988 event->cpu = cpu;
11989 event->attr = *attr;
11990 event->group_leader = group_leader;
11991 event->pmu = NULL;
11992 event->oncpu = -1;
11994 event->parent = parent_event;
11996 event->ns = get_pid_ns(task_active_pid_ns(current));
11997 event->id = atomic64_inc_return(&perf_event_id);
11999 event->state = PERF_EVENT_STATE_INACTIVE;
12002 event->event_caps = parent_event->event_caps;
12005 event->attach_state = PERF_ATTACH_TASK;
12011 event->hw.target = get_task_struct(task);
12014 event->clock = &local_clock;
12016 event->clock = parent_event->clock;
12026 event->prog = prog;
12027 event->orig_overflow_handler =
12034 event->overflow_handler = overflow_handler;
12035 event->overflow_handler_context = context;
12036  } else if (is_write_backward(event)) {
12037 event->overflow_handler = perf_event_output_backward;
12038 event->overflow_handler_context = NULL;
12040 event->overflow_handler = perf_event_output_forward;
12041 event->overflow_handler_context = NULL;
12044 perf_event__state_init(event);
12048 hwc = &event->hw;
12063 if (!has_branch_stack(event))
12064 event->attr.branch_sample_type = 0;
12066 pmu = perf_init_event(event);
12082 if (event->attr.aux_output &&
12089 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
12094 err = exclusive_event_init(event);
12098 if (has_addr_filter(event)) {
12099 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
12102 if (!event->addr_filter_ranges) {
12111 if (event->parent) {
12112 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
12115 memcpy(event->addr_filter_ranges,
12116 event->parent->addr_filter_ranges,
12122 event->addr_filters_gen = 1;
12125 if (!event->parent) {
12126 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
12133 err = security_perf_event_alloc(event);
12138 account_event(event);
12140 return event;
12143 if (!event->parent) {
12144 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
12148 kfree(event->addr_filter_ranges);
12151 exclusive_event_destroy(event);
12154 if (is_cgroup_event(event))
12155 perf_detach_cgroup(event);
12156 if (event->destroy)
12157 event->destroy(event);
12160 if (event->hw.target)
12161 put_task_struct(event->hw.target);
12162 call_rcu(&event->rcu_head, free_event_rcu);
12303 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
12309 mutex_lock(&event->mmap_mutex);
12314 if (event == output_event)
12320 if (output_event->cpu != event->cpu)
12326 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
12332 if (output_event->clock != event->clock)
12339 if (is_write_backward(output_event) != is_write_backward(event))
12345 if (has_aux(event) && has_aux(output_event) &&
12346 event->pmu != output_event->pmu)
12352 * restarts after every removal, it is guaranteed this new event is
12356 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
12359 if (atomic_read(&event->mmap_count))
12375 ring_buffer_attach(event, rb);
12379 mutex_unlock(&event->mmap_mutex);
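The user-space trigger for perf_event_set_output() above is a single ioctl; a hedged sketch follows, assuming both fds are perf events on the same CPU with compatible clocks and ring-buffer direction.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int share_ring_buffer(int target_fd, int output_fd)
{
        /* target_fd's records are written into output_fd's mmap'ed buffer */
        return ioctl(target_fd, PERF_EVENT_IOC_SET_OUTPUT, output_fd);
}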
12387 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
12393 event->clock = &ktime_get_mono_fast_ns;
12398 event->clock = &ktime_get_raw_fast_ns;
12403 event->clock = &ktime_get_real_ns;
12407 event->clock = &ktime_get_boottime_ns;
12411 event->clock = &ktime_get_clocktai_ns;
12418 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
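A hedged user-space sketch (not kernel code) of selecting the clock that perf_event_set_clock() above maps onto event->clock; CLOCK_MONOTONIC_RAW is one of the NMI-safe choices, so it is accepted for all PMUs.

#include <linux/perf_event.h>
#include <time.h>

static void use_monotonic_raw_timestamps(struct perf_event_attr *attr)
{
        attr->use_clockid = 1;
        attr->clockid = CLOCK_MONOTONIC_RAW;    /* -> ktime_get_raw_fast_ns() */
        attr->sample_type |= PERF_SAMPLE_TIME;  /* timestamps in that clock   */
}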
12456 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12461 * @group_fd: group leader event fd
12462 * @flags: perf event open flags
12470 struct perf_event *event, *sibling;
12573 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
12575 if (IS_ERR(event)) {
12576 err = PTR_ERR(event);
12580 if (is_sampling_event(event)) {
12581 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
12591 pmu = event->pmu;
12594 err = perf_event_set_clock(event, attr.clockid);
12600 event->event_caps |= PERF_EV_CAP_SOFTWARE;
12609 * perf_install_in_context() call for this new event to
12621 ctx = find_get_context(task, event);
12636 * Check if the @cpu we're creating an event for is online.
12641 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
12660 if (group_leader->clock != event->clock)
12668 if (group_leader->cpu != event->cpu)
12683 if (is_software_event(event) &&
12686 * If the event is a sw event, but the group_leader
12697 } else if (!is_software_event(event)) {
12702 * try to add a hardware event, move the whole group to
12718 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12723 event->pmu_ctx = pmu_ctx;
12726 err = perf_event_set_output(event, output_event);
12731 if (!perf_event_validate_size(event)) {
12736 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
12743 * because we need to serialize with concurrent event creation.
12745 if (!exclusive_event_installable(event, ctx)) {
12752 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags);
12792 * event. What we want here is event in the initial
12804 * perf_install_in_context() which is the point the event is active and
12807 perf_event__header_size(event);
12808 perf_event__id_header_size(event);
12810 event->owner = current;
12812 perf_install_in_context(ctx, event, event->cpu);
12823 list_add_tail(&event->owner_entry, &current->perf_event_list);
12828 * new event on the sibling_list. This ensures destruction
12837 put_pmu_ctx(event->pmu_ctx);
12838 event->pmu_ctx = NULL; /* _free_event() */
12847 free_event(event);
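A hedged user-space sketch (not part of this file) of the group_fd path through sys_perf_event_open() above: a second event joins the leader's group and the whole group is toggled with PERF_IOC_FLAG_GROUP. Event choices and helper names are illustrative.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_sw_event(unsigned long long config, int group_fd)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = config;
        attr.disabled = (group_fd == -1);  /* only the leader starts disabled */

        /* pid == 0, cpu == -1: measure this thread on any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

static int run_group_demo(void)
{
        int leader, member;

        leader = open_sw_event(PERF_COUNT_SW_TASK_CLOCK, -1);
        if (leader < 0)
                return -1;
        member = open_sw_event(PERF_COUNT_SW_PAGE_FAULTS, leader);
        if (member < 0) {
                close(leader);
                return -1;
        }

        ioctl(leader, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
        ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
        /* ... workload under measurement ... */
        ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

        close(member);
        close(leader);
        return 0;
}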
12864 * @overflow_handler: callback to trigger when we hit the event
12875 struct perf_event *event;
12886 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
12888 if (IS_ERR(event)) {
12889 err = PTR_ERR(event);
12894 event->owner = TASK_TOMBSTONE;
12895 pmu = event->pmu;
12898 event->event_caps |= PERF_EV_CAP_SOFTWARE;
12903 ctx = find_get_context(task, event);
12916 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12921 event->pmu_ctx = pmu_ctx;
12925 * Check if the @cpu we're creating an event for is online.
12938 if (!exclusive_event_installable(event, ctx)) {
12943 perf_install_in_context(ctx, event, event->cpu);
12947 return event;
12951 event->pmu_ctx = NULL; /* _free_event() */
12957 free_event(event);
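A hedged kernel-side sketch (not from this file) of how a caller might use perf_event_create_kernel_counter() above, in the style of the hardlockup watchdog; it assumes a built-in or module context, and the demo_* names are invented for illustration.

#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *demo_event;

static void demo_overflow(struct perf_event *event,
                          struct perf_sample_data *data,
                          struct pt_regs *regs)
{
        /* called from the event's overflow path; keep it NMI/IRQ safe */
}

static int demo_start(void)
{
        struct perf_event_attr attr = {
                .type          = PERF_TYPE_SOFTWARE,
                .config        = PERF_COUNT_SW_CPU_CLOCK,
                .size          = sizeof(attr),
                .sample_period = 1000000,       /* cpu-clock counts ns: ~1 ms */
        };

        /* cpu 0, no task: a per-CPU, kernel-owned event */
        demo_event = perf_event_create_kernel_counter(&attr, 0, NULL,
                                                      demo_overflow, NULL);
        if (IS_ERR(demo_event))
                return PTR_ERR(demo_event);
        return 0;
}

static void demo_stop(void)
{
        perf_event_release_kernel(demo_event);
}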
12968 struct perf_event *event, *sibling;
12970 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
12971 perf_remove_from_context(event, 0);
12972 put_pmu_ctx(event->pmu_ctx);
12973 list_add(&event->migrate_entry, events);
12975 for_each_sibling_event(sibling, event) {
12985 int cpu, struct perf_event *event)
12988 struct perf_event_context *old_ctx = event->ctx;
12992 event->cpu = cpu;
12993 epc = find_get_pmu_context(pmu, ctx, event);
12994 event->pmu_ctx = epc;
12996 if (event->state >= PERF_EVENT_STATE_OFF)
12997 event->state = PERF_EVENT_STATE_INACTIVE;
12998 perf_install_in_context(ctx, event, cpu);
13001 * Now that event->ctx is updated and visible, put the old ctx.
13009 struct perf_event *event, *tmp;
13019 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
13020 if (event->group_leader == event)
13023 list_del(&event->migrate_entry);
13024 __perf_pmu_install_event(pmu, ctx, cpu, event);
13031 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
13032 list_del(&event->migrate_entry);
13033 __perf_pmu_install_event(pmu, ctx, cpu, event);
13097 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
13099 struct perf_event *parent_event = event->parent;
13119 perf_remove_from_context(event, detach_flags);
13122 if (event->state > PERF_EVENT_STATE_EXIT)
13123 perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
13135 free_event(event);
13143 perf_event_wakeup(event);
13208 * When a child task exits, feed back event values to parent events.
13215 struct perf_event *event, *tmp;
13218 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
13220 list_del_init(&event->owner_entry);
13227 smp_store_release(&event->owner, NULL);
13242 static void perf_free_event(struct perf_event *event,
13245 struct perf_event *parent = event->parent;
13251 list_del_init(&event->child_list);
13257 perf_group_detach(event);
13258 list_del_event(event, ctx);
13260 free_event(event);
13273 struct perf_event *event, *tmp;
13293 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
13294 perf_free_event(event, ctx);
13307 * _free_event()'s put_task_struct(event->hw.target) will be a
13343 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
13345 if (!event)
13348 return &event->attr;
13361 * Inherit an event from parent task to child task.
13423 * Make the child state follow the state of the parent event,
13462 * Link this into the parent event's child list
13471 * Inherits an event group.
13515 * Creates the child task context and tries to inherit the event-group.
13518 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13526 inherit_task_group(struct perf_event *event, struct task_struct *parent,
13534 if (!event->attr.inherit ||
13535 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
13537 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
13557 ret = inherit_group(event, parent, parent_ctx, child, child_ctx);
13571 struct perf_event *event;
13605 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
13606 ret = inherit_task_group(event, parent, parent_ctx,
13621 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
13622 ret = inherit_task_group(event, parent, parent_ctx,
13729 struct perf_event *event;
13733 list_for_each_entry(event, &ctx->event_list, event_entry)
13734 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);