Searched refs:oncpu (Results 1 – 13 of 13) sorted by relevance
/openbmc/linux/tools/testing/selftests/bpf/prog_tests/
get_stackid_cannot_attach.c
    30  bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);  in test_get_stackid_cannot_attach()
    49  skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,  in test_get_stackid_cannot_attach()
    51  ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_no_callchain");  in test_get_stackid_cannot_attach()
    65  skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,  in test_get_stackid_cannot_attach()
    67  ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");  in test_get_stackid_cannot_attach()
    68  bpf_link__destroy(skel->links.oncpu);  in test_get_stackid_cannot_attach()
    82  skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,  in test_get_stackid_cannot_attach()
    84  ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_exclude_callchain_kernel");  in test_get_stackid_cannot_attach()
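These selftests share one pattern: force the program type to BPF_PROG_TYPE_PERF_EVENT, open a perf event with perf_event_open(), and attach the skeleton's oncpu program to the resulting fd. The two stacktrace selftests listed below follow the same pattern. Below is a minimal sketch of that pattern, not the exact test code; the helper name attach_oncpu() and the perf_event_attr values are illustrative assumptions.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

/* Sketch only: open a software CPU-clock perf event and attach a
 * BPF_PROG_TYPE_PERF_EVENT program to it. The tests call
 * bpf_program__set_type(prog, BPF_PROG_TYPE_PERF_EVENT) before loading
 * the skeleton; this helper covers just the open + attach step. */
static struct bpf_link *attach_oncpu(struct bpf_program *prog)
{
	struct perf_event_attr attr = {
		.size		= sizeof(struct perf_event_attr),
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.freq		= 1,
		.sample_freq	= 1000,
		/* callchain sampling: get_stackid_cannot_attach expects the
		 * attach to fail when this bit is missing or when
		 * exclude_callchain_kernel is set */
		.sample_type	= PERF_SAMPLE_CALLCHAIN,
	};
	struct bpf_link *link;
	int pmu_fd;

	/* pid = -1, cpu = 0: profile every task on CPU 0 (needs privileges) */
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (pmu_fd < 0)
		return NULL;

	/* with libbpf 1.0 semantics this returns NULL and sets errno on error */
	link = bpf_program__attach_perf_event(prog, pmu_fd);
	if (!link)
		close(pmu_fd);
	return link;
}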
stacktrace_build_id_nmi.c
    29  bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);  in test_stacktrace_build_id_nmi()
    47  skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,  in test_stacktrace_build_id_nmi()
    49  if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {  in test_stacktrace_build_id_nmi()
perf_event_stackmap.c
    99  skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,  in test_perf_event_stackmap()
   101  if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {  in test_perf_event_stackmap()
/openbmc/linux/samples/bpf/ |
offwaketime.bpf.c
   108  int oncpu(struct trace_event_raw_sched_switch *ctx)  function
   114  int oncpu(struct pt_regs *ctx)
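offwaketime defines oncpu() twice, as compile-time alternatives: one taking the raw sched_switch tracepoint record and one taking pt_regs for a kprobe attachment. The sketch below shows that shape; the guard macro and the exact SEC() targets are assumptions, not the sample's actual code.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#ifdef USE_TRACEPOINT
/* tracepoint variant: handler receives the raw sched_switch record */
SEC("tracepoint/sched/sched_switch")
int oncpu(struct trace_event_raw_sched_switch *ctx)
{
	/* ctx->prev_pid / ctx->next_pid identify the switching tasks */
	return 0;
}
#else
/* kprobe variant: handler receives saved registers */
SEC("kprobe/finish_task_switch")
int oncpu(struct pt_regs *ctx)
{
	/* the first argument register would hold the previous task_struct */
	return 0;
}
#endif

char LICENSE[] SEC("license") = "GPL";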
/openbmc/linux/tools/testing/selftests/bpf/progs/ |
test_tracepoint.c
    20  int oncpu(struct sched_switch_args *ctx)  in oncpu() function
perf_event_stackmap.c
    31  int oncpu(void *ctx)  in oncpu() function
stacktrace_map_skip.c
    36  int oncpu(struct trace_event_raw_sched_switch *ctx)  in oncpu() function
test_stacktrace_build_id.c
    43  int oncpu(struct pt_regs *args)  in oncpu() function
test_stacktrace_map.c
    54  int oncpu(struct sched_switch_args *ctx)  in oncpu() function
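The handlers in this directory are all small BPF programs named oncpu() that record the interrupted context's stack into a stack-trace map via bpf_get_stackid(). A minimal sketch of that shape follows; the map name, its sizes, and the hit counter are illustrative assumptions rather than any one of the listed programs.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

/* stack-trace map: each entry holds one captured call chain */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 16384);
	__type(key, __u32);
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
} stackmap SEC(".maps");

long total_hits = 0;

SEC("perf_event")
int oncpu(void *ctx)
{
	/* store the kernel stack of the interrupted context; the returned
	 * id indexes an entry in stackmap (negative on error) */
	long stackid = bpf_get_stackid(ctx, &stackmap, 0);

	if (stackid >= 0)
		__sync_fetch_and_add(&total_hits, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";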
/openbmc/linux/kernel/sched/ |
psi.c
   221  static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)  in test_state() argument
   234  return unlikely(tasks[NR_RUNNING] > oncpu);  in test_state()
   236  return unlikely(tasks[NR_RUNNING] && !oncpu);  in test_state()
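In PSI's test_state(), oncpu is a boolean saying whether a task of the group is currently running on the CPU. The two returned expressions are the CPU-pressure checks: "some" pressure when there are more runnable tasks than the one that is on-CPU (something is waiting), and "full" pressure when runnable tasks exist but none is on-CPU. A standalone restatement of just that logic, with simplified names and not the kernel function itself:

#include <stdbool.h>

/* "some" CPU pressure: at least one runnable task is waiting, i.e. there
 * are more runnable tasks than the (0 or 1) task currently on the CPU. */
static bool cpu_some(unsigned int nr_running, bool oncpu)
{
	return nr_running > (unsigned int)oncpu;
}

/* "full" CPU pressure: runnable tasks exist, but none of them is on the CPU. */
static bool cpu_full(unsigned int nr_running, bool oncpu)
{
	return nr_running && !oncpu;
}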
/openbmc/linux/kernel/events/ |
core.c
  2283  event->oncpu = -1;  in event_sched_out()
  2530  WRITE_ONCE(event->oncpu, smp_processor_id());  in event_sched_in()
  2532  * Order event::oncpu write to happen before the ACTIVE state is  in event_sched_in()
  2534  * ->oncpu if it sees ACTIVE.  in event_sched_in()
  2555  event->oncpu = -1;  in event_sched_in()
  3058  if (READ_ONCE(event->oncpu) != smp_processor_id())  in __perf_event_stop()
  3095  * inactive here (event->oncpu==-1), there's nothing more to do;  in perf_event_stop()
  3098  ret = cpu_function_call(READ_ONCE(event->oncpu),  in perf_event_stop()
  4599  if (event->attr.pinned && event->oncpu != smp_processor_id()) {  in perf_event_read_local()
  4607  * oncpu  in perf_event_read_local()
  (additional matches in this file not shown)
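These hits show the lifetime and ordering rules for event->oncpu: event_sched_in() publishes the current CPU before the ACTIVE state becomes visible, so any reader that observes ACTIVE also observes a valid oncpu; event_sched_out() and the sched-in error path reset it to -1, meaning "not running anywhere". A sketch of that publish pattern follows; it is a simplification for illustration, not the full kernel functions.

#include <linux/perf_event.h>
#include <linux/smp.h>

/* Sketch of the sched-in publish step: record the CPU, then make ACTIVE
 * visible, so perf_event_stop()/perf_event_read_local() seeing ACTIVE also
 * see a valid ->oncpu. */
static void sketch_sched_in(struct perf_event *event)
{
	WRITE_ONCE(event->oncpu, smp_processor_id());
	smp_wmb();	/* order the ->oncpu write before the state change */
	WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
}

/* Sketch of sched-out (and the sched-in failure path): mark the event as
 * not scheduled on any CPU. */
static void sketch_sched_out(struct perf_event *event)
{
	WRITE_ONCE(event->state, PERF_EVENT_STATE_INACTIVE);
	event->oncpu = -1;
}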
/openbmc/linux/include/linux/ |
perf_event.h
   761  int oncpu;  member
/openbmc/linux/kernel/trace/ |
bpf_trace.c
   642  if (unlikely(event->oncpu != cpu))  in __bpf_perf_event_output()
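On the consumer side, both __perf_event_stop() in core.c and __bpf_perf_event_output() here refuse to operate on an event unless it is currently scheduled on the local CPU, with oncpu == -1 meaning the event is inactive. A sketch of that reader-side check, simplified and not the exact kernel code:

#include <linux/perf_event.h>
#include <linux/smp.h>
#include <linux/errno.h>

static int sketch_touch_local_event(struct perf_event *event)
{
	int cpu = READ_ONCE(event->oncpu);

	if (cpu == -1)
		return -ENOENT;		/* event is not scheduled anywhere */
	if (cpu != smp_processor_id())
		return -EOPNOTSUPP;	/* event belongs to another CPU */

	/* safe to operate on this CPU's instance of the event here */
	return 0;
}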