Lines Matching +full:close +full:- +full:range (matches from tools/perf/util/evlist.c)
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Parts came from builtin-{top,stat,record}.c, see those files for further
26 #include "bpf-event.h"
33 #include "util/bpf-filter.h"
41 #include "parse-events.h"
42 #include <subcmd/parse-options.h>
69 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
70 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
75 perf_evlist__init(&evlist->core); in evlist__init()
76 perf_evlist__set_maps(&evlist->core, cpus, threads); in evlist__init()
77 evlist->workload.pid = -1; in evlist__init()
78 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; in evlist__init()
79 evlist->ctl_fd.fd = -1; in evlist__init()
80 evlist->ctl_fd.ack = -1; in evlist__init()
81 evlist->ctl_fd.pos = -1; in evlist__init()
110 if (evlist->core.nr_entries > 1) { in evlist__new_default()
133 * evlist__set_id_pos - set the positions of event ids.
143 evlist->id_pos = first->id_pos; in evlist__set_id_pos()
144 evlist->is_pos = first->is_pos; in evlist__set_id_pos()
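(Note, inferred from evlist__event2id() further down in this listing: id_pos is the event-ID offset from the start of the sample array in PERF_RECORD_SAMPLE records, while is_pos is the offset counted back from the end for other record types that carry sample_id_all data; evlist__set_id_pos() simply caches the first evsel's positions on the evlist.)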
162 list_del_init(&pos->core.node); in evlist__purge()
163 pos->evlist = NULL; in evlist__purge()
167 evlist->core.nr_entries = 0; in evlist__purge()
172 event_enable_timer__exit(&evlist->eet); in evlist__exit()
173 zfree(&evlist->mmap); in evlist__exit()
174 zfree(&evlist->overwrite_mmap); in evlist__exit()
175 perf_evlist__exit(&evlist->core); in evlist__exit()
193 perf_evlist__add(&evlist->core, &entry->core); in evlist__add()
194 entry->evlist = evlist; in evlist__add()
195 entry->tracking = !entry->core.idx; in evlist__add()
197 if (evlist->core.nr_entries == 1) in evlist__add()
203 evsel->evlist = NULL; in evlist__remove()
204 perf_evlist__remove(&evlist->core, &evsel->core); in evlist__remove()
213 list_del_init(&evsel->core.node); in evlist__splice_list_tail()
221 list_del_init(&evsel->core.node); in evlist__splice_list_tail()
240 err = -EEXIST; in __evlist__set_tracepoints_handlers()
241 if (evsel->handler != NULL) in __evlist__set_tracepoints_handlers()
243 evsel->handler = assocs[i].handler; in __evlist__set_tracepoints_handlers()
253 perf_evlist__set_leader(&evlist->core); in evlist__set_leader()
267 return evsel__new_idx(&attr, evlist->core.nr_entries); in evlist__dummy_event()
275 return -ENOMEM; in evlist__add_dummy()
288 evsel->core.attr.exclude_kernel = 1; in evlist__add_aux_dummy()
289 evsel->core.attr.exclude_guest = 1; in evlist__add_aux_dummy()
290 evsel->core.attr.exclude_hv = 1; in evlist__add_aux_dummy()
291 evsel->core.system_wide = system_wide; in evlist__add_aux_dummy()
292 evsel->no_aux_samples = true; in evlist__add_aux_dummy()
293 evsel->name = strdup("dummy:u"); in evlist__add_aux_dummy()
310 evsel->core.system_wide = system_wide; in evlist__add_sched_switch()
311 evsel->no_aux_samples = true; in evlist__add_sched_switch()
325 evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i); in evlist__add_attrs()
328 list_add_tail(&evsel->core.node, &head); in evlist__add_attrs()
338 return -1; in evlist__add_attrs()
366 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && in evlist__find_tracepoint_by_id()
367 (int)evsel->core.attr.config == id) in evlist__find_tracepoint_by_id()
379 if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) && in evlist__find_tracepoint_by_name()
380 (strcmp(evsel->name, name) == 0)) in evlist__find_tracepoint_by_name()
393 return -1; in evlist__add_newtp()
395 evsel->handler = handler; in evlist__add_newtp()
408 .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), in evlist__cpu_begin()
409 .cpu = (struct perf_cpu){ .cpu = -1}, in evlist__cpu_begin()
419 itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); in evlist__cpu_begin()
421 itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu); in evlist__cpu_begin()
426 if (itr.cpu_map_idx == -1) in evlist__cpu_begin()
435 while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) { in evlist_cpu_iterator__next()
436 evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel); in evlist_cpu_iterator__next()
437 evlist_cpu_itr->cpu_map_idx = in evlist_cpu_iterator__next()
438 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, in evlist_cpu_iterator__next()
439 evlist_cpu_itr->cpu); in evlist_cpu_iterator__next()
440 if (evlist_cpu_itr->cpu_map_idx != -1) in evlist_cpu_iterator__next()
443 evlist_cpu_itr->evlist_cpu_map_idx++; in evlist_cpu_iterator__next()
444 if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) { in evlist_cpu_iterator__next()
445 evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container); in evlist_cpu_iterator__next()
446 evlist_cpu_itr->cpu = in evlist_cpu_iterator__next()
447 perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, in evlist_cpu_iterator__next()
448 evlist_cpu_itr->evlist_cpu_map_idx); in evlist_cpu_iterator__next()
449 if (evlist_cpu_itr->affinity) in evlist_cpu_iterator__next()
450 affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu); in evlist_cpu_iterator__next()
451 evlist_cpu_itr->cpu_map_idx = in evlist_cpu_iterator__next()
452 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, in evlist_cpu_iterator__next()
453 evlist_cpu_itr->cpu); in evlist_cpu_iterator__next()
458 if (evlist_cpu_itr->cpu_map_idx == -1) in evlist_cpu_iterator__next()
465 return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr; in evlist_cpu_iterator__end()
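The three helpers above implement the per-CPU, per-evsel iteration used when opening, enabling and closing events. A minimal usage sketch, assuming the iterator declarations from util/evlist.h and with evlist standing for the caller's struct evlist pointer (in the tree this loop is normally spelled through an evlist__for_each_cpu() style macro rather than written by hand):

	struct evlist_cpu_iterator itr;

	/* Passing a NULL affinity skips the affinity__set() pinning seen above. */
	for (itr = evlist__cpu_begin(evlist, /*affinity=*/NULL);
	     !evlist_cpu_iterator__end(&itr);
	     evlist_cpu_iterator__next(&itr)) {
		/*
		 * Each step yields one (event, CPU) pair whose CPU is present in
		 * that evsel's own CPU map: itr.evsel, itr.cpu and itr.cpu_map_idx
		 * are all valid here.
		 */
	}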
482 if (!evsel__is_group_leader(pos) || !pos->core.fd) in evlist__is_enabled()
485 if (!pos->disabled) in evlist__is_enabled()
499 if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) { in __evlist__disable()
511 if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__disable()
515 if (pos->immediate) in __evlist__disable()
517 if (pos->immediate != imm) in __evlist__disable()
529 if (!evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__disable()
533 pos->disabled = true; in __evlist__disable()
541 evlist->enabled = evlist__is_enabled(evlist); in __evlist__disable()
543 evlist->enabled = false; in __evlist__disable()
568 if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) { in __evlist__enable()
578 if (!evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__enable()
588 if (!evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__enable()
592 pos->disabled = false; in __evlist__enable()
600 evlist->enabled = true; in __evlist__enable()
620 (evlist->enabled ? evlist__disable : evlist__enable)(evlist); in evlist__toggle_enable()
625 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default); in evlist__add_pollfd()
630 return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask); in evlist__filter_pollfd()
636 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, in evlist__add_wakeup_eventfd()
644 return perf_evlist__poll(&evlist->core, timeout); in evlist__poll()
654 head = &evlist->core.heads[hash]; in evlist__id2sid()
657 if (sid->id == id) in evlist__id2sid()
667 if (evlist->core.nr_entries == 1 || !id) in evlist__id2evsel()
672 return container_of(sid->evsel, struct evsel, core); in evlist__id2evsel()
689 return container_of(sid->evsel, struct evsel, core); in evlist__id2evsel_strict()
696 const __u64 *array = event->sample.array; in evlist__event2id()
699 n = (event->header.size - sizeof(event->header)) >> 3; in evlist__event2id()
701 if (event->header.type == PERF_RECORD_SAMPLE) { in evlist__event2id()
702 if (evlist->id_pos >= n) in evlist__event2id()
703 return -1; in evlist__event2id()
704 *id = array[evlist->id_pos]; in evlist__event2id()
706 if (evlist->is_pos > n) in evlist__event2id()
707 return -1; in evlist__event2id()
708 n -= evlist->is_pos; in evlist__event2id()
722 if (evlist->core.nr_entries == 1) in evlist__event2evsel()
725 if (!first->core.attr.sample_id_all && in evlist__event2evsel()
726 event->header.type != PERF_RECORD_SAMPLE) in evlist__event2evsel()
737 head = &evlist->core.heads[hash]; in evlist__event2evsel()
740 if (sid->id == id) in evlist__event2evsel()
741 return container_of(sid->evsel, struct evsel, core); in evlist__event2evsel()
750 if (!evlist->overwrite_mmap) in evlist__set_paused()
753 for (i = 0; i < evlist->core.nr_mmaps; i++) { in evlist__set_paused()
754 int fd = evlist->overwrite_mmap[i].core.fd; in evlist__set_paused()
780 if (evlist->mmap) in evlist__munmap_nofree()
781 for (i = 0; i < evlist->core.nr_mmaps; i++) in evlist__munmap_nofree()
782 perf_mmap__munmap(&evlist->mmap[i].core); in evlist__munmap_nofree()
784 if (evlist->overwrite_mmap) in evlist__munmap_nofree()
785 for (i = 0; i < evlist->core.nr_mmaps; i++) in evlist__munmap_nofree()
786 perf_mmap__munmap(&evlist->overwrite_mmap[i].core); in evlist__munmap_nofree()
792 zfree(&evlist->mmap); in evlist__munmap()
793 zfree(&evlist->overwrite_mmap); in evlist__munmap()
809 map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); in evlist__alloc_mmap()
813 for (i = 0; i < evlist->core.nr_mmaps; i++) { in evlist__alloc_mmap()
814 struct perf_mmap *prev = i ? &map[i - 1].core : NULL; in evlist__alloc_mmap()
841 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx); in perf_evlist__mmap_cb_idx()
850 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap; in perf_evlist__mmap_cb_get()
858 evlist->overwrite_mmap = maps; in perf_evlist__mmap_cb_get()
859 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) in perf_evlist__mmap_cb_get()
862 evlist->mmap = maps; in perf_evlist__mmap_cb_get()
892 max -= (page_size / 1024); in perf_event_mlock_kb_in_pages()
925 return -EINVAL; in parse_pages_arg()
928 if (val != (unsigned long) -1) { in parse_pages_arg()
936 return -EINVAL; in parse_pages_arg()
947 return -EINVAL; in parse_pages_arg()
955 return -EINVAL; in parse_pages_arg()
970 pr_err("Invalid argument for --mmap_pages/-m\n"); in __evlist__parse_mmap_pages()
971 return -1; in __evlist__parse_mmap_pages()
980 return __evlist__parse_mmap_pages(opt->value, str); in evlist__parse_mmap_pages()
984 * evlist__mmap_ex - Create mmaps to receive events.
988 * @auxtrace_pages - auxtrace map length in pages
989 * @auxtrace_overwrite - overwrite older auxtrace data?
1022 evlist->core.mmap_len = evlist__mmap_size(pages); in evlist__mmap_ex()
1023 pr_debug("mmap size %zuB\n", evlist->core.mmap_len); in evlist__mmap_ex()
1025 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len, in evlist__mmap_ex()
1028 return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core); in evlist__mmap_ex()
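evlist__mmap_ex() is the full-featured variant behind the plain evlist__mmap() wrapper. A rough sketch of the usual setup sequence, assuming the simplified evlist__mmap(evlist, pages) wrapper with default options; target, pages and the out_err label are placeholders, and the extra evlist__mmap_ex() parameters are omitted:

	struct evlist *evlist = evlist__new();

	/* ... events added via parse_events() or evlist__add() ... */

	if (evlist__create_maps(evlist, &target) < 0)	/* CPU/thread maps from the --pid/--tid/--cpu target */
		goto out_err;
	if (evlist__open(evlist) < 0)			/* one perf_event fd per (evsel, CPU, thread) */
		goto out_err;
	if (evlist__mmap(evlist, pages) < 0)		/* ring buffers; 'pages' as parsed by evlist__parse_mmap_pages() */
		goto out_err;
	evlist__enable(evlist);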
1038 bool all_threads = (target->per_thread && target->system_wide); in evlist__create_maps()
1043 * If specify '-a' and '--per-thread' to perf record, perf record in evlist__create_maps()
1044 * will override '--per-thread'. target->per_thread = false and in evlist__create_maps()
1045 * target->system_wide = true. in evlist__create_maps()
1047 * If specify '--per-thread' only to perf record, in evlist__create_maps()
1048 * target->per_thread = true and target->system_wide = false. in evlist__create_maps()
1050 * So target->per_thread && target->system_wide is false. in evlist__create_maps()
1055 * For perf stat, it allows the case that target->per_thread and in evlist__create_maps()
1056 * target->system_wide are all true. It means to collect system-wide in evlist__create_maps()
1057 * per-thread data. thread_map__new_str will call in evlist__create_maps()
1060 threads = thread_map__new_str(target->pid, target->tid, target->uid, in evlist__create_maps()
1064 return -1; in evlist__create_maps()
1069 cpus = perf_cpu_map__new(target->cpu_list); in evlist__create_maps()
1074 evlist->core.has_user_cpus = !!target->cpu_list; in evlist__create_maps()
1076 perf_evlist__set_maps(&evlist->core, cpus, threads); in evlist__create_maps()
1086 return -1; in evlist__create_maps()
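The comment above reduces to the following combinations of target flags (restated from the comment, not an exhaustive matrix):

	/*
	 *  perf record -a --per-thread : per_thread = false, system_wide = true   -> all_threads = false
	 *  perf record --per-thread    : per_thread = true,  system_wide = false  -> all_threads = false
	 *  perf stat, system-wide
	 *  per-thread mode             : per_thread = true,  system_wide = true   -> all_threads = true
	 */

where all_threads is the local computed from these two flags a few lines higher in evlist__create_maps().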
1099 if (evsel->filter) { in evlist__apply_filters()
1100 err = perf_evsel__apply_filter(&evsel->core, evsel->filter); in evlist__apply_filters()
1108 * non-tracepoint events can have BPF filters. in evlist__apply_filters()
1110 if (!list_empty(&evsel->bpf_filters)) { in evlist__apply_filters()
1128 return -1; in evlist__set_tp_filter()
1131 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) in evlist__set_tp_filter()
1148 return -1; in evlist__append_tp_filter()
1151 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) in evlist__append_tp_filter()
1220 if (evlist->core.nr_entries == 1) in evlist__valid_sample_type()
1223 if (evlist->id_pos < 0 || evlist->is_pos < 0) in evlist__valid_sample_type()
1227 if (pos->id_pos != evlist->id_pos || in evlist__valid_sample_type()
1228 pos->is_pos != evlist->is_pos) in evlist__valid_sample_type()
1239 if (evlist->combined_sample_type) in __evlist__combined_sample_type()
1240 return evlist->combined_sample_type; in __evlist__combined_sample_type()
1243 evlist->combined_sample_type |= evsel->core.attr.sample_type; in __evlist__combined_sample_type()
1245 return evlist->combined_sample_type; in __evlist__combined_sample_type()
1250 evlist->combined_sample_type = 0; in evlist__combined_sample_type()
1260 branch_type |= evsel->core.attr.branch_sample_type; in evlist__combined_branch_type()
1267 u64 read_format = first->core.attr.read_format; in evlist__valid_read_format()
1268 u64 sample_type = first->core.attr.sample_type; in evlist__valid_read_format()
1271 if (read_format != pos->core.attr.read_format) { in evlist__valid_read_format()
1273 read_format, (u64)pos->core.attr.read_format); in evlist__valid_read_format()
1290 return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0; in evlist__id_hdr_size()
1298 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) in evlist__valid_sample_id_all()
1308 return first->core.attr.sample_id_all; in evlist__sample_id_all()
1313 evlist->selected = evsel; in evlist__set_selected()
1326 if (!evlist->core.user_requested_cpus || in evlist__close()
1327 cpu_map__is_dummy(evlist->core.user_requested_cpus)) { in evlist__close()
1337 perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core, in evlist__close()
1343 perf_evsel__free_fd(&evsel->core); in evlist__close()
1344 perf_evsel__free_id(&evsel->core); in evlist__close()
1346 perf_evlist__reset_id_hash(&evlist->core); in evlist__close()
1358 * FIXME: -ENOMEM is the best we can do here, the cpu_map in evlist__create_syswide_maps()
1361 * default cpu identity map :-\ in evlist__create_syswide_maps()
1371 perf_evlist__set_maps(&evlist->core, cpus, threads); in evlist__create_syswide_maps()
1377 return -ENOMEM; in evlist__create_syswide_maps()
1387 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL in evlist__open()
1389 if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) { in evlist__open()
1398 err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads); in evlist__open()
1406 errno = -err; in evlist__open()
1416 evlist->workload.cork_fd = -1; in evlist__prepare_workload()
1420 return -1; in evlist__prepare_workload()
1428 evlist->workload.pid = fork(); in evlist__prepare_workload()
1429 if (evlist->workload.pid < 0) { in evlist__prepare_workload()
1434 if (!evlist->workload.pid) { in evlist__prepare_workload()
1442 close(child_ready_pipe[0]); in evlist__prepare_workload()
1443 close(go_pipe[1]); in evlist__prepare_workload()
1447 * Change the name of this process not to confuse --exclude-perf users in evlist__prepare_workload()
1451 prctl(PR_SET_NAME, "perf-exec"); in evlist__prepare_workload()
1456 close(child_ready_pipe[1]); in evlist__prepare_workload()
1468 * the parent will just close workload.cork_fd, without writing in evlist__prepare_workload()
1473 if (ret == -1) in evlist__prepare_workload()
1488 exit(-1); in evlist__prepare_workload()
1500 if (evlist->core.threads == NULL) { in evlist__prepare_workload()
1501 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", in evlist__prepare_workload()
1505 perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid); in evlist__prepare_workload()
1508 close(child_ready_pipe[1]); in evlist__prepare_workload()
1509 close(go_pipe[0]); in evlist__prepare_workload()
1513 if (read(child_ready_pipe[0], &bf, 1) == -1) { in evlist__prepare_workload()
1519 evlist->workload.cork_fd = go_pipe[1]; in evlist__prepare_workload()
1520 close(child_ready_pipe[0]); in evlist__prepare_workload()
1524 close(go_pipe[0]); in evlist__prepare_workload()
1525 close(go_pipe[1]); in evlist__prepare_workload()
1527 close(child_ready_pipe[0]); in evlist__prepare_workload()
1528 close(child_ready_pipe[1]); in evlist__prepare_workload()
1529 return -1; in evlist__prepare_workload()
1534 if (evlist->workload.cork_fd >= 0) { in evlist__start_workload()
1540 ret = write(evlist->workload.cork_fd, &bf, 1); in evlist__start_workload()
1544 close(evlist->workload.cork_fd); in evlist__start_workload()
1545 evlist->workload.cork_fd = -1; in evlist__start_workload()
1556 if (evlist->workload.cork_fd >= 0) { in evlist__cancel_workload()
1557 close(evlist->workload.cork_fd); in evlist__cancel_workload()
1558 evlist->workload.cork_fd = -1; in evlist__cancel_workload()
1559 waitpid(evlist->workload.pid, &status, WNOHANG); in evlist__cancel_workload()
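evlist__prepare_workload() and evlist__start_workload() implement the usual fork-and-cork pattern: the child is forked early, signals readiness over child_ready_pipe, then blocks reading go_pipe and only exec()s the workload once the parent writes a byte to workload.cork_fd; evlist__cancel_workload() instead closes the pipe without writing and reaps the child. A minimal caller sketch, where target is a placeholder and the trailing pipe_output flag and exec_error callback are assumptions about the full signature:

	/* Fork the (corked) workload before opening and mmapping the events. */
	if (evlist__prepare_workload(evlist, &target, argv,
				     /*pipe_output=*/false, /*exec_error=*/NULL) < 0)
		return -1;

	/* ... evlist__open()/evlist__mmap()/evlist__enable() ... */

	evlist__start_workload(evlist);	/* uncork: the child exec()s the workload now */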
1569 return -EFAULT; in evlist__parse_sample()
1573 if (perf_guest && sample->id) { in evlist__parse_sample()
1574 struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id); in evlist__parse_sample()
1577 sample->machine_pid = sid->machine_pid; in evlist__parse_sample()
1578 sample->vcpu = sid->vcpu.cpu; in evlist__parse_sample()
1589 return -EFAULT; in evlist__parse_sample_timestamp()
1607 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); in evlist__strerror_open()
1610 printed += scnprintf(buf + printed, size - printed, in evlist__strerror_open()
1613 printed += scnprintf(buf + printed, size - printed, in evlist__strerror_open()
1614 "For system wide tracing it needs to be set to -1.\n"); in evlist__strerror_open()
1616 printed += scnprintf(buf + printed, size - printed, in evlist__strerror_open()
1617 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" in evlist__strerror_open()
1627 if (first->core.attr.sample_freq < (u64)max_freq) in evlist__strerror_open()
1634 emsg, max_freq, first->core.attr.sample_freq); in evlist__strerror_open()
1649 int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; in evlist__strerror_mmap()
1654 printed += scnprintf(buf + printed, size - printed, in evlist__strerror_mmap()
1661 printed += scnprintf(buf + printed, size - printed, in evlist__strerror_mmap()
1662 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", in evlist__strerror_mmap()
1666 printed += scnprintf(buf + printed, size - printed, in evlist__strerror_mmap()
1667 "Hint:\tTry using a smaller -m/--mmap-pages value."); in evlist__strerror_mmap()
1687 list_move_tail(&evsel->core.node, &move); in evlist__to_front()
1690 list_splice(&move, &evlist->core.entries); in evlist__to_front()
1698 if (evsel->tracking) in evlist__get_tracking_event()
1709 if (tracking_evsel->tracking) in evlist__set_tracking_event()
1714 evsel->tracking = false; in evlist__set_tracking_event()
1717 tracking_evsel->tracking = true; in evlist__set_tracking_event()
1732 perf_evlist__go_system_wide(&evlist->core, &evsel->core); in evlist__findnew_tracking_event()
1743 if (!evsel->name) in evlist__find_evsel_by_str()
1754 enum bkw_mmap_state old_state = evlist->bkw_mmap_state; in evlist__toggle_bkw_mmap()
1761 if (!evlist->overwrite_mmap) in evlist__toggle_bkw_mmap()
1791 evlist->bkw_mmap_state = state; in evlist__toggle_bkw_mmap()
1814 if (!evsel->core.attr.exclude_kernel) in evlist__exclude_kernel()
1832 leader->forced_leader = true; in evlist__force_leader()
1836 struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close) in evlist__reset_weak_group() argument
1844 leader->name, leader->core.nr_members); in evlist__reset_weak_group()
1854 if (is_open && close) in evlist__reset_weak_group()
1855 perf_evsel__close(&c2->core); in evlist__reset_weak_group()
1857 * We want to close all members of the group and reopen in evlist__reset_weak_group()
1867 c2->reset_group = true; in evlist__reset_weak_group()
1871 if (leader->core.nr_members == 1) in evlist__reset_weak_group()
1872 leader->core.nr_members = 0; in evlist__reset_weak_group()
1882 return -EINVAL; in evlist__parse_control_fifo()
1886 return -EINVAL; in evlist__parse_control_fifo()
1890 return -ENOMEM; in evlist__parse_control_fifo()
1903 ret = -errno; in evlist__parse_control_fifo()
1914 ret = -errno; in evlist__parse_control_fifo()
1936 return -EINVAL; in evlist__parse_control()
1941 return -EINVAL; in evlist__parse_control()
1945 return -EINVAL; in evlist__parse_control()
1955 close(ctl_fd); in evlist__close_control()
1957 close(ctl_fd_ack); in evlist__close_control()
1963 if (fd == -1) { in evlist__initialize_ctlfd()
1968 evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, in evlist__initialize_ctlfd()
1971 if (evlist->ctl_fd.pos < 0) { in evlist__initialize_ctlfd()
1972 evlist->ctl_fd.pos = -1; in evlist__initialize_ctlfd()
1974 return -1; in evlist__initialize_ctlfd()
1977 evlist->ctl_fd.fd = fd; in evlist__initialize_ctlfd()
1978 evlist->ctl_fd.ack = ack; in evlist__initialize_ctlfd()
1985 return evlist->ctl_fd.pos >= 0; in evlist__ctlfd_initialized()
1990 struct pollfd *entries = evlist->core.pollfd.entries; in evlist__finalize_ctlfd()
1995 entries[evlist->ctl_fd.pos].fd = -1; in evlist__finalize_ctlfd()
1996 entries[evlist->ctl_fd.pos].events = 0; in evlist__finalize_ctlfd()
1997 entries[evlist->ctl_fd.pos].revents = 0; in evlist__finalize_ctlfd()
1999 evlist->ctl_fd.pos = -1; in evlist__finalize_ctlfd()
2000 evlist->ctl_fd.ack = -1; in evlist__finalize_ctlfd()
2001 evlist->ctl_fd.fd = -1; in evlist__finalize_ctlfd()
2015 data_size--; in evlist__ctlfd_recv()
2018 err = read(evlist->ctl_fd.fd, &c, 1); in evlist__ctlfd_recv()
2026 } else if (err == -1) { in evlist__ctlfd_recv()
2032 pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd); in evlist__ctlfd_recv()
2042 (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) { in evlist__ctlfd_recv()
2045 (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) { in evlist__ctlfd_recv()
2048 (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) { in evlist__ctlfd_recv()
2052 (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) { in evlist__ctlfd_recv()
2055 (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) { in evlist__ctlfd_recv()
2058 (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) { in evlist__ctlfd_recv()
2070 if (evlist->ctl_fd.ack == -1) in evlist__ctlfd_ack()
2073 err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG, in evlist__ctlfd_ack()
2075 if (err == -1) in evlist__ctlfd_ack()
2076 pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack); in evlist__ctlfd_ack()
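The tags compared against in evlist__ctlfd_recv() above are the plain command words sent over the --control fd/fifo channel, and EVLIST_CTL_CMD_ACK_TAG is the acknowledgement written back by evlist__ctlfd_ack(). Assuming the usual definitions in util/evlist.h, they are:

	EVLIST_CTL_CMD_ENABLE_TAG    "enable"     (optionally "enable <event>")
	EVLIST_CTL_CMD_DISABLE_TAG   "disable"    (optionally "disable <event>")
	EVLIST_CTL_CMD_SNAPSHOT_TAG  "snapshot"
	EVLIST_CTL_CMD_EVLIST_TAG    "evlist"     (flags -v, -g, -F, parsed further down)
	EVLIST_CTL_CMD_STOP_TAG      "stop"
	EVLIST_CTL_CMD_PING_TAG      "ping"
	EVLIST_CTL_CMD_ACK_TAG       "ack"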
2096 return -1; in get_cmd_arg()
2106 enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 : in evlist__ctlfd_enable()
2107 sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1, in evlist__ctlfd_enable()
2111 return -1; in evlist__ctlfd_enable()
2121 pr_info("Event %s %s\n", evsel->name, in evlist__ctlfd_enable()
2147 sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1, in evlist__ctlfd_list()
2151 return -1; in evlist__ctlfd_list()
2155 if (!strcmp(arg, "-v")) { in evlist__ctlfd_list()
2157 } else if (!strcmp(arg, "-g")) { in evlist__ctlfd_list()
2159 } else if (!strcmp(arg, "-F")) { in evlist__ctlfd_list()
2163 return -1; in evlist__ctlfd_list()
2177 int ctlfd_pos = evlist->ctl_fd.pos; in evlist__ctlfd_process()
2178 struct pollfd *entries = evlist->core.pollfd.entries; in evlist__ctlfd_process()
2221 * struct event_enable_time - perf record -D/--delay single time range.
2222 * @start: start of time range to enable events in milliseconds
2223 * @end: end of time range to enable events in milliseconds
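The struct definition itself is not part of this match list; from the field descriptions above and the "%u - %u" scan formats in parse_event_enable_time() below, it is a pair of millisecond offsets, and -D/--delay accepts one or more comma-separated, non-overlapping, increasing start-end ranges (for example "perf record -D 10-20,30-40 ..."). A sketch, with the exact field types assumed:

	struct event_enable_time {
		unsigned int	start;	/* ms after startup at which events get enabled */
		unsigned int	end;	/* ms after startup at which events get disabled again */
	};

The event_enable_timer described further down then walks these ranges with a timerfd, enabling and disabling the non-dummy events at each boundary.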
2232 static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first) in parse_event_enable_time() argument
2234 const char *fmt = first ? "%u - %u %n" : " , %u - %u %n"; in parse_event_enable_time()
2239 return -EINVAL; in parse_event_enable_time()
2240 if (range) { in parse_event_enable_time()
2241 range->start = start; in parse_event_enable_time()
2242 range->end = end; in parse_event_enable_time()
2247 static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range) in parse_event_enable_times() argument
2249 int incr = !!range; in parse_event_enable_times()
2254 ret = parse_event_enable_time(str, range, first); in parse_event_enable_times()
2258 if (!first && range && range->start <= range[-1].end) in parse_event_enable_times()
2259 return -EINVAL; in parse_event_enable_times()
2261 range += incr; in parse_event_enable_times()
2268 * struct event_enable_timer - control structure for perf record -D/--delay.
2296 if (*endptr || d > INT_MAX || d < -1) in str_to_delay()
2313 opts->target.initial_delay = str_to_delay(str); in evlist__parse_event_enable_time()
2314 if (opts->target.initial_delay) in evlist__parse_event_enable_time()
2323 return -EINVAL; in evlist__parse_event_enable_time()
2327 return -ENOMEM; in evlist__parse_event_enable_time()
2329 eet->times = calloc(times_cnt, sizeof(*eet->times)); in evlist__parse_event_enable_time()
2330 if (!eet->times) { in evlist__parse_event_enable_time()
2331 err = -ENOMEM; in evlist__parse_event_enable_time()
2335 if (parse_event_enable_times(str, eet->times) != times_cnt) { in evlist__parse_event_enable_time()
2336 err = -EINVAL; in evlist__parse_event_enable_time()
2340 eet->times_cnt = times_cnt; in evlist__parse_event_enable_time()
2342 eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); in evlist__parse_event_enable_time()
2343 if (eet->timerfd == -1) { in evlist__parse_event_enable_time()
2344 err = -errno; in evlist__parse_event_enable_time()
2349 eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags); in evlist__parse_event_enable_time()
2350 if (eet->pollfd_pos < 0) { in evlist__parse_event_enable_time()
2351 err = eet->pollfd_pos; in evlist__parse_event_enable_time()
2355 eet->evlist = evlist; in evlist__parse_event_enable_time()
2356 evlist->eet = eet; in evlist__parse_event_enable_time()
2357 opts->target.initial_delay = eet->times[0].start; in evlist__parse_event_enable_time()
2362 close(eet->timerfd); in evlist__parse_event_enable_time()
2364 zfree(&eet->times); in evlist__parse_event_enable_time()
2378 if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) { in event_enable_timer__set_timer()
2379 err = -errno; in event_enable_timer__set_timer()
2392 ms = eet->times[0].end - eet->times[0].start; in event_enable_timer__start()
2393 eet->times_step = 1; in event_enable_timer__start()
2406 entries = eet->evlist->core.pollfd.entries; in event_enable_timer__process()
2407 revents = entries[eet->pollfd_pos].revents; in event_enable_timer__process()
2408 entries[eet->pollfd_pos].revents = 0; in event_enable_timer__process()
2411 size_t step = eet->times_step; in event_enable_timer__process()
2415 evlist__disable_non_dummy(eet->evlist); in event_enable_timer__process()
2417 if (pos >= eet->times_cnt - 1) { in event_enable_timer__process()
2423 evlist__enable_non_dummy(eet->evlist); in event_enable_timer__process()
2430 if (pos < eet->times_cnt) { in event_enable_timer__process()
2431 int *times = (int *)eet->times; /* Accessing 'times' as array of int */ in event_enable_timer__process()
2432 int ms = times[step] - times[step - 1]; in event_enable_timer__process()
2434 eet->times_step = step; in event_enable_timer__process()
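(The flat "array of int" view works because, with the two-field layout sketched earlier, times[] alternates start0, end0, start1, end1, ...; the difference between consecutive entries is therefore exactly the next interval, in milliseconds, to program into the timerfd via event_enable_timer__set_timer().)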
2446 zfree(&(*ep)->times); in event_enable_timer__exit()
2455 if (evsel->core.idx == idx) in evlist__find_evsel()
2470 …printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel)); in evlist__scnprintf_evsels()
2472 printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : ""); in evlist__scnprintf_evsels()
2485 * For some platforms, the 'mem-loads' event is required to use in evlist__check_mem_load_aux()
2486 * together with 'mem-loads-aux' within a group and 'mem-loads-aux' in evlist__check_mem_load_aux()
2488 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry in evlist__check_mem_load_aux()
2496 if (leader->name && strstr(leader->name, "mem-loads-aux")) { in evlist__check_mem_load_aux()
2499 pos->core.nr_members = 0; in evlist__check_mem_load_aux()
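On the platforms this refers to (recent Intel parts where mem-loads needs an auxiliary leader), perf ends up with a group along the lines of {cpu/mem-loads-aux/,cpu/mem-loads,ldlat=N/} with mem-loads-aux leading; the exact event string is an assumption here. The code above then clears the group membership before reporting, since mem-loads-aux carries no sample information of its own.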
2506 * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
2530 to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online(); in evlist__warn_user_requested_cpus()
2537 cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos)); in evlist__warn_user_requested_cpus()
2557 if (strchr(pos->name, '/')) in evlist__uniquify_name()
2561 pos->pmu_name, pos->name); in evlist__uniquify_name()
2563 free(pos->name); in evlist__uniquify_name()
2564 pos->name = new_name; in evlist__uniquify_name()