// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/string.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include <perf/cpumap.h>
#include "debug.h"
#include "stat.h"
#include "util/counts.h"

int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct perf_cpu_map *cpus;
	struct evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}

	if (evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
	evsel__delete(evsel);
out_cpu_map_delete:
	perf_cpu_map__put(cpus);
out_thread_map_delete:
	perf_thread_map__put(threads);
	return err;
}