/*
 * Sanity-check the "syscalls:sys_enter_openat" tracepoint: pin the test
 * process to each online CPU in turn, issue a per-CPU-distinct number of
 * openat() calls there, then read back the per-CPU counts and verify they
 * match what was issued.
 */

/* For the CPU_* macros */
#include <pthread.h>

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <sched.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <api/fs/fs.h>
#include <linux/err.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf),
					       "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		/*
		 * Pin ourselves to this CPU so the openat() calls below
		 * are guaranteed to be counted on it; each CPU gets a
		 * distinct call count (nr_openat_calls + cpu) so that a
		 * misattributed count is detectable.
		 */
		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}