#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		/*
		 * Pin ourselves to this CPU so that the openat() calls
		 * below are guaranteed to be counted on it, issuing a
		 * distinct number of calls (nr_openat_calls + cpu) per CPU.
		 */
		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			/* The dirfd is ignored, as the path is absolute. */
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts: with auto
	 * allocation the first read would allocate room for just one CPU,
	 * since we start reading from cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_cpu_map_delete:
	cpu_map__put(cpus);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}