#include <api/fs/tracefs.h>
#include <api/fs/debugfs.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <sched.h>
#include <string.h>
#include <unistd.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

int test__openat_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

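	/* cpu_map__new(NULL) builds a map of every CPU currently online. */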
	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (evsel == NULL) {
		if (tracefs_configured())
			pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
		else if (debugfs_configured())
			pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		else
			pr_debug("Neither tracefs nor debugfs is enabled in this kernel\n");
		goto out_cpu_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

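	/*
	 * Pin ourselves to each CPU in turn and issue a per-cpu-distinct
	 * number of openat() calls, so the counts read back later also
	 * identify the CPU they were generated on.
	 */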
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
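		/*
		 * A sketch of how lifting it could look with glibc's
		 * dynamic CPU-set API (CPU_ALLOC and friends, glibc >= 2.7).
		 * Illustrative only, not code this test runs; max_cpu is a
		 * hypothetical variable holding the highest CPU number in
		 * the map:
		 *
		 *	cpu_set_t *set = CPU_ALLOC(max_cpu + 1);
		 *	size_t setsz = CPU_ALLOC_SIZE(max_cpu + 1);
		 *
		 *	CPU_ZERO_S(setsz, set);
		 *	CPU_SET_S(cpus->map[cpu], setsz, set);
		 *	sched_setaffinity(0, setsz, set);
		 *	CPU_FREE(set);
		 */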
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
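		/*
		 * The dirfd argument (0) is ignored by openat() because the
		 * path is absolute; each iteration should hit the
		 * sys_enter_openat tracepoint exactly once on this CPU.
		 */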
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if we
	 * relied on the auto allocation it would allocate space for a
	 * single cpu only, since the reads below start at cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

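	/*
	 * Read the counts back one CPU at a time and check that each CPU
	 * intercepted exactly the number of openat() calls issued while we
	 * were pinned to it.
	 */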
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, cpus->nr, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_cpu_map_delete:
	cpu_map__put(cpus);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}