xref: /openbmc/linux/tools/perf/arch/x86/tests/intel-cqm.c (revision b9df3997)
// SPDX-License-Identifier: GPL-2.0
#include "tests/tests.h"
#include "cloexec.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "arch-tests.h"
#include <internal/lib.h> // page_size

#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <errno.h>
#include <string.h>
#include <unistd.h> // fork(), sleep(), close()

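/*
 * Fork a child that does nothing but sleep. It gives the event group a
 * task to count against until the parent kills it at the end of the test.
 */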
static pid_t spawn(void)
{
	pid_t pid;

	pid = fork();
	if (pid)
		return pid;

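	/* Child: sleep until the parent sends SIGKILL. */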
	while (1)
		sleep(5);
	return 0;
}

/*
 * Create an event group that contains both a sampled hardware
 * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
 * wait for the hardware perf counter to overflow and generate a PMI,
 * which triggers an event read for both of the events in the group.
 *
 * Since reading Intel CQM event counters requires sending SMP IPIs, the
 * CQM pmu needs to handle the above situation gracefully, and return
 * the last read counter value to avoid triggering a WARN_ON_ONCE() in
 * smp_call_function_many() caused by sending IPIs from NMI context.
 */
int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	struct perf_event_attr pe;
	int i, fd[2], flag, ret;
	size_t mmap_len;
	void *event;
	pid_t pid;
	int err = TEST_FAIL;

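	/* Use PERF_FLAG_FD_CLOEXEC if the running kernel supports it. */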
	flag = perf_event_open_cloexec_flag();

	evlist = evlist__new();
	if (!evlist) {
		pr_debug("evlist__new failed\n");
		return TEST_FAIL;
	}

	ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
	if (ret) {
		pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
		err = TEST_SKIP;
		goto out;
	}

	evsel = evlist__first(evlist);
	if (!evsel) {
		pr_debug("evlist__first failed\n");
		goto out;
	}

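	/*
	 * Group leader: a sampled cpu-cycles event with a small period so
	 * that it overflows quickly, and PERF_FORMAT_GROUP so the PMI-time
	 * read covers every event in the group.
	 */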
	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = PERF_TYPE_HARDWARE;
	pe.config = PERF_COUNT_HW_CPU_CYCLES;
	pe.read_format = PERF_FORMAT_GROUP;

	pe.sample_period = 128;
	pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;

	pid = spawn();

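	/* Open the sampling leader on the child task, on any CPU. */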
	fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
	if (fd[0] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

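	/* Second event: the intel_cqm/llc_occupancy/ event parsed above. */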
	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = evsel->core.attr.type;
	pe.config = evsel->core.attr.config;
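	/* Attach it to the leader's group via group_fd = fd[0]. */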
	fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
	if (fd[1] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	/*
	 * Pick a power-of-two number of pages + 1 for the meta-data
	 * page (struct perf_event_mmap_page). See tools/perf/design.txt.
	 */
	mmap_len = page_size * 65;

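	/* Map the ring buffer that will receive the leader's samples. */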
	event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
	if (event == MAP_FAILED) {
		pr_debug("failed to mmap %d\n", errno);
		goto out;
	}

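	/*
	 * Let the group run for a while: each cycles overflow triggers a PMI
	 * that reads the whole group, exercising the CQM read from NMI
	 * context described above.
	 */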
	sleep(1);

	err = TEST_OK;

	munmap(event, mmap_len);

	for (i = 0; i < 2; i++)
		close(fd[i]);

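	/* Reap the sleeping child. */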
	kill(pid, SIGKILL);
	wait(NULL);
out:
	evlist__delete(evlist);
	return err;
}