1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2035827e9SMatt Fleming #include "tests/tests.h"
3035827e9SMatt Fleming #include "cloexec.h"
4035827e9SMatt Fleming #include "debug.h"
5035827e9SMatt Fleming #include "evlist.h"
6035827e9SMatt Fleming #include "evsel.h"
7035827e9SMatt Fleming #include "arch-tests.h"
820f2be1dSJiri Olsa #include <internal/lib.h> // page_size
9035827e9SMatt Fleming
109607ad3aSArnaldo Carvalho de Melo #include <signal.h>
11035827e9SMatt Fleming #include <sys/mman.h>
124208735dSArnaldo Carvalho de Melo #include <sys/wait.h>
13a43783aeSArnaldo Carvalho de Melo #include <errno.h>
14035827e9SMatt Fleming #include <string.h>
15035827e9SMatt Fleming
/*
 * Fork a child that just sleeps in a loop, giving us a live victim
 * process to attach perf events to. Returns the child's pid in the
 * parent (-1 if fork() failed); the child never returns.
 */
static pid_t spawn(void)
{
	pid_t child = fork();

	if (child)
		return child;

	/* Child: idle forever until the parent SIGKILLs us. */
	for (;;)
		sleep(5);

	return 0;
}
28035827e9SMatt Fleming
29035827e9SMatt Fleming /*
30035827e9SMatt Fleming * Create an event group that contains both a sampled hardware
31035827e9SMatt Fleming * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
32035827e9SMatt Fleming * wait for the hardware perf counter to overflow and generate a PMI,
33035827e9SMatt Fleming * which triggers an event read for both of the events in the group.
34035827e9SMatt Fleming *
35035827e9SMatt Fleming * Since reading Intel CQM event counters requires sending SMP IPIs, the
36035827e9SMatt Fleming * CQM pmu needs to handle the above situation gracefully, and return
37035827e9SMatt Fleming * the last read counter value to avoid triggering a WARN_ON_ONCE() in
38035827e9SMatt Fleming * smp_call_function_many() caused by sending IPIs from NMI context.
39035827e9SMatt Fleming */
int test__intel_cqm_count_nmi_context(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	struct perf_event_attr pe;
	/* -1 sentinels so the unified cleanup below knows what was acquired. */
	int i, fd[2] = { -1, -1 }, flag, ret;
	size_t mmap_len;
	void *event;
	pid_t pid = -1;
	int err = TEST_FAIL;

	flag = perf_event_open_cloexec_flag();

	evlist = evlist__new();
	if (!evlist) {
		pr_debug("evlist__new failed\n");
		return TEST_FAIL;
	}

	ret = parse_event(evlist, "intel_cqm/llc_occupancy/");
	if (ret) {
		pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
		err = TEST_SKIP;
		goto out;
	}

	evsel = evlist__first(evlist);
	if (!evsel) {
		pr_debug("evlist__first failed\n");
		goto out;
	}

	/*
	 * Group leader: a sampled hardware event whose counter overflow
	 * raises the PMI that forces a group read from NMI context.
	 */
	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = PERF_TYPE_HARDWARE;
	pe.config = PERF_COUNT_HW_CPU_CYCLES;
	pe.read_format = PERF_FORMAT_GROUP;

	pe.sample_period = 128;
	pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;

	pid = spawn();
	if (pid < 0) {
		/*
		 * Must not proceed with pid == -1: sys_perf_event_open()
		 * would interpret it as "measure all processes".
		 */
		pr_debug("failed to spawn child %d\n", errno);
		goto out;
	}

	fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
	if (fd[0] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	/* Group member: the CQM event that gets read from the PMI handler. */
	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = evsel->attr.type;
	pe.config = evsel->attr.config;

	fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
	if (fd[1] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	/*
	 * Pick a power-of-two number of pages + 1 for the meta-data
	 * page (struct perf_event_mmap_page). See tools/perf/design.txt.
	 */
	mmap_len = page_size * 65;

	event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
	if (event == MAP_FAILED) {
		pr_debug("failed to mmap %d\n", errno);
		goto out;
	}

	/* Give the child time to overflow cpu-cycles and trigger the PMI. */
	sleep(1);

	err = TEST_OK;

	munmap(event, mmap_len);
out:
	/*
	 * Single cleanup path: earlier error exits used to leak both perf
	 * fds and the forever-sleeping child. Release whatever was acquired.
	 */
	for (i = 0; i < 2; i++) {
		if (fd[i] >= 0)
			close(fd[i]);
	}
	if (pid > 0) {
		kill(pid, SIGKILL);
		wait(NULL);
	}
	evlist__delete(evlist);
	return err;
}
129