// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	return true;
}

static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct perf_evsel evsel = {
		.needs_swap = false,
		.attr = {
			.sample_type = sample_type,
			.read_format = read_format,
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.phys_addr	= 113,
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.attr.sample_regs_intr = sample_regs;

	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample. The test
 * checks sample format bits separately and together. If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	/*
	 * read_format combinations: PERF_FORMAT_ID is always set, optionally
	 * combined with TOTAL_TIME_ENABLED, TOTAL_TIME_RUNNING and GROUP.
	 */
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added. Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}
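
/*
 * Usage sketch (illustrative, not part of the test itself): this test is
 * normally run through the 'perf test' harness. Assuming the usual
 * registration convention in tests/builtin-test.c, the entry looks roughly
 * like:
 *
 *	static struct test generic_tests[] = {
 *		...
 *		{
 *			.desc = "Sample parsing",
 *			.func = test__sample_parsing,
 *		},
 *		...
 *	};
 *
 * The test can then be selected by name substring from the command line,
 * e.g. 'perf test sample'.
 */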