// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <stdlib.h>

#include "../../../util/event.h"
#include "../../../util/synthetic-events.h"
#include "../../../util/machine.h"
#include "../../../util/tool.h"
#include "../../../util/map.h"
#include "../../../util/debug.h"
#include "util/sample.h"

#if defined(__x86_64__)

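/*
 * Synthesize PERF_RECORD_MMAP events for the "extra" kernel maps, i.e.
 * kernel maps beyond the main kernel text map, such as the x86_64 entry
 * trampoline maps, so samples landing in them can be resolved.
 */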
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int rc = 0;
	struct map_rb_node *pos;
	struct maps *kmaps = machine__kernel_maps(machine);
	union perf_event *event = zalloc(sizeof(event->mmap) +
					 machine->id_hdr_size);

	if (!event) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for extra kernel maps\n");
		return -1;
	}

	maps__for_each_entry(kmaps, pos) {
		struct kmap *kmap;
		size_t size;
		struct map *map = pos->map;

		if (!__map__is_extra_kernel_map(map))
			continue;

		kmap = map__kmap(map);

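		/*
		 * Event size: the mmap record minus its fixed PATH_MAX
		 * filename buffer, plus the map name padded to a u64
		 * boundary, plus the per-machine sample ID header.
		 */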
		size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
		       PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
		       machine->id_hdr_size;

		memset(event, 0, size);

		event->mmap.header.type = PERF_RECORD_MMAP;

		/*
		 * The kernel uses 0 for user space maps, see
		 * __perf_event_mmap() in kernel/events/core.c.
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_KERNEL;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

		event->mmap.header.size = size;

		event->mmap.start = map__start(map);
		event->mmap.len   = map__size(map);
		event->mmap.pgoff = map__pgoff(map);
		event->mmap.pid   = machine->pid;

		strlcpy(event->mmap.filename, kmap->name, PATH_MAX);

		if (perf_tool__process_synth_event(tool, event, machine,
						   process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

#endif

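/*
 * Decode the sample weight: with PERF_SAMPLE_WEIGHT the whole 64-bit
 * value is the weight; with PERF_SAMPLE_WEIGHT_STRUCT it is split into
 * a 32-bit weight (var1_dw), a 16-bit instruction latency (var2_w) and
 * a 16-bit retire latency (var3_w).
 */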
void arch_perf_parse_sample_weight(struct perf_sample *data,
				   const __u64 *array, u64 type)
{
	union perf_sample_weight weight;

	weight.full = *array;
	if (type & PERF_SAMPLE_WEIGHT)
		data->weight = weight.full;
	else {
		data->weight = weight.var1_dw;
		data->ins_lat = weight.var2_w;
		data->retire_lat = weight.var3_w;
	}
}

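/*
 * Re-encode the weight into the raw sample array; the inverse of
 * arch_perf_parse_sample_weight() above.
 */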
void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
					__u64 *array, u64 type)
{
	*array = data->weight;

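	/*
	 * PERF_SAMPLE_WEIGHT_STRUCT layout: bits 0-31 weight,
	 * bits 32-47 instruction latency, bits 48-63 retire latency.
	 */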
	if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
		*array &= 0xffffffff;
		*array |= ((u64)data->ins_lat << 32);
		*array |= ((u64)data->retire_lat << 48);
	}
}

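/*
 * On x86 the "pipeline stage cycle" sort columns show retire latency,
 * so rename the generic column headers accordingly.
 */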
const char *arch_perf_header_entry(const char *se_header)
{
	if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
		return "Local Retire Latency";
	else if (!strcmp(se_header, "Pipeline Stage Cycle"))
		return "Retire Latency";

	return se_header;
}

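/*
 * Report that the p_stage_cyc sort keys (retire latency) are
 * meaningful on x86.
 */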
int arch_support_sort_key(const char *sort_key)
{
	if (!strcmp(sort_key, "p_stage_cyc"))
		return 1;
	if (!strcmp(sort_key, "local_p_stage_cyc"))
		return 1;
	return 0;
}