#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

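/* Map KVM_EXIT_* reason codes to strings for __print_symbolic(). */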
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL)

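/* Tracepoint for an exit from KVM_RUN back to userspace. */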
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

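/*
 * Tracepoint for setting the level of a guest interrupt (GSI) line;
 * built only on architectures that provide __KVM_HAVE_IRQ_LINE.
 */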
#if defined(__KVM_HAVE_IRQ_LINE)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(__KVM_HAVE_IRQ_LINE) */

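/*
 * APIC delivery-mode names shared by the IOAPIC and MSI tracepoints
 * below; this block is built only when __KVM_HAVE_IOAPIC is defined.
 */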
#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

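/*
 * Tracepoint for an interrupt set on an emulated IOAPIC pin;
 * 'coalesced' marks an interrupt merged into one already pending.
 */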
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

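/*
 * Tracepoint for delivering a message-signalled interrupt (MSI);
 * destination and vector are decoded from the address/data pair.
 */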
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %u vec %x (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12), (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

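/* Map irqchip identifiers to names for __print_symbolic(). */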
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

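/* Tracepoint for an interrupt acknowledged on an emulated irqchip. */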
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk("irqchip %s pin %u",
		  __print_symbolic(__entry->irqchip, kvm_irqchips),
		  __entry->pin)
);

#endif /* defined(__KVM_HAVE_IOAPIC) */

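/* MMIO access types decoded by the kvm_mmio tracepoint below. */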
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

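/* Tracepoint for guest MMIO accesses handled by KVM. */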
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

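/* Tracepoint for loading and unloading the guest FPU state. */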
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

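/*
 * Tracepoint for MMU-notifier page aging: reports whether the guest page
 * backing 'hva' (gfn computed from the memslot) was recently referenced.
 */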
TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(hva, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->hva		= hva;
		__entry->gfn		=
		  slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx %s",
		  __entry->hva, __entry->gfn,
		  __entry->referenced ? "YOUNG" : "OLD")
);

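/* Asynchronous page fault tracepoints (CONFIG_KVM_ASYNC_PF only). */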
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

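/* Tracepoint fired when an asynchronous page fault has been completed. */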
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, struct page *page, u64 gva),
	TP_ARGS(address, page, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(pfn_t, pfn)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->pfn = page ? page_to_pfn(page) : 0;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
		  __entry->address, __entry->pfn)
);

#endif /* CONFIG_KVM_ASYNC_PF */

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>