#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),		\
	ERSN(HYPERV)

TRACE_EVENT(kvm_userspace_exit,
	TP_PROTO(__u32 reason, int errno),
	TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason = reason;
		__entry->errno = errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

TRACE_EVENT(kvm_vcpu_wakeup,
	TP_PROTO(__u64 ns, bool waited, bool valid),
	TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns = ns;
		__entry->waited = waited;
		__entry->valid = valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);

#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi = gsi;
		__entry->level = level;
		__entry->irq_source_id = irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

TRACE_EVENT(kvm_ioapic_set_irq,
	TP_PROTO(__u64 e, int pin, bool coalesced),
	TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e = e;
		__entry->pin = pin;
		__entry->coalesced = coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	TP_PROTO(__u64 e),
	TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e = e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

TRACE_EVENT(kvm_msi_set_irq,
	TP_PROTO(__u64 address, __u64 data),
	TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->data = data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)

#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip = irqchip;
		__entry->pin = pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->len = len;
		__entry->gpa = gpa;
		__entry->val = val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	load		)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	level		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->level = level;
		__entry->hva = ((gfn - slot->base_gfn) <<
				 PAGE_SHIFT) + slot->userspace_addr;
		__entry->referenced = ref;
	),

	TP_printk("hva %llx gfn %llx level %u %s",
		  __entry->hva, __entry->gfn, __entry->level,
		  __entry->referenced ? "YOUNG" : "OLD")
);

#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif

TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow = grow;
		__entry->vcpu_id = vcpu_id;
		__entry->new = new;
		__entry->old = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
		  __entry->vcpu_id,
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
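
/*
 * Illustrative usage, not part of this header: each TRACE_EVENT() above
 * generates a trace_<event>() helper that KVM code calls at the matching
 * point. As a sketch (the exact call site and surrounding code vary by
 * kernel version), the KVM_RUN ioctl path in virt/kvm/kvm_main.c reports
 * the userspace exit roughly like this:
 *
 *	r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 *	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
 */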