#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field( unsigned long, nr )
		__field( unsigned long, a0 )
		__field( unsigned long, a1 )
		__field( unsigned long, a2 )
		__field( unsigned long, a3 )
	),

	TP_fast_assign(
		__entry->nr = nr;
		__entry->a0 = a0;
		__entry->a1 = a1;
		__entry->a2 = a2;
		__entry->a3 = a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field( __u16, rep_cnt )
		__field( __u16, rep_idx )
		__field( __u64, ingpa )
		__field( __u64, outgpa )
		__field( __u16, code )
		__field( bool, fast )
	),

	TP_fast_assign(
		__entry->rep_cnt = rep_cnt;
		__entry->rep_idx = rep_idx;
		__entry->ingpa = ingpa;
		__entry->outgpa = outgpa;
		__entry->code = code;
		__entry->fast = fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */

#define KVM_PIO_IN   0
#define KVM_PIO_OUT  1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, port )
		__field( unsigned int, size )
		__field( unsigned int, count )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->port = port;
		__entry->size = size;
		__entry->count = count;
		if (size == 1)
			__entry->val = *(unsigned char *)data;
		else if (size == 2)
			__entry->val = *(unsigned short *)data;
		else
			__entry->val = *(unsigned int *)data;
	),

	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);

/*
 * Tracepoint for fast mmio.
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);
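/*
 * Example (illustrative only; the port, count and values below are
 * hypothetical): a string-I/O write of four bytes starting with 0x41 to
 * port 0x3f8 would be reported as
 *
 *	trace_kvm_pio(KVM_PIO_OUT, 0x3f8, 1, 4, data);
 *
 * and, since kvm_pio captures only the first element, would render roughly
 * as
 *
 *	pio_write at 0x3f8 size 1 count 4 val 0x41 (...)
 */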
/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx, bool found),
	TP_ARGS(function, rax, rbx, rcx, rdx, found),

	TP_STRUCT__entry(
		__field( unsigned int, function )
		__field( unsigned long, rax )
		__field( unsigned long, rbx )
		__field( unsigned long, rcx )
		__field( unsigned long, rdx )
		__field( bool, found )
	),

	TP_fast_assign(
		__entry->function = function;
		__entry->rax = rax;
		__entry->rbx = rbx;
		__entry->rcx = rcx;
		__entry->rdx = rdx;
		__entry->found = found;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx,
		  __entry->found ? "found" : "not found")
);

#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),   \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),  \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, reg )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->reg = reg;
		__entry->val = val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)	trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)	trace_kvm_apic(1, reg, val)

#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2
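/*
 * Usage sketch (the call below is illustrative): callers go through the
 * wrappers above, e.g.
 *
 *	trace_kvm_apic_write(APIC_DFR, val);
 *
 * which expands to trace_kvm_apic(1, APIC_DFR, val); __print_symbolic() then
 * renders the register offset as "APIC_DFR" via kvm_trace_symbol_apic.
 */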
/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_STRUCT__entry(
		__field( unsigned int, exit_reason )
		__field( unsigned long, guest_rip )
		__field( u32, isa )
		__field( u64, info1 )
		__field( u64, info2 )
	),

	TP_fast_assign(
		__entry->exit_reason = exit_reason;
		__entry->guest_rip = kvm_rip_read(vcpu);
		__entry->isa = isa;
		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
					   &__entry->info2);
	),

	TP_printk("reason %s rip 0x%lx info %llx %llx",
		  (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
		  __entry->guest_rip, __entry->info1, __entry->info2)
);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field( unsigned int, irq )
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq %u", __entry->irq)
);

#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field( u8, exception )
		__field( u8, has_error )
		__field( u32, error_code )
	),

	TP_fast_assign(
		__entry->exception = exception;
		__entry->has_error = has_error;
		__entry->error_code = error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field( unsigned long, fault_address )
		__field( unsigned int, error_code )
	),

	TP_fast_assign(
		__entry->fault_address = fault_address;
		__entry->error_code = error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field( unsigned, write )
		__field( u32, ecx )
		__field( u64, data )
		__field( u8, exception )
	),

	TP_fast_assign(
		__entry->write = write;
		__entry->ecx = ecx;
		__entry->data = data;
		__entry->exception = exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)		trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)		trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)		trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)	trace_kvm_msr(1, ecx, data, true)
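/*
 * Usage sketch (the call below is illustrative): a faulting RDMSR is reported
 * through the wrapper above as
 *
 *	trace_kvm_msr_read_ex(ecx);
 *
 * i.e. trace_kvm_msr(0, ecx, 0, true), so the recorded data value is 0 and
 * TP_printk() appends " (#GP)" to the rendered line.
 */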
"write" : "read", 375 __entry->cr, __entry->val) 376 ); 377 378 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 379 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 380 381 TRACE_EVENT(kvm_pic_set_irq, 382 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 383 TP_ARGS(chip, pin, elcr, imr, coalesced), 384 385 TP_STRUCT__entry( 386 __field( __u8, chip ) 387 __field( __u8, pin ) 388 __field( __u8, elcr ) 389 __field( __u8, imr ) 390 __field( bool, coalesced ) 391 ), 392 393 TP_fast_assign( 394 __entry->chip = chip; 395 __entry->pin = pin; 396 __entry->elcr = elcr; 397 __entry->imr = imr; 398 __entry->coalesced = coalesced; 399 ), 400 401 TP_printk("chip %u pin %u (%s%s)%s", 402 __entry->chip, __entry->pin, 403 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 404 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 405 __entry->coalesced ? " (coalesced)" : "") 406 ); 407 408 #define kvm_apic_dst_shorthand \ 409 {0x0, "dst"}, \ 410 {0x1, "self"}, \ 411 {0x2, "all"}, \ 412 {0x3, "all-but-self"} 413 414 TRACE_EVENT(kvm_apic_ipi, 415 TP_PROTO(__u32 icr_low, __u32 dest_id), 416 TP_ARGS(icr_low, dest_id), 417 418 TP_STRUCT__entry( 419 __field( __u32, icr_low ) 420 __field( __u32, dest_id ) 421 ), 422 423 TP_fast_assign( 424 __entry->icr_low = icr_low; 425 __entry->dest_id = dest_id; 426 ), 427 428 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 429 __entry->dest_id, (u8)__entry->icr_low, 430 __print_symbolic((__entry->icr_low >> 8 & 0x7), 431 kvm_deliver_mode), 432 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 433 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 434 (__entry->icr_low & (1<<15)) ? "level" : "edge", 435 __print_symbolic((__entry->icr_low >> 18 & 0x3), 436 kvm_apic_dst_shorthand)) 437 ); 438 439 TRACE_EVENT(kvm_apic_accept_irq, 440 TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec), 441 TP_ARGS(apicid, dm, tm, vec), 442 443 TP_STRUCT__entry( 444 __field( __u32, apicid ) 445 __field( __u16, dm ) 446 __field( __u8, tm ) 447 __field( __u8, vec ) 448 ), 449 450 TP_fast_assign( 451 __entry->apicid = apicid; 452 __entry->dm = dm; 453 __entry->tm = tm; 454 __entry->vec = vec; 455 ), 456 457 TP_printk("apicid %x vec %u (%s|%s)", 458 __entry->apicid, __entry->vec, 459 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 460 __entry->tm ? 
"level" : "edge") 461 ); 462 463 TRACE_EVENT(kvm_eoi, 464 TP_PROTO(struct kvm_lapic *apic, int vector), 465 TP_ARGS(apic, vector), 466 467 TP_STRUCT__entry( 468 __field( __u32, apicid ) 469 __field( int, vector ) 470 ), 471 472 TP_fast_assign( 473 __entry->apicid = apic->vcpu->vcpu_id; 474 __entry->vector = vector; 475 ), 476 477 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 478 ); 479 480 TRACE_EVENT(kvm_pv_eoi, 481 TP_PROTO(struct kvm_lapic *apic, int vector), 482 TP_ARGS(apic, vector), 483 484 TP_STRUCT__entry( 485 __field( __u32, apicid ) 486 __field( int, vector ) 487 ), 488 489 TP_fast_assign( 490 __entry->apicid = apic->vcpu->vcpu_id; 491 __entry->vector = vector; 492 ), 493 494 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 495 ); 496 497 /* 498 * Tracepoint for nested VMRUN 499 */ 500 TRACE_EVENT(kvm_nested_vmrun, 501 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 502 __u32 event_inj, bool npt), 503 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 504 505 TP_STRUCT__entry( 506 __field( __u64, rip ) 507 __field( __u64, vmcb ) 508 __field( __u64, nested_rip ) 509 __field( __u32, int_ctl ) 510 __field( __u32, event_inj ) 511 __field( bool, npt ) 512 ), 513 514 TP_fast_assign( 515 __entry->rip = rip; 516 __entry->vmcb = vmcb; 517 __entry->nested_rip = nested_rip; 518 __entry->int_ctl = int_ctl; 519 __entry->event_inj = event_inj; 520 __entry->npt = npt; 521 ), 522 523 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 524 "event_inj: 0x%08x npt: %s", 525 __entry->rip, __entry->vmcb, __entry->nested_rip, 526 __entry->int_ctl, __entry->event_inj, 527 __entry->npt ? "on" : "off") 528 ); 529 530 TRACE_EVENT(kvm_nested_intercepts, 531 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), 532 TP_ARGS(cr_read, cr_write, exceptions, intercept), 533 534 TP_STRUCT__entry( 535 __field( __u16, cr_read ) 536 __field( __u16, cr_write ) 537 __field( __u32, exceptions ) 538 __field( __u64, intercept ) 539 ), 540 541 TP_fast_assign( 542 __entry->cr_read = cr_read; 543 __entry->cr_write = cr_write; 544 __entry->exceptions = exceptions; 545 __entry->intercept = intercept; 546 ), 547 548 TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", 549 __entry->cr_read, __entry->cr_write, __entry->exceptions, 550 __entry->intercept) 551 ); 552 /* 553 * Tracepoint for #VMEXIT while nested 554 */ 555 TRACE_EVENT(kvm_nested_vmexit, 556 TP_PROTO(__u64 rip, __u32 exit_code, 557 __u64 exit_info1, __u64 exit_info2, 558 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 559 TP_ARGS(rip, exit_code, exit_info1, exit_info2, 560 exit_int_info, exit_int_info_err, isa), 561 562 TP_STRUCT__entry( 563 __field( __u64, rip ) 564 __field( __u32, exit_code ) 565 __field( __u64, exit_info1 ) 566 __field( __u64, exit_info2 ) 567 __field( __u32, exit_int_info ) 568 __field( __u32, exit_int_info_err ) 569 __field( __u32, isa ) 570 ), 571 572 TP_fast_assign( 573 __entry->rip = rip; 574 __entry->exit_code = exit_code; 575 __entry->exit_info1 = exit_info1; 576 __entry->exit_info2 = exit_info2; 577 __entry->exit_int_info = exit_int_info; 578 __entry->exit_int_info_err = exit_int_info_err; 579 __entry->isa = isa; 580 ), 581 TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " 582 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 583 __entry->rip, 584 (__entry->isa == KVM_ISA_VMX) ? 
TRACE_EVENT(kvm_nested_intercepts,
	TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_STRUCT__entry(
		__field( __u16, cr_read )
		__field( __u16, cr_write )
		__field( __u32, exceptions )
		__field( __u64, intercept )
	),

	TP_fast_assign(
		__entry->cr_read = cr_read;
		__entry->cr_write = cr_write;
		__entry->exceptions = exceptions;
		__entry->intercept = intercept;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
		  __entry->intercept)
);
/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	TP_PROTO(__u64 rip, __u32 exit_code,
		 __u64 exit_info1, __u64 exit_info2,
		 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, exit_code )
		__field( __u64, exit_info1 )
		__field( __u64, exit_info2 )
		__field( __u32, exit_int_info )
		__field( __u32, exit_int_info_err )
		__field( __u32, isa )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->exit_code = exit_code;
		__entry->exit_info1 = exit_info1;
		__entry->exit_info2 = exit_info2;
		__entry->exit_int_info = exit_int_info;
		__entry->exit_int_info_err = exit_int_info_err;
		__entry->isa = isa;
	),
	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		  (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	TP_PROTO(__u32 exit_code,
		 __u64 exit_info1, __u64 exit_info2,
		 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field( __u32, exit_code )
		__field( __u64, exit_info1 )
		__field( __u64, exit_info2 )
		__field( __u32, exit_int_info )
		__field( __u32, exit_int_info_err )
		__field( __u32, isa )
	),

	TP_fast_assign(
		__entry->exit_code = exit_code;
		__entry->exit_info1 = exit_info1;
		__entry->exit_info2 = exit_info2;
		__entry->exit_int_info = exit_int_info;
		__entry->exit_int_info_err = exit_int_info_err;
		__entry->isa = isa;
	),

	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	TP_PROTO(__u64 rip),
	TP_ARGS(rip),

	TP_STRUCT__entry(
		__field( __u64, rip )
	),

	TP_fast_assign(
		__entry->rip = rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for the INVLPGA instruction
 */
TRACE_EVENT(kvm_invlpga,
	TP_PROTO(__u64 rip, int asid, u64 address),
	TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( int, asid )
		__field( __u64, address )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->asid = asid;
		__entry->address = address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for the SKINIT instruction
 */
TRACE_EVENT(kvm_skinit,
	TP_PROTO(__u64 rip, __u32 slb),
	TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, slb )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->slb = slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags			\
	{ 0,			    "real" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		\
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, csbase )
		__field( __u8, len )
		__array( __u8, insn, 15 )
		__field( __u8, flags )
		__field( __u8, failed )
	),

	TP_fast_assign(
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
			       - vcpu->arch.emulate_ctxt.fetch.data;
		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
	),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
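/*
 * Illustrative expansion: kei_decode_mode(X86EMUL_MODE_PROT64) evaluates to
 * KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L, which
 * kvm_trace_symbol_emul_flags renders as "prot64" in the kvm_emulate_insn
 * output; a mode not handled by the switch leaves the 0xff sentinel in place.
 */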
"GPA" : "GVA") 788 ); 789 790 TRACE_EVENT(kvm_write_tsc_offset, 791 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 792 __u64 next_tsc_offset), 793 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 794 795 TP_STRUCT__entry( 796 __field( unsigned int, vcpu_id ) 797 __field( __u64, previous_tsc_offset ) 798 __field( __u64, next_tsc_offset ) 799 ), 800 801 TP_fast_assign( 802 __entry->vcpu_id = vcpu_id; 803 __entry->previous_tsc_offset = previous_tsc_offset; 804 __entry->next_tsc_offset = next_tsc_offset; 805 ), 806 807 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 808 __entry->previous_tsc_offset, __entry->next_tsc_offset) 809 ); 810 811 #ifdef CONFIG_X86_64 812 813 #define host_clocks \ 814 {VCLOCK_NONE, "none"}, \ 815 {VCLOCK_TSC, "tsc"} \ 816 817 TRACE_EVENT(kvm_update_master_clock, 818 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 819 TP_ARGS(use_master_clock, host_clock, offset_matched), 820 821 TP_STRUCT__entry( 822 __field( bool, use_master_clock ) 823 __field( unsigned int, host_clock ) 824 __field( bool, offset_matched ) 825 ), 826 827 TP_fast_assign( 828 __entry->use_master_clock = use_master_clock; 829 __entry->host_clock = host_clock; 830 __entry->offset_matched = offset_matched; 831 ), 832 833 TP_printk("masterclock %d hostclock %s offsetmatched %u", 834 __entry->use_master_clock, 835 __print_symbolic(__entry->host_clock, host_clocks), 836 __entry->offset_matched) 837 ); 838 839 TRACE_EVENT(kvm_track_tsc, 840 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 841 unsigned int online_vcpus, bool use_master_clock, 842 unsigned int host_clock), 843 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 844 host_clock), 845 846 TP_STRUCT__entry( 847 __field( unsigned int, vcpu_id ) 848 __field( unsigned int, nr_vcpus_matched_tsc ) 849 __field( unsigned int, online_vcpus ) 850 __field( bool, use_master_clock ) 851 __field( unsigned int, host_clock ) 852 ), 853 854 TP_fast_assign( 855 __entry->vcpu_id = vcpu_id; 856 __entry->nr_vcpus_matched_tsc = nr_matched; 857 __entry->online_vcpus = online_vcpus; 858 __entry->use_master_clock = use_master_clock; 859 __entry->host_clock = host_clock; 860 ), 861 862 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 863 " hostclock %s", 864 __entry->vcpu_id, __entry->use_master_clock, 865 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 866 __print_symbolic(__entry->host_clock, host_clocks)) 867 ); 868 869 #endif /* CONFIG_X86_64 */ 870 871 /* 872 * Tracepoint for PML full VMEXIT. 873 */ 874 TRACE_EVENT(kvm_pml_full, 875 TP_PROTO(unsigned int vcpu_id), 876 TP_ARGS(vcpu_id), 877 878 TP_STRUCT__entry( 879 __field( unsigned int, vcpu_id ) 880 ), 881 882 TP_fast_assign( 883 __entry->vcpu_id = vcpu_id; 884 ), 885 886 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 887 ); 888 889 TRACE_EVENT(kvm_ple_window, 890 TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old), 891 TP_ARGS(grow, vcpu_id, new, old), 892 893 TP_STRUCT__entry( 894 __field( bool, grow ) 895 __field( unsigned int, vcpu_id ) 896 __field( int, new ) 897 __field( int, old ) 898 ), 899 900 TP_fast_assign( 901 __entry->grow = grow; 902 __entry->vcpu_id = vcpu_id; 903 __entry->new = new; 904 __entry->old = old; 905 ), 906 907 TP_printk("vcpu %u: ple_window %d (%s %d)", 908 __entry->vcpu_id, 909 __entry->new, 910 __entry->grow ? 
"grow" : "shrink", 911 __entry->old) 912 ); 913 914 #define trace_kvm_ple_window_grow(vcpu_id, new, old) \ 915 trace_kvm_ple_window(true, vcpu_id, new, old) 916 #define trace_kvm_ple_window_shrink(vcpu_id, new, old) \ 917 trace_kvm_ple_window(false, vcpu_id, new, old) 918 919 TRACE_EVENT(kvm_pvclock_update, 920 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 921 TP_ARGS(vcpu_id, pvclock), 922 923 TP_STRUCT__entry( 924 __field( unsigned int, vcpu_id ) 925 __field( __u32, version ) 926 __field( __u64, tsc_timestamp ) 927 __field( __u64, system_time ) 928 __field( __u32, tsc_to_system_mul ) 929 __field( __s8, tsc_shift ) 930 __field( __u8, flags ) 931 ), 932 933 TP_fast_assign( 934 __entry->vcpu_id = vcpu_id; 935 __entry->version = pvclock->version; 936 __entry->tsc_timestamp = pvclock->tsc_timestamp; 937 __entry->system_time = pvclock->system_time; 938 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 939 __entry->tsc_shift = pvclock->tsc_shift; 940 __entry->flags = pvclock->flags; 941 ), 942 943 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 944 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 945 "flags 0x%x }", 946 __entry->vcpu_id, 947 __entry->version, 948 __entry->tsc_timestamp, 949 __entry->system_time, 950 __entry->tsc_to_system_mul, 951 __entry->tsc_shift, 952 __entry->flags) 953 ); 954 955 TRACE_EVENT(kvm_wait_lapic_expire, 956 TP_PROTO(unsigned int vcpu_id, s64 delta), 957 TP_ARGS(vcpu_id, delta), 958 959 TP_STRUCT__entry( 960 __field( unsigned int, vcpu_id ) 961 __field( s64, delta ) 962 ), 963 964 TP_fast_assign( 965 __entry->vcpu_id = vcpu_id; 966 __entry->delta = delta; 967 ), 968 969 TP_printk("vcpu %u: delta %lld (%s)", 970 __entry->vcpu_id, 971 __entry->delta, 972 __entry->delta < 0 ? "early" : "late") 973 ); 974 975 TRACE_EVENT(kvm_enter_smm, 976 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 977 TP_ARGS(vcpu_id, smbase, entering), 978 979 TP_STRUCT__entry( 980 __field( unsigned int, vcpu_id ) 981 __field( u64, smbase ) 982 __field( bool, entering ) 983 ), 984 985 TP_fast_assign( 986 __entry->vcpu_id = vcpu_id; 987 __entry->smbase = smbase; 988 __entry->entering = entering; 989 ), 990 991 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 992 __entry->vcpu_id, 993 __entry->entering ? "entering" : "leaving", 994 __entry->smbase) 995 ); 996 997 /* 998 * Tracepoint for VT-d posted-interrupts. 999 */ 1000 TRACE_EVENT(kvm_pi_irte_update, 1001 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 1002 unsigned int gsi, unsigned int gvec, 1003 u64 pi_desc_addr, bool set), 1004 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1005 1006 TP_STRUCT__entry( 1007 __field( unsigned int, host_irq ) 1008 __field( unsigned int, vcpu_id ) 1009 __field( unsigned int, gsi ) 1010 __field( unsigned int, gvec ) 1011 __field( u64, pi_desc_addr ) 1012 __field( bool, set ) 1013 ), 1014 1015 TP_fast_assign( 1016 __entry->host_irq = host_irq; 1017 __entry->vcpu_id = vcpu_id; 1018 __entry->gsi = gsi; 1019 __entry->gvec = gvec; 1020 __entry->pi_desc_addr = pi_desc_addr; 1021 __entry->set = set; 1022 ), 1023 1024 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1025 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1026 __entry->set ? "enabled and being updated" : "disabled", 1027 __entry->host_irq, 1028 __entry->vcpu_id, 1029 __entry->gsi, 1030 __entry->gvec, 1031 __entry->pi_desc_addr) 1032 ); 1033 1034 /* 1035 * Tracepoint for kvm_hv_notify_acked_sint. 
/*
 * Tracepoint for kvm_hv_notify_acked_sint.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),

	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);

/*
 * Tracepoint for synic_set_irq.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);

/*
 * Tracepoint for kvm_hv_synic_send_eoi.
 */
TRACE_EVENT(kvm_hv_synic_send_eoi,
	TP_PROTO(int vcpu_id, int vector),
	TP_ARGS(vcpu_id, vector),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vector = vector;
	),

	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
);

/*
 * Tracepoint for synic_set_msr.
 */
TRACE_EVENT(kvm_hv_synic_set_msr,
	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
	TP_ARGS(vcpu_id, msr, data, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, msr)
		__field(u64, data)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->msr = msr;
		__entry->data = data;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
);

/*
 * Tracepoint for stimer_set_config.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);

/*
 * Tracepoint for stimer_set_count.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);
/*
 * Tracepoint for stimer_start(periodic timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, exp_time)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);

/*
 * Tracepoint for stimer_start(one-shot timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, count)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);

/*
 * Tracepoint for stimer_timer_callback.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for stimer_expiration.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, msg_send_result),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, msg_send_result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->msg_send_result = msg_send_result;
	),

	TP_printk("vcpu_id %d timer %d msg send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->msg_send_result)
);
/*
 * Tracepoint for stimer_cleanup.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for AMD AVIC
 */
TRACE_EVENT(kvm_avic_incomplete_ipi,
	TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
	TP_ARGS(vcpu, icrh, icrl, id, index),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, icrh)
		__field(u32, icrl)
		__field(u32, id)
		__field(u32, index)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->id = id;
		__entry->index = index;
	),

	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
		  __entry->vcpu, __entry->icrh, __entry->icrl,
		  __entry->id, __entry->index)
);

TRACE_EVENT(kvm_avic_unaccelerated_access,
	TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
	TP_ARGS(vcpu, offset, ft, rw, vec),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, offset)
		__field(bool, ft)
		__field(bool, rw)
		__field(u32, vec)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->offset = offset;
		__entry->ft = ft;
		__entry->rw = rw;
		__entry->vec = vec;
	),

	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
		  __entry->vcpu,
		  __entry->offset,
		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
		  __entry->ft ? "trap" : "fault",
		  __entry->rw ? "write" : "read",
		  __entry->vec)
);

TRACE_EVENT(kvm_hv_timer_state,
	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
	TP_ARGS(vcpu_id, hv_timer_in_use),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned int, hv_timer_in_use)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->hv_timer_in_use = hv_timer_in_use;
	),
	TP_printk("vcpu_id %x hv_timer %x",
		  __entry->vcpu_id,
		  __entry->hv_timer_in_use)
);
#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>