#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field( unsigned long, nr )
		__field( unsigned long, a0 )
		__field( unsigned long, a1 )
		__field( unsigned long, a2 )
		__field( unsigned long, a3 )
	),

	TP_fast_assign(
		__entry->nr = nr;
		__entry->a0 = a0;
		__entry->a1 = a1;
		__entry->a2 = a2;
		__entry->a3 = a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field( __u16, rep_cnt )
		__field( __u16, rep_idx )
		__field( __u64, ingpa )
		__field( __u64, outgpa )
		__field( __u16, code )
		__field( bool, fast )
	),

	TP_fast_assign(
		__entry->rep_cnt = rep_cnt;
		__entry->rep_idx = rep_idx;
		__entry->ingpa = ingpa;
		__entry->outgpa = outgpa;
		__entry->code = code;
		__entry->fast = fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */

#define KVM_PIO_IN	0
#define KVM_PIO_OUT	1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, port )
		__field( unsigned int, size )
		__field( unsigned int, count )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->port = port;
		__entry->size = size;
		__entry->count = count;
		if (size == 1)
			__entry->val = *(unsigned char *)data;
		else if (size == 2)
			__entry->val = *(unsigned short *)data;
		else
			__entry->val = *(unsigned int *)data;
	),

	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);

/*
 * Tracepoint for fast mmio.
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);

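/*
 * Illustrative call sites (not part of this header; the real callers live
 * elsewhere in arch/x86/kvm): each TRACE_EVENT(name, ...) above generates a
 * trace_name() helper that KVM invokes at the corresponding point, roughly:
 *
 *	trace_kvm_entry(vcpu->vcpu_id);
 *	trace_kvm_hypercall(nr, a0, a1, a2, a3);
 *	trace_kvm_pio(KVM_PIO_OUT, port, size, count, data);
 */
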
/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx),
	TP_ARGS(function, rax, rbx, rcx, rdx),

	TP_STRUCT__entry(
		__field( unsigned int, function )
		__field( unsigned long, rax )
		__field( unsigned long, rbx )
		__field( unsigned long, rcx )
		__field( unsigned long, rdx )
	),

	TP_fast_assign(
		__entry->function = function;
		__entry->rax = rax;
		__entry->rbx = rbx;
		__entry->rcx = rcx;
		__entry->rdx = rdx;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx)
);

#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						\
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),	\
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),	\
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT),	\
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),	\
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),	\
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, reg )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->reg = reg;
		__entry->val = val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)	trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)	trace_kvm_apic(1, reg, val)

#define KVM_ISA_VMX	1
#define KVM_ISA_SVM	2

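/*
 * Illustrative usage (assumed call shapes): the wrappers above hide the rw
 * argument of kvm_apic, and the KVM_ISA_* constants tell kvm_exit (below)
 * which exit-reason table to decode with, e.g.:
 *
 *	trace_kvm_apic_write(APIC_EOI, 0);
 *	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 */
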
/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_STRUCT__entry(
		__field( unsigned int, exit_reason )
		__field( unsigned long, guest_rip )
		__field( u32, isa )
		__field( u64, info1 )
		__field( u64, info2 )
	),

	TP_fast_assign(
		__entry->exit_reason = exit_reason;
		__entry->guest_rip = kvm_rip_read(vcpu);
		__entry->isa = isa;
		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
					   &__entry->info2);
	),

	TP_printk("reason %s rip 0x%lx info %llx %llx",
		  (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
		  __entry->guest_rip, __entry->info1, __entry->info2)
);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field( unsigned int, irq )
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq %u", __entry->irq)
);

#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field( u8, exception )
		__field( u8, has_error )
		__field( u32, error_code )
	),

	TP_fast_assign(
		__entry->exception = exception;
		__entry->has_error = has_error;
		__entry->error_code = error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field( unsigned long, fault_address )
		__field( unsigned int, error_code )
	),

	TP_fast_assign(
		__entry->fault_address = fault_address;
		__entry->error_code = error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field( unsigned, write )
		__field( u32, ecx )
		__field( u64, data )
		__field( u8, exception )
	),

	TP_fast_assign(
		__entry->write = write;
		__entry->ecx = ecx;
		__entry->data = data;
		__entry->exception = exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, cr )
		__field( unsigned long, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->cr = cr;
		__entry->val = val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)	trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)	trace_kvm_cr(1, cr, val)

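/*
 * Illustrative usage of the MSR/CR wrappers above (assumed call shapes; the
 * real call sites are in the emulator and the vendor modules):
 *
 *	trace_kvm_msr_write(ecx, data);		// WRMSR that succeeded
 *	trace_kvm_msr_read_ex(ecx);		// RDMSR that raised #GP
 *	trace_kvm_cr_write(cr, val);		// e.g. MOV to CR0/CR3/CR4
 */
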
"write" : "read", 372 __entry->cr, __entry->val) 373 ); 374 375 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 376 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 377 378 TRACE_EVENT(kvm_pic_set_irq, 379 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 380 TP_ARGS(chip, pin, elcr, imr, coalesced), 381 382 TP_STRUCT__entry( 383 __field( __u8, chip ) 384 __field( __u8, pin ) 385 __field( __u8, elcr ) 386 __field( __u8, imr ) 387 __field( bool, coalesced ) 388 ), 389 390 TP_fast_assign( 391 __entry->chip = chip; 392 __entry->pin = pin; 393 __entry->elcr = elcr; 394 __entry->imr = imr; 395 __entry->coalesced = coalesced; 396 ), 397 398 TP_printk("chip %u pin %u (%s%s)%s", 399 __entry->chip, __entry->pin, 400 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 401 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 402 __entry->coalesced ? " (coalesced)" : "") 403 ); 404 405 #define kvm_apic_dst_shorthand \ 406 {0x0, "dst"}, \ 407 {0x1, "self"}, \ 408 {0x2, "all"}, \ 409 {0x3, "all-but-self"} 410 411 TRACE_EVENT(kvm_apic_ipi, 412 TP_PROTO(__u32 icr_low, __u32 dest_id), 413 TP_ARGS(icr_low, dest_id), 414 415 TP_STRUCT__entry( 416 __field( __u32, icr_low ) 417 __field( __u32, dest_id ) 418 ), 419 420 TP_fast_assign( 421 __entry->icr_low = icr_low; 422 __entry->dest_id = dest_id; 423 ), 424 425 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 426 __entry->dest_id, (u8)__entry->icr_low, 427 __print_symbolic((__entry->icr_low >> 8 & 0x7), 428 kvm_deliver_mode), 429 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 430 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 431 (__entry->icr_low & (1<<15)) ? "level" : "edge", 432 __print_symbolic((__entry->icr_low >> 18 & 0x3), 433 kvm_apic_dst_shorthand)) 434 ); 435 436 TRACE_EVENT(kvm_apic_accept_irq, 437 TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec), 438 TP_ARGS(apicid, dm, tm, vec), 439 440 TP_STRUCT__entry( 441 __field( __u32, apicid ) 442 __field( __u16, dm ) 443 __field( __u8, tm ) 444 __field( __u8, vec ) 445 ), 446 447 TP_fast_assign( 448 __entry->apicid = apicid; 449 __entry->dm = dm; 450 __entry->tm = tm; 451 __entry->vec = vec; 452 ), 453 454 TP_printk("apicid %x vec %u (%s|%s)", 455 __entry->apicid, __entry->vec, 456 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 457 __entry->tm ? 
"level" : "edge") 458 ); 459 460 TRACE_EVENT(kvm_eoi, 461 TP_PROTO(struct kvm_lapic *apic, int vector), 462 TP_ARGS(apic, vector), 463 464 TP_STRUCT__entry( 465 __field( __u32, apicid ) 466 __field( int, vector ) 467 ), 468 469 TP_fast_assign( 470 __entry->apicid = apic->vcpu->vcpu_id; 471 __entry->vector = vector; 472 ), 473 474 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 475 ); 476 477 TRACE_EVENT(kvm_pv_eoi, 478 TP_PROTO(struct kvm_lapic *apic, int vector), 479 TP_ARGS(apic, vector), 480 481 TP_STRUCT__entry( 482 __field( __u32, apicid ) 483 __field( int, vector ) 484 ), 485 486 TP_fast_assign( 487 __entry->apicid = apic->vcpu->vcpu_id; 488 __entry->vector = vector; 489 ), 490 491 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 492 ); 493 494 /* 495 * Tracepoint for nested VMRUN 496 */ 497 TRACE_EVENT(kvm_nested_vmrun, 498 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 499 __u32 event_inj, bool npt), 500 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 501 502 TP_STRUCT__entry( 503 __field( __u64, rip ) 504 __field( __u64, vmcb ) 505 __field( __u64, nested_rip ) 506 __field( __u32, int_ctl ) 507 __field( __u32, event_inj ) 508 __field( bool, npt ) 509 ), 510 511 TP_fast_assign( 512 __entry->rip = rip; 513 __entry->vmcb = vmcb; 514 __entry->nested_rip = nested_rip; 515 __entry->int_ctl = int_ctl; 516 __entry->event_inj = event_inj; 517 __entry->npt = npt; 518 ), 519 520 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 521 "event_inj: 0x%08x npt: %s", 522 __entry->rip, __entry->vmcb, __entry->nested_rip, 523 __entry->int_ctl, __entry->event_inj, 524 __entry->npt ? "on" : "off") 525 ); 526 527 TRACE_EVENT(kvm_nested_intercepts, 528 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), 529 TP_ARGS(cr_read, cr_write, exceptions, intercept), 530 531 TP_STRUCT__entry( 532 __field( __u16, cr_read ) 533 __field( __u16, cr_write ) 534 __field( __u32, exceptions ) 535 __field( __u64, intercept ) 536 ), 537 538 TP_fast_assign( 539 __entry->cr_read = cr_read; 540 __entry->cr_write = cr_write; 541 __entry->exceptions = exceptions; 542 __entry->intercept = intercept; 543 ), 544 545 TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", 546 __entry->cr_read, __entry->cr_write, __entry->exceptions, 547 __entry->intercept) 548 ); 549 /* 550 * Tracepoint for #VMEXIT while nested 551 */ 552 TRACE_EVENT(kvm_nested_vmexit, 553 TP_PROTO(__u64 rip, __u32 exit_code, 554 __u64 exit_info1, __u64 exit_info2, 555 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 556 TP_ARGS(rip, exit_code, exit_info1, exit_info2, 557 exit_int_info, exit_int_info_err, isa), 558 559 TP_STRUCT__entry( 560 __field( __u64, rip ) 561 __field( __u32, exit_code ) 562 __field( __u64, exit_info1 ) 563 __field( __u64, exit_info2 ) 564 __field( __u32, exit_int_info ) 565 __field( __u32, exit_int_info_err ) 566 __field( __u32, isa ) 567 ), 568 569 TP_fast_assign( 570 __entry->rip = rip; 571 __entry->exit_code = exit_code; 572 __entry->exit_info1 = exit_info1; 573 __entry->exit_info2 = exit_info2; 574 __entry->exit_int_info = exit_int_info; 575 __entry->exit_int_info_err = exit_int_info_err; 576 __entry->isa = isa; 577 ), 578 TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " 579 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 580 __entry->rip, 581 (__entry->isa == KVM_ISA_VMX) ? 
/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	TP_PROTO(__u64 rip, __u32 exit_code,
		 __u64 exit_info1, __u64 exit_info2,
		 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, exit_code )
		__field( __u64, exit_info1 )
		__field( __u64, exit_info2 )
		__field( __u32, exit_int_info )
		__field( __u32, exit_int_info_err )
		__field( __u32, isa )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->exit_code = exit_code;
		__entry->exit_info1 = exit_info1;
		__entry->exit_info2 = exit_info2;
		__entry->exit_int_info = exit_int_info;
		__entry->exit_int_info_err = exit_int_info_err;
		__entry->isa = isa;
	),
	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		  (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	TP_PROTO(__u32 exit_code,
		 __u64 exit_info1, __u64 exit_info2,
		 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field( __u32, exit_code )
		__field( __u64, exit_info1 )
		__field( __u64, exit_info2 )
		__field( __u32, exit_int_info )
		__field( __u32, exit_int_info_err )
		__field( __u32, isa )
	),

	TP_fast_assign(
		__entry->exit_code = exit_code;
		__entry->exit_info1 = exit_info1;
		__entry->exit_info2 = exit_info2;
		__entry->exit_int_info = exit_int_info;
		__entry->exit_int_info_err = exit_int_info_err;
		__entry->isa = isa;
	),

	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	TP_PROTO(__u64 rip),
	TP_ARGS(rip),

	TP_STRUCT__entry(
		__field( __u64, rip )
	),

	TP_fast_assign(
		__entry->rip = rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for nested INVLPGA
 */
TRACE_EVENT(kvm_invlpga,
	TP_PROTO(__u64 rip, int asid, u64 address),
	TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( int, asid )
		__field( __u64, address )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->asid = asid;
		__entry->address = address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for nested SKINIT
 */
TRACE_EVENT(kvm_skinit,
	TP_PROTO(__u64 rip, __u32 slb),
	TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, slb )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->slb = slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

#define KVM_EMUL_INSN_F_CR0_PE	(1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM	(1 << 1)
#define KVM_EMUL_INSN_F_CS_D	(1 << 2)
#define KVM_EMUL_INSN_F_CS_L	(1 << 3)

#define kvm_trace_symbol_emul_flags			\
	{ 0,			"real" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		\
	{ KVM_EMUL_INSN_F_CR0_PE, "prot16" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_D, "prot32" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_L, "prot64" }

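/*
 * For example, a 64-bit code segment decodes to
 * (KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L) and is rendered as
 * "prot64" by __print_symbolic() in kvm_emulate_insn below.
 */
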
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, csbase )
		__field( __u8, len )
		__array( __u8, insn, 15 )
		__field( __u8, flags )
		__field( __u8, failed )
	),

	TP_fast_assign(
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
			       - vcpu->arch.emulate_ctxt.fetch.data;
		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
	),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		  )
);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

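/*
 * Illustrative usage (assumed call shape): the x86 emulation path calls
 *
 *	trace_kvm_emulate_insn_start(vcpu);
 *
 * before decoding an instruction and trace_kvm_emulate_insn_failed(vcpu)
 * when emulation gives up, so the "failed" suffix distinguishes the two.
 */
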
"GPA" : "GVA") 785 ); 786 787 TRACE_EVENT(kvm_write_tsc_offset, 788 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 789 __u64 next_tsc_offset), 790 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 791 792 TP_STRUCT__entry( 793 __field( unsigned int, vcpu_id ) 794 __field( __u64, previous_tsc_offset ) 795 __field( __u64, next_tsc_offset ) 796 ), 797 798 TP_fast_assign( 799 __entry->vcpu_id = vcpu_id; 800 __entry->previous_tsc_offset = previous_tsc_offset; 801 __entry->next_tsc_offset = next_tsc_offset; 802 ), 803 804 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 805 __entry->previous_tsc_offset, __entry->next_tsc_offset) 806 ); 807 808 #ifdef CONFIG_X86_64 809 810 #define host_clocks \ 811 {VCLOCK_NONE, "none"}, \ 812 {VCLOCK_TSC, "tsc"} \ 813 814 TRACE_EVENT(kvm_update_master_clock, 815 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 816 TP_ARGS(use_master_clock, host_clock, offset_matched), 817 818 TP_STRUCT__entry( 819 __field( bool, use_master_clock ) 820 __field( unsigned int, host_clock ) 821 __field( bool, offset_matched ) 822 ), 823 824 TP_fast_assign( 825 __entry->use_master_clock = use_master_clock; 826 __entry->host_clock = host_clock; 827 __entry->offset_matched = offset_matched; 828 ), 829 830 TP_printk("masterclock %d hostclock %s offsetmatched %u", 831 __entry->use_master_clock, 832 __print_symbolic(__entry->host_clock, host_clocks), 833 __entry->offset_matched) 834 ); 835 836 TRACE_EVENT(kvm_track_tsc, 837 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 838 unsigned int online_vcpus, bool use_master_clock, 839 unsigned int host_clock), 840 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 841 host_clock), 842 843 TP_STRUCT__entry( 844 __field( unsigned int, vcpu_id ) 845 __field( unsigned int, nr_vcpus_matched_tsc ) 846 __field( unsigned int, online_vcpus ) 847 __field( bool, use_master_clock ) 848 __field( unsigned int, host_clock ) 849 ), 850 851 TP_fast_assign( 852 __entry->vcpu_id = vcpu_id; 853 __entry->nr_vcpus_matched_tsc = nr_matched; 854 __entry->online_vcpus = online_vcpus; 855 __entry->use_master_clock = use_master_clock; 856 __entry->host_clock = host_clock; 857 ), 858 859 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 860 " hostclock %s", 861 __entry->vcpu_id, __entry->use_master_clock, 862 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 863 __print_symbolic(__entry->host_clock, host_clocks)) 864 ); 865 866 #endif /* CONFIG_X86_64 */ 867 868 /* 869 * Tracepoint for PML full VMEXIT. 870 */ 871 TRACE_EVENT(kvm_pml_full, 872 TP_PROTO(unsigned int vcpu_id), 873 TP_ARGS(vcpu_id), 874 875 TP_STRUCT__entry( 876 __field( unsigned int, vcpu_id ) 877 ), 878 879 TP_fast_assign( 880 __entry->vcpu_id = vcpu_id; 881 ), 882 883 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 884 ); 885 886 TRACE_EVENT(kvm_ple_window, 887 TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old), 888 TP_ARGS(grow, vcpu_id, new, old), 889 890 TP_STRUCT__entry( 891 __field( bool, grow ) 892 __field( unsigned int, vcpu_id ) 893 __field( int, new ) 894 __field( int, old ) 895 ), 896 897 TP_fast_assign( 898 __entry->grow = grow; 899 __entry->vcpu_id = vcpu_id; 900 __entry->new = new; 901 __entry->old = old; 902 ), 903 904 TP_printk("vcpu %u: ple_window %d (%s %d)", 905 __entry->vcpu_id, 906 __entry->new, 907 __entry->grow ? 
"grow" : "shrink", 908 __entry->old) 909 ); 910 911 #define trace_kvm_ple_window_grow(vcpu_id, new, old) \ 912 trace_kvm_ple_window(true, vcpu_id, new, old) 913 #define trace_kvm_ple_window_shrink(vcpu_id, new, old) \ 914 trace_kvm_ple_window(false, vcpu_id, new, old) 915 916 TRACE_EVENT(kvm_pvclock_update, 917 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 918 TP_ARGS(vcpu_id, pvclock), 919 920 TP_STRUCT__entry( 921 __field( unsigned int, vcpu_id ) 922 __field( __u32, version ) 923 __field( __u64, tsc_timestamp ) 924 __field( __u64, system_time ) 925 __field( __u32, tsc_to_system_mul ) 926 __field( __s8, tsc_shift ) 927 __field( __u8, flags ) 928 ), 929 930 TP_fast_assign( 931 __entry->vcpu_id = vcpu_id; 932 __entry->version = pvclock->version; 933 __entry->tsc_timestamp = pvclock->tsc_timestamp; 934 __entry->system_time = pvclock->system_time; 935 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 936 __entry->tsc_shift = pvclock->tsc_shift; 937 __entry->flags = pvclock->flags; 938 ), 939 940 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 941 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 942 "flags 0x%x }", 943 __entry->vcpu_id, 944 __entry->version, 945 __entry->tsc_timestamp, 946 __entry->system_time, 947 __entry->tsc_to_system_mul, 948 __entry->tsc_shift, 949 __entry->flags) 950 ); 951 952 TRACE_EVENT(kvm_wait_lapic_expire, 953 TP_PROTO(unsigned int vcpu_id, s64 delta), 954 TP_ARGS(vcpu_id, delta), 955 956 TP_STRUCT__entry( 957 __field( unsigned int, vcpu_id ) 958 __field( s64, delta ) 959 ), 960 961 TP_fast_assign( 962 __entry->vcpu_id = vcpu_id; 963 __entry->delta = delta; 964 ), 965 966 TP_printk("vcpu %u: delta %lld (%s)", 967 __entry->vcpu_id, 968 __entry->delta, 969 __entry->delta < 0 ? "early" : "late") 970 ); 971 972 TRACE_EVENT(kvm_enter_smm, 973 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 974 TP_ARGS(vcpu_id, smbase, entering), 975 976 TP_STRUCT__entry( 977 __field( unsigned int, vcpu_id ) 978 __field( u64, smbase ) 979 __field( bool, entering ) 980 ), 981 982 TP_fast_assign( 983 __entry->vcpu_id = vcpu_id; 984 __entry->smbase = smbase; 985 __entry->entering = entering; 986 ), 987 988 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 989 __entry->vcpu_id, 990 __entry->entering ? "entering" : "leaving", 991 __entry->smbase) 992 ); 993 994 /* 995 * Tracepoint for VT-d posted-interrupts. 996 */ 997 TRACE_EVENT(kvm_pi_irte_update, 998 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 999 unsigned int gsi, unsigned int gvec, 1000 u64 pi_desc_addr, bool set), 1001 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1002 1003 TP_STRUCT__entry( 1004 __field( unsigned int, host_irq ) 1005 __field( unsigned int, vcpu_id ) 1006 __field( unsigned int, gsi ) 1007 __field( unsigned int, gvec ) 1008 __field( u64, pi_desc_addr ) 1009 __field( bool, set ) 1010 ), 1011 1012 TP_fast_assign( 1013 __entry->host_irq = host_irq; 1014 __entry->vcpu_id = vcpu_id; 1015 __entry->gsi = gsi; 1016 __entry->gvec = gvec; 1017 __entry->pi_desc_addr = pi_desc_addr; 1018 __entry->set = set; 1019 ), 1020 1021 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1022 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1023 __entry->set ? "enabled and being updated" : "disabled", 1024 __entry->host_irq, 1025 __entry->vcpu_id, 1026 __entry->gsi, 1027 __entry->gvec, 1028 __entry->pi_desc_addr) 1029 ); 1030 1031 /* 1032 * Tracepoint for kvm_hv_notify_acked_sint. 
/*
 * Tracepoint for kvm_hv_notify_acked_sint.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),

	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);

/*
 * Tracepoint for synic_set_irq.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);

/*
 * Tracepoint for kvm_hv_synic_send_eoi.
 */
TRACE_EVENT(kvm_hv_synic_send_eoi,
	TP_PROTO(int vcpu_id, int vector),
	TP_ARGS(vcpu_id, vector),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vector = vector;
	),

	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
);

/*
 * Tracepoint for synic_set_msr.
 */
TRACE_EVENT(kvm_hv_synic_set_msr,
	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
	TP_ARGS(vcpu_id, msr, data, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, msr)
		__field(u64, data)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->msr = msr;
		__entry->data = data;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
);

/*
 * Tracepoint for stimer_set_config.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);

/*
 * Tracepoint for stimer_set_count.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);

/*
 * Tracepoint for stimer_start(periodic timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, exp_time)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);

/*
 * Tracepoint for stimer_start(one-shot timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, count)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);

/*
 * Tracepoint for stimer_timer_callback.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for stimer_expiration.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, msg_send_result),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, msg_send_result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->msg_send_result = msg_send_result;
	),

	TP_printk("vcpu_id %d timer %d msg send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->msg_send_result)
);

/*
 * Tracepoint for stimer_cleanup.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for AMD AVIC
 */
TRACE_EVENT(kvm_avic_incomplete_ipi,
	TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
	TP_ARGS(vcpu, icrh, icrl, id, index),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, icrh)
		__field(u32, icrl)
		__field(u32, id)
		__field(u32, index)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->id = id;
		__entry->index = index;
	),

	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u\n",
		  __entry->vcpu, __entry->icrh, __entry->icrl,
		  __entry->id, __entry->index)
);

TRACE_EVENT(kvm_avic_unaccelerated_access,
	TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
	TP_ARGS(vcpu, offset, ft, rw, vec),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, offset)
		__field(bool, ft)
		__field(bool, rw)
		__field(u32, vec)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->offset = offset;
		__entry->ft = ft;
		__entry->rw = rw;
		__entry->vec = vec;
	),

	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x\n",
		  __entry->vcpu,
		  __entry->offset,
		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
		  __entry->ft ? "trap" : "fault",
		  __entry->rw ? "write" : "read",
		  __entry->vec)
);

TRACE_EVENT(kvm_hv_timer_state,
	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
	TP_ARGS(vcpu_id, hv_timer_in_use),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned int, hv_timer_in_use)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->hv_timer_in_use = hv_timer_in_use;
	),
	TP_printk("vcpu_id %x hv_timer %x\n",
		  __entry->vcpu_id,
		  __entry->hv_timer_in_use)
);
#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>
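
/*
 * Illustrative use of this header (assumed, following the usual kernel
 * tracepoint convention): exactly one translation unit defines
 * CREATE_TRACE_POINTS before including it, so that define_trace.h emits the
 * tracepoint bodies; every other user simply includes "trace.h" and calls
 * the generated trace_*() helpers.
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 */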