1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 3 #define _TRACE_KVM_H 4 5 #include <linux/tracepoint.h> 6 #include <asm/vmx.h> 7 #include <asm/svm.h> 8 #include <asm/clocksource.h> 9 #include <asm/pvclock-abi.h> 10 11 #undef TRACE_SYSTEM 12 #define TRACE_SYSTEM kvm 13 14 /* 15 * Tracepoint for guest mode entry. 16 */ 17 TRACE_EVENT(kvm_entry, 18 TP_PROTO(unsigned int vcpu_id), 19 TP_ARGS(vcpu_id), 20 21 TP_STRUCT__entry( 22 __field( unsigned int, vcpu_id ) 23 ), 24 25 TP_fast_assign( 26 __entry->vcpu_id = vcpu_id; 27 ), 28 29 TP_printk("vcpu %u", __entry->vcpu_id) 30 ); 31 32 /* 33 * Tracepoint for hypercall. 34 */ 35 TRACE_EVENT(kvm_hypercall, 36 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 37 unsigned long a2, unsigned long a3), 38 TP_ARGS(nr, a0, a1, a2, a3), 39 40 TP_STRUCT__entry( 41 __field( unsigned long, nr ) 42 __field( unsigned long, a0 ) 43 __field( unsigned long, a1 ) 44 __field( unsigned long, a2 ) 45 __field( unsigned long, a3 ) 46 ), 47 48 TP_fast_assign( 49 __entry->nr = nr; 50 __entry->a0 = a0; 51 __entry->a1 = a1; 52 __entry->a2 = a2; 53 __entry->a3 = a3; 54 ), 55 56 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", 57 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 58 __entry->a3) 59 ); 60 61 /* 62 * Tracepoint for hypercall. 63 */ 64 TRACE_EVENT(kvm_hv_hypercall, 65 TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx, 66 __u64 ingpa, __u64 outgpa), 67 TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), 68 69 TP_STRUCT__entry( 70 __field( __u16, rep_cnt ) 71 __field( __u16, rep_idx ) 72 __field( __u64, ingpa ) 73 __field( __u64, outgpa ) 74 __field( __u16, code ) 75 __field( bool, fast ) 76 ), 77 78 TP_fast_assign( 79 __entry->rep_cnt = rep_cnt; 80 __entry->rep_idx = rep_idx; 81 __entry->ingpa = ingpa; 82 __entry->outgpa = outgpa; 83 __entry->code = code; 84 __entry->fast = fast; 85 ), 86 87 TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 88 __entry->code, __entry->fast ? "fast" : "slow", 89 __entry->rep_cnt, __entry->rep_idx, __entry->ingpa, 90 __entry->outgpa) 91 ); 92 93 /* 94 * Tracepoint for PIO. 95 */ 96 97 #define KVM_PIO_IN 0 98 #define KVM_PIO_OUT 1 99 100 TRACE_EVENT(kvm_pio, 101 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 102 unsigned int count, void *data), 103 TP_ARGS(rw, port, size, count, data), 104 105 TP_STRUCT__entry( 106 __field( unsigned int, rw ) 107 __field( unsigned int, port ) 108 __field( unsigned int, size ) 109 __field( unsigned int, count ) 110 __field( unsigned int, val ) 111 ), 112 113 TP_fast_assign( 114 __entry->rw = rw; 115 __entry->port = port; 116 __entry->size = size; 117 __entry->count = count; 118 if (size == 1) 119 __entry->val = *(unsigned char *)data; 120 else if (size == 2) 121 __entry->val = *(unsigned short *)data; 122 else 123 __entry->val = *(unsigned int *)data; 124 ), 125 126 TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s", 127 __entry->rw ? "write" : "read", 128 __entry->port, __entry->size, __entry->count, __entry->val, 129 __entry->count > 1 ? "(...)" : "") 130 ); 131 132 /* 133 * Tracepoint for fast mmio. 134 */ 135 TRACE_EVENT(kvm_fast_mmio, 136 TP_PROTO(u64 gpa), 137 TP_ARGS(gpa), 138 139 TP_STRUCT__entry( 140 __field(u64, gpa) 141 ), 142 143 TP_fast_assign( 144 __entry->gpa = gpa; 145 ), 146 147 TP_printk("fast mmio at gpa 0x%llx", __entry->gpa) 148 ); 149 150 /* 151 * Tracepoint for cpuid. 152 */ 153 TRACE_EVENT(kvm_cpuid, 154 TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx, 155 unsigned long rcx, unsigned long rdx, bool found), 156 TP_ARGS(function, rax, rbx, rcx, rdx, found), 157 158 TP_STRUCT__entry( 159 __field( unsigned int, function ) 160 __field( unsigned long, rax ) 161 __field( unsigned long, rbx ) 162 __field( unsigned long, rcx ) 163 __field( unsigned long, rdx ) 164 __field( bool, found ) 165 ), 166 167 TP_fast_assign( 168 __entry->function = function; 169 __entry->rax = rax; 170 __entry->rbx = rbx; 171 __entry->rcx = rcx; 172 __entry->rdx = rdx; 173 __entry->found = found; 174 ), 175 176 TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s", 177 __entry->function, __entry->rax, 178 __entry->rbx, __entry->rcx, __entry->rdx, 179 __entry->found ? "found" : "not found") 180 ); 181 182 #define AREG(x) { APIC_##x, "APIC_" #x } 183 184 #define kvm_trace_symbol_apic \ 185 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ 186 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ 187 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ 188 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ 189 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \ 190 AREG(ECTRL) 191 /* 192 * Tracepoint for apic access. 193 */ 194 TRACE_EVENT(kvm_apic, 195 TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val), 196 TP_ARGS(rw, reg, val), 197 198 TP_STRUCT__entry( 199 __field( unsigned int, rw ) 200 __field( unsigned int, reg ) 201 __field( unsigned int, val ) 202 ), 203 204 TP_fast_assign( 205 __entry->rw = rw; 206 __entry->reg = reg; 207 __entry->val = val; 208 ), 209 210 TP_printk("apic_%s %s = 0x%x", 211 __entry->rw ? "write" : "read", 212 __print_symbolic(__entry->reg, kvm_trace_symbol_apic), 213 __entry->val) 214 ); 215 216 #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) 217 #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) 218 219 #define KVM_ISA_VMX 1 220 #define KVM_ISA_SVM 2 221 222 /* 223 * Tracepoint for kvm guest exit: 224 */ 225 TRACE_EVENT(kvm_exit, 226 TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), 227 TP_ARGS(exit_reason, vcpu, isa), 228 229 TP_STRUCT__entry( 230 __field( unsigned int, exit_reason ) 231 __field( unsigned long, guest_rip ) 232 __field( u32, isa ) 233 __field( u64, info1 ) 234 __field( u64, info2 ) 235 __field( unsigned int, vcpu_id ) 236 ), 237 238 TP_fast_assign( 239 __entry->exit_reason = exit_reason; 240 __entry->guest_rip = kvm_rip_read(vcpu); 241 __entry->isa = isa; 242 __entry->vcpu_id = vcpu->vcpu_id; 243 kvm_x86_ops->get_exit_info(vcpu, &__entry->info1, 244 &__entry->info2); 245 ), 246 247 TP_printk("vcpu %u reason %s rip 0x%lx info %llx %llx", 248 __entry->vcpu_id, 249 (__entry->isa == KVM_ISA_VMX) ? 250 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) : 251 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS), 252 __entry->guest_rip, __entry->info1, __entry->info2) 253 ); 254 255 /* 256 * Tracepoint for kvm interrupt injection: 257 */ 258 TRACE_EVENT(kvm_inj_virq, 259 TP_PROTO(unsigned int irq), 260 TP_ARGS(irq), 261 262 TP_STRUCT__entry( 263 __field( unsigned int, irq ) 264 ), 265 266 TP_fast_assign( 267 __entry->irq = irq; 268 ), 269 270 TP_printk("irq %u", __entry->irq) 271 ); 272 273 #define EXS(x) { x##_VECTOR, "#" #x } 274 275 #define kvm_trace_sym_exc \ 276 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \ 277 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \ 278 EXS(MF), EXS(AC), EXS(MC) 279 280 /* 281 * Tracepoint for kvm interrupt injection: 282 */ 283 TRACE_EVENT(kvm_inj_exception, 284 TP_PROTO(unsigned exception, bool has_error, unsigned error_code), 285 TP_ARGS(exception, has_error, error_code), 286 287 TP_STRUCT__entry( 288 __field( u8, exception ) 289 __field( u8, has_error ) 290 __field( u32, error_code ) 291 ), 292 293 TP_fast_assign( 294 __entry->exception = exception; 295 __entry->has_error = has_error; 296 __entry->error_code = error_code; 297 ), 298 299 TP_printk("%s (0x%x)", 300 __print_symbolic(__entry->exception, kvm_trace_sym_exc), 301 /* FIXME: don't print error_code if not present */ 302 __entry->has_error ? __entry->error_code : 0) 303 ); 304 305 /* 306 * Tracepoint for page fault. 307 */ 308 TRACE_EVENT(kvm_page_fault, 309 TP_PROTO(unsigned long fault_address, unsigned int error_code), 310 TP_ARGS(fault_address, error_code), 311 312 TP_STRUCT__entry( 313 __field( unsigned long, fault_address ) 314 __field( unsigned int, error_code ) 315 ), 316 317 TP_fast_assign( 318 __entry->fault_address = fault_address; 319 __entry->error_code = error_code; 320 ), 321 322 TP_printk("address %lx error_code %x", 323 __entry->fault_address, __entry->error_code) 324 ); 325 326 /* 327 * Tracepoint for guest MSR access. 328 */ 329 TRACE_EVENT(kvm_msr, 330 TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), 331 TP_ARGS(write, ecx, data, exception), 332 333 TP_STRUCT__entry( 334 __field( unsigned, write ) 335 __field( u32, ecx ) 336 __field( u64, data ) 337 __field( u8, exception ) 338 ), 339 340 TP_fast_assign( 341 __entry->write = write; 342 __entry->ecx = ecx; 343 __entry->data = data; 344 __entry->exception = exception; 345 ), 346 347 TP_printk("msr_%s %x = 0x%llx%s", 348 __entry->write ? "write" : "read", 349 __entry->ecx, __entry->data, 350 __entry->exception ? " (#GP)" : "") 351 ); 352 353 #define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) 354 #define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) 355 #define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) 356 #define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) 357 358 /* 359 * Tracepoint for guest CR access. 360 */ 361 TRACE_EVENT(kvm_cr, 362 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), 363 TP_ARGS(rw, cr, val), 364 365 TP_STRUCT__entry( 366 __field( unsigned int, rw ) 367 __field( unsigned int, cr ) 368 __field( unsigned long, val ) 369 ), 370 371 TP_fast_assign( 372 __entry->rw = rw; 373 __entry->cr = cr; 374 __entry->val = val; 375 ), 376 377 TP_printk("cr_%s %x = 0x%lx", 378 __entry->rw ? "write" : "read", 379 __entry->cr, __entry->val) 380 ); 381 382 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 383 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 384 385 TRACE_EVENT(kvm_pic_set_irq, 386 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 387 TP_ARGS(chip, pin, elcr, imr, coalesced), 388 389 TP_STRUCT__entry( 390 __field( __u8, chip ) 391 __field( __u8, pin ) 392 __field( __u8, elcr ) 393 __field( __u8, imr ) 394 __field( bool, coalesced ) 395 ), 396 397 TP_fast_assign( 398 __entry->chip = chip; 399 __entry->pin = pin; 400 __entry->elcr = elcr; 401 __entry->imr = imr; 402 __entry->coalesced = coalesced; 403 ), 404 405 TP_printk("chip %u pin %u (%s%s)%s", 406 __entry->chip, __entry->pin, 407 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 408 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 409 __entry->coalesced ? " (coalesced)" : "") 410 ); 411 412 #define kvm_apic_dst_shorthand \ 413 {0x0, "dst"}, \ 414 {0x1, "self"}, \ 415 {0x2, "all"}, \ 416 {0x3, "all-but-self"} 417 418 TRACE_EVENT(kvm_apic_ipi, 419 TP_PROTO(__u32 icr_low, __u32 dest_id), 420 TP_ARGS(icr_low, dest_id), 421 422 TP_STRUCT__entry( 423 __field( __u32, icr_low ) 424 __field( __u32, dest_id ) 425 ), 426 427 TP_fast_assign( 428 __entry->icr_low = icr_low; 429 __entry->dest_id = dest_id; 430 ), 431 432 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 433 __entry->dest_id, (u8)__entry->icr_low, 434 __print_symbolic((__entry->icr_low >> 8 & 0x7), 435 kvm_deliver_mode), 436 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 437 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 438 (__entry->icr_low & (1<<15)) ? "level" : "edge", 439 __print_symbolic((__entry->icr_low >> 18 & 0x3), 440 kvm_apic_dst_shorthand)) 441 ); 442 443 TRACE_EVENT(kvm_apic_accept_irq, 444 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 445 TP_ARGS(apicid, dm, tm, vec), 446 447 TP_STRUCT__entry( 448 __field( __u32, apicid ) 449 __field( __u16, dm ) 450 __field( __u16, tm ) 451 __field( __u8, vec ) 452 ), 453 454 TP_fast_assign( 455 __entry->apicid = apicid; 456 __entry->dm = dm; 457 __entry->tm = tm; 458 __entry->vec = vec; 459 ), 460 461 TP_printk("apicid %x vec %u (%s|%s)", 462 __entry->apicid, __entry->vec, 463 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 464 __entry->tm ? "level" : "edge") 465 ); 466 467 TRACE_EVENT(kvm_eoi, 468 TP_PROTO(struct kvm_lapic *apic, int vector), 469 TP_ARGS(apic, vector), 470 471 TP_STRUCT__entry( 472 __field( __u32, apicid ) 473 __field( int, vector ) 474 ), 475 476 TP_fast_assign( 477 __entry->apicid = apic->vcpu->vcpu_id; 478 __entry->vector = vector; 479 ), 480 481 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 482 ); 483 484 TRACE_EVENT(kvm_pv_eoi, 485 TP_PROTO(struct kvm_lapic *apic, int vector), 486 TP_ARGS(apic, vector), 487 488 TP_STRUCT__entry( 489 __field( __u32, apicid ) 490 __field( int, vector ) 491 ), 492 493 TP_fast_assign( 494 __entry->apicid = apic->vcpu->vcpu_id; 495 __entry->vector = vector; 496 ), 497 498 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 499 ); 500 501 /* 502 * Tracepoint for nested VMRUN 503 */ 504 TRACE_EVENT(kvm_nested_vmrun, 505 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 506 __u32 event_inj, bool npt), 507 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 508 509 TP_STRUCT__entry( 510 __field( __u64, rip ) 511 __field( __u64, vmcb ) 512 __field( __u64, nested_rip ) 513 __field( __u32, int_ctl ) 514 __field( __u32, event_inj ) 515 __field( bool, npt ) 516 ), 517 518 TP_fast_assign( 519 __entry->rip = rip; 520 __entry->vmcb = vmcb; 521 __entry->nested_rip = nested_rip; 522 __entry->int_ctl = int_ctl; 523 __entry->event_inj = event_inj; 524 __entry->npt = npt; 525 ), 526 527 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 528 "event_inj: 0x%08x npt: %s", 529 __entry->rip, __entry->vmcb, __entry->nested_rip, 530 __entry->int_ctl, __entry->event_inj, 531 __entry->npt ? "on" : "off") 532 ); 533 534 TRACE_EVENT(kvm_nested_intercepts, 535 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), 536 TP_ARGS(cr_read, cr_write, exceptions, intercept), 537 538 TP_STRUCT__entry( 539 __field( __u16, cr_read ) 540 __field( __u16, cr_write ) 541 __field( __u32, exceptions ) 542 __field( __u64, intercept ) 543 ), 544 545 TP_fast_assign( 546 __entry->cr_read = cr_read; 547 __entry->cr_write = cr_write; 548 __entry->exceptions = exceptions; 549 __entry->intercept = intercept; 550 ), 551 552 TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", 553 __entry->cr_read, __entry->cr_write, __entry->exceptions, 554 __entry->intercept) 555 ); 556 /* 557 * Tracepoint for #VMEXIT while nested 558 */ 559 TRACE_EVENT(kvm_nested_vmexit, 560 TP_PROTO(__u64 rip, __u32 exit_code, 561 __u64 exit_info1, __u64 exit_info2, 562 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 563 TP_ARGS(rip, exit_code, exit_info1, exit_info2, 564 exit_int_info, exit_int_info_err, isa), 565 566 TP_STRUCT__entry( 567 __field( __u64, rip ) 568 __field( __u32, exit_code ) 569 __field( __u64, exit_info1 ) 570 __field( __u64, exit_info2 ) 571 __field( __u32, exit_int_info ) 572 __field( __u32, exit_int_info_err ) 573 __field( __u32, isa ) 574 ), 575 576 TP_fast_assign( 577 __entry->rip = rip; 578 __entry->exit_code = exit_code; 579 __entry->exit_info1 = exit_info1; 580 __entry->exit_info2 = exit_info2; 581 __entry->exit_int_info = exit_int_info; 582 __entry->exit_int_info_err = exit_int_info_err; 583 __entry->isa = isa; 584 ), 585 TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " 586 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 587 __entry->rip, 588 (__entry->isa == KVM_ISA_VMX) ? 589 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : 590 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), 591 __entry->exit_info1, __entry->exit_info2, 592 __entry->exit_int_info, __entry->exit_int_info_err) 593 ); 594 595 /* 596 * Tracepoint for #VMEXIT reinjected to the guest 597 */ 598 TRACE_EVENT(kvm_nested_vmexit_inject, 599 TP_PROTO(__u32 exit_code, 600 __u64 exit_info1, __u64 exit_info2, 601 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 602 TP_ARGS(exit_code, exit_info1, exit_info2, 603 exit_int_info, exit_int_info_err, isa), 604 605 TP_STRUCT__entry( 606 __field( __u32, exit_code ) 607 __field( __u64, exit_info1 ) 608 __field( __u64, exit_info2 ) 609 __field( __u32, exit_int_info ) 610 __field( __u32, exit_int_info_err ) 611 __field( __u32, isa ) 612 ), 613 614 TP_fast_assign( 615 __entry->exit_code = exit_code; 616 __entry->exit_info1 = exit_info1; 617 __entry->exit_info2 = exit_info2; 618 __entry->exit_int_info = exit_int_info; 619 __entry->exit_int_info_err = exit_int_info_err; 620 __entry->isa = isa; 621 ), 622 623 TP_printk("reason: %s ext_inf1: 0x%016llx " 624 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 625 (__entry->isa == KVM_ISA_VMX) ? 626 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : 627 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), 628 __entry->exit_info1, __entry->exit_info2, 629 __entry->exit_int_info, __entry->exit_int_info_err) 630 ); 631 632 /* 633 * Tracepoint for nested #vmexit because of interrupt pending 634 */ 635 TRACE_EVENT(kvm_nested_intr_vmexit, 636 TP_PROTO(__u64 rip), 637 TP_ARGS(rip), 638 639 TP_STRUCT__entry( 640 __field( __u64, rip ) 641 ), 642 643 TP_fast_assign( 644 __entry->rip = rip 645 ), 646 647 TP_printk("rip: 0x%016llx", __entry->rip) 648 ); 649 650 /* 651 * Tracepoint for nested #vmexit because of interrupt pending 652 */ 653 TRACE_EVENT(kvm_invlpga, 654 TP_PROTO(__u64 rip, int asid, u64 address), 655 TP_ARGS(rip, asid, address), 656 657 TP_STRUCT__entry( 658 __field( __u64, rip ) 659 __field( int, asid ) 660 __field( __u64, address ) 661 ), 662 663 TP_fast_assign( 664 __entry->rip = rip; 665 __entry->asid = asid; 666 __entry->address = address; 667 ), 668 669 TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", 670 __entry->rip, __entry->asid, __entry->address) 671 ); 672 673 /* 674 * Tracepoint for nested #vmexit because of interrupt pending 675 */ 676 TRACE_EVENT(kvm_skinit, 677 TP_PROTO(__u64 rip, __u32 slb), 678 TP_ARGS(rip, slb), 679 680 TP_STRUCT__entry( 681 __field( __u64, rip ) 682 __field( __u32, slb ) 683 ), 684 685 TP_fast_assign( 686 __entry->rip = rip; 687 __entry->slb = slb; 688 ), 689 690 TP_printk("rip: 0x%016llx slb: 0x%08x", 691 __entry->rip, __entry->slb) 692 ); 693 694 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 695 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 696 #define KVM_EMUL_INSN_F_CS_D (1 << 2) 697 #define KVM_EMUL_INSN_F_CS_L (1 << 3) 698 699 #define kvm_trace_symbol_emul_flags \ 700 { 0, "real" }, \ 701 { KVM_EMUL_INSN_F_CR0_PE \ 702 | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \ 703 { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \ 704 { KVM_EMUL_INSN_F_CR0_PE \ 705 | KVM_EMUL_INSN_F_CS_D, "prot32" }, \ 706 { KVM_EMUL_INSN_F_CR0_PE \ 707 | KVM_EMUL_INSN_F_CS_L, "prot64" } 708 709 #define kei_decode_mode(mode) ({ \ 710 u8 flags = 0xff; \ 711 switch (mode) { \ 712 case X86EMUL_MODE_REAL: \ 713 flags = 0; \ 714 break; \ 715 case X86EMUL_MODE_VM86: \ 716 flags = KVM_EMUL_INSN_F_EFL_VM; \ 717 break; \ 718 case X86EMUL_MODE_PROT16: \ 719 flags = KVM_EMUL_INSN_F_CR0_PE; \ 720 break; \ 721 case X86EMUL_MODE_PROT32: \ 722 flags = KVM_EMUL_INSN_F_CR0_PE \ 723 | KVM_EMUL_INSN_F_CS_D; \ 724 break; \ 725 case X86EMUL_MODE_PROT64: \ 726 flags = KVM_EMUL_INSN_F_CR0_PE \ 727 | KVM_EMUL_INSN_F_CS_L; \ 728 break; \ 729 } \ 730 flags; \ 731 }) 732 733 TRACE_EVENT(kvm_emulate_insn, 734 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 735 TP_ARGS(vcpu, failed), 736 737 TP_STRUCT__entry( 738 __field( __u64, rip ) 739 __field( __u32, csbase ) 740 __field( __u8, len ) 741 __array( __u8, insn, 15 ) 742 __field( __u8, flags ) 743 __field( __u8, failed ) 744 ), 745 746 TP_fast_assign( 747 __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); 748 __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr 749 - vcpu->arch.emulate_ctxt.fetch.data; 750 __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len; 751 memcpy(__entry->insn, 752 vcpu->arch.emulate_ctxt.fetch.data, 753 15); 754 __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode); 755 __entry->failed = failed; 756 ), 757 758 TP_printk("%x:%llx:%s (%s)%s", 759 __entry->csbase, __entry->rip, 760 __print_hex(__entry->insn, __entry->len), 761 __print_symbolic(__entry->flags, 762 kvm_trace_symbol_emul_flags), 763 __entry->failed ? " failed" : "" 764 ) 765 ); 766 767 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 768 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 769 770 TRACE_EVENT( 771 vcpu_match_mmio, 772 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 773 TP_ARGS(gva, gpa, write, gpa_match), 774 775 TP_STRUCT__entry( 776 __field(gva_t, gva) 777 __field(gpa_t, gpa) 778 __field(bool, write) 779 __field(bool, gpa_match) 780 ), 781 782 TP_fast_assign( 783 __entry->gva = gva; 784 __entry->gpa = gpa; 785 __entry->write = write; 786 __entry->gpa_match = gpa_match 787 ), 788 789 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 790 __entry->write ? "Write" : "Read", 791 __entry->gpa_match ? "GPA" : "GVA") 792 ); 793 794 TRACE_EVENT(kvm_write_tsc_offset, 795 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 796 __u64 next_tsc_offset), 797 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 798 799 TP_STRUCT__entry( 800 __field( unsigned int, vcpu_id ) 801 __field( __u64, previous_tsc_offset ) 802 __field( __u64, next_tsc_offset ) 803 ), 804 805 TP_fast_assign( 806 __entry->vcpu_id = vcpu_id; 807 __entry->previous_tsc_offset = previous_tsc_offset; 808 __entry->next_tsc_offset = next_tsc_offset; 809 ), 810 811 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 812 __entry->previous_tsc_offset, __entry->next_tsc_offset) 813 ); 814 815 #ifdef CONFIG_X86_64 816 817 #define host_clocks \ 818 {VCLOCK_NONE, "none"}, \ 819 {VCLOCK_TSC, "tsc"} \ 820 821 TRACE_EVENT(kvm_update_master_clock, 822 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 823 TP_ARGS(use_master_clock, host_clock, offset_matched), 824 825 TP_STRUCT__entry( 826 __field( bool, use_master_clock ) 827 __field( unsigned int, host_clock ) 828 __field( bool, offset_matched ) 829 ), 830 831 TP_fast_assign( 832 __entry->use_master_clock = use_master_clock; 833 __entry->host_clock = host_clock; 834 __entry->offset_matched = offset_matched; 835 ), 836 837 TP_printk("masterclock %d hostclock %s offsetmatched %u", 838 __entry->use_master_clock, 839 __print_symbolic(__entry->host_clock, host_clocks), 840 __entry->offset_matched) 841 ); 842 843 TRACE_EVENT(kvm_track_tsc, 844 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 845 unsigned int online_vcpus, bool use_master_clock, 846 unsigned int host_clock), 847 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 848 host_clock), 849 850 TP_STRUCT__entry( 851 __field( unsigned int, vcpu_id ) 852 __field( unsigned int, nr_vcpus_matched_tsc ) 853 __field( unsigned int, online_vcpus ) 854 __field( bool, use_master_clock ) 855 __field( unsigned int, host_clock ) 856 ), 857 858 TP_fast_assign( 859 __entry->vcpu_id = vcpu_id; 860 __entry->nr_vcpus_matched_tsc = nr_matched; 861 __entry->online_vcpus = online_vcpus; 862 __entry->use_master_clock = use_master_clock; 863 __entry->host_clock = host_clock; 864 ), 865 866 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 867 " hostclock %s", 868 __entry->vcpu_id, __entry->use_master_clock, 869 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 870 __print_symbolic(__entry->host_clock, host_clocks)) 871 ); 872 873 #endif /* CONFIG_X86_64 */ 874 875 /* 876 * Tracepoint for PML full VMEXIT. 877 */ 878 TRACE_EVENT(kvm_pml_full, 879 TP_PROTO(unsigned int vcpu_id), 880 TP_ARGS(vcpu_id), 881 882 TP_STRUCT__entry( 883 __field( unsigned int, vcpu_id ) 884 ), 885 886 TP_fast_assign( 887 __entry->vcpu_id = vcpu_id; 888 ), 889 890 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 891 ); 892 893 TRACE_EVENT(kvm_ple_window_update, 894 TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old), 895 TP_ARGS(vcpu_id, new, old), 896 897 TP_STRUCT__entry( 898 __field( unsigned int, vcpu_id ) 899 __field( unsigned int, new ) 900 __field( unsigned int, old ) 901 ), 902 903 TP_fast_assign( 904 __entry->vcpu_id = vcpu_id; 905 __entry->new = new; 906 __entry->old = old; 907 ), 908 909 TP_printk("vcpu %u old %u new %u (%s)", 910 __entry->vcpu_id, __entry->old, __entry->new, 911 __entry->old < __entry->new ? "growed" : "shrinked") 912 ); 913 914 TRACE_EVENT(kvm_pvclock_update, 915 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 916 TP_ARGS(vcpu_id, pvclock), 917 918 TP_STRUCT__entry( 919 __field( unsigned int, vcpu_id ) 920 __field( __u32, version ) 921 __field( __u64, tsc_timestamp ) 922 __field( __u64, system_time ) 923 __field( __u32, tsc_to_system_mul ) 924 __field( __s8, tsc_shift ) 925 __field( __u8, flags ) 926 ), 927 928 TP_fast_assign( 929 __entry->vcpu_id = vcpu_id; 930 __entry->version = pvclock->version; 931 __entry->tsc_timestamp = pvclock->tsc_timestamp; 932 __entry->system_time = pvclock->system_time; 933 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 934 __entry->tsc_shift = pvclock->tsc_shift; 935 __entry->flags = pvclock->flags; 936 ), 937 938 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 939 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 940 "flags 0x%x }", 941 __entry->vcpu_id, 942 __entry->version, 943 __entry->tsc_timestamp, 944 __entry->system_time, 945 __entry->tsc_to_system_mul, 946 __entry->tsc_shift, 947 __entry->flags) 948 ); 949 950 TRACE_EVENT(kvm_wait_lapic_expire, 951 TP_PROTO(unsigned int vcpu_id, s64 delta), 952 TP_ARGS(vcpu_id, delta), 953 954 TP_STRUCT__entry( 955 __field( unsigned int, vcpu_id ) 956 __field( s64, delta ) 957 ), 958 959 TP_fast_assign( 960 __entry->vcpu_id = vcpu_id; 961 __entry->delta = delta; 962 ), 963 964 TP_printk("vcpu %u: delta %lld (%s)", 965 __entry->vcpu_id, 966 __entry->delta, 967 __entry->delta < 0 ? "early" : "late") 968 ); 969 970 TRACE_EVENT(kvm_enter_smm, 971 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 972 TP_ARGS(vcpu_id, smbase, entering), 973 974 TP_STRUCT__entry( 975 __field( unsigned int, vcpu_id ) 976 __field( u64, smbase ) 977 __field( bool, entering ) 978 ), 979 980 TP_fast_assign( 981 __entry->vcpu_id = vcpu_id; 982 __entry->smbase = smbase; 983 __entry->entering = entering; 984 ), 985 986 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 987 __entry->vcpu_id, 988 __entry->entering ? "entering" : "leaving", 989 __entry->smbase) 990 ); 991 992 /* 993 * Tracepoint for VT-d posted-interrupts. 994 */ 995 TRACE_EVENT(kvm_pi_irte_update, 996 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 997 unsigned int gsi, unsigned int gvec, 998 u64 pi_desc_addr, bool set), 999 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1000 1001 TP_STRUCT__entry( 1002 __field( unsigned int, host_irq ) 1003 __field( unsigned int, vcpu_id ) 1004 __field( unsigned int, gsi ) 1005 __field( unsigned int, gvec ) 1006 __field( u64, pi_desc_addr ) 1007 __field( bool, set ) 1008 ), 1009 1010 TP_fast_assign( 1011 __entry->host_irq = host_irq; 1012 __entry->vcpu_id = vcpu_id; 1013 __entry->gsi = gsi; 1014 __entry->gvec = gvec; 1015 __entry->pi_desc_addr = pi_desc_addr; 1016 __entry->set = set; 1017 ), 1018 1019 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1020 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1021 __entry->set ? "enabled and being updated" : "disabled", 1022 __entry->host_irq, 1023 __entry->vcpu_id, 1024 __entry->gsi, 1025 __entry->gvec, 1026 __entry->pi_desc_addr) 1027 ); 1028 1029 /* 1030 * Tracepoint for kvm_hv_notify_acked_sint. 1031 */ 1032 TRACE_EVENT(kvm_hv_notify_acked_sint, 1033 TP_PROTO(int vcpu_id, u32 sint), 1034 TP_ARGS(vcpu_id, sint), 1035 1036 TP_STRUCT__entry( 1037 __field(int, vcpu_id) 1038 __field(u32, sint) 1039 ), 1040 1041 TP_fast_assign( 1042 __entry->vcpu_id = vcpu_id; 1043 __entry->sint = sint; 1044 ), 1045 1046 TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint) 1047 ); 1048 1049 /* 1050 * Tracepoint for synic_set_irq. 1051 */ 1052 TRACE_EVENT(kvm_hv_synic_set_irq, 1053 TP_PROTO(int vcpu_id, u32 sint, int vector, int ret), 1054 TP_ARGS(vcpu_id, sint, vector, ret), 1055 1056 TP_STRUCT__entry( 1057 __field(int, vcpu_id) 1058 __field(u32, sint) 1059 __field(int, vector) 1060 __field(int, ret) 1061 ), 1062 1063 TP_fast_assign( 1064 __entry->vcpu_id = vcpu_id; 1065 __entry->sint = sint; 1066 __entry->vector = vector; 1067 __entry->ret = ret; 1068 ), 1069 1070 TP_printk("vcpu_id %d sint %u vector %d ret %d", 1071 __entry->vcpu_id, __entry->sint, __entry->vector, 1072 __entry->ret) 1073 ); 1074 1075 /* 1076 * Tracepoint for kvm_hv_synic_send_eoi. 1077 */ 1078 TRACE_EVENT(kvm_hv_synic_send_eoi, 1079 TP_PROTO(int vcpu_id, int vector), 1080 TP_ARGS(vcpu_id, vector), 1081 1082 TP_STRUCT__entry( 1083 __field(int, vcpu_id) 1084 __field(u32, sint) 1085 __field(int, vector) 1086 __field(int, ret) 1087 ), 1088 1089 TP_fast_assign( 1090 __entry->vcpu_id = vcpu_id; 1091 __entry->vector = vector; 1092 ), 1093 1094 TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector) 1095 ); 1096 1097 /* 1098 * Tracepoint for synic_set_msr. 1099 */ 1100 TRACE_EVENT(kvm_hv_synic_set_msr, 1101 TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host), 1102 TP_ARGS(vcpu_id, msr, data, host), 1103 1104 TP_STRUCT__entry( 1105 __field(int, vcpu_id) 1106 __field(u32, msr) 1107 __field(u64, data) 1108 __field(bool, host) 1109 ), 1110 1111 TP_fast_assign( 1112 __entry->vcpu_id = vcpu_id; 1113 __entry->msr = msr; 1114 __entry->data = data; 1115 __entry->host = host 1116 ), 1117 1118 TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d", 1119 __entry->vcpu_id, __entry->msr, __entry->data, __entry->host) 1120 ); 1121 1122 /* 1123 * Tracepoint for stimer_set_config. 1124 */ 1125 TRACE_EVENT(kvm_hv_stimer_set_config, 1126 TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host), 1127 TP_ARGS(vcpu_id, timer_index, config, host), 1128 1129 TP_STRUCT__entry( 1130 __field(int, vcpu_id) 1131 __field(int, timer_index) 1132 __field(u64, config) 1133 __field(bool, host) 1134 ), 1135 1136 TP_fast_assign( 1137 __entry->vcpu_id = vcpu_id; 1138 __entry->timer_index = timer_index; 1139 __entry->config = config; 1140 __entry->host = host; 1141 ), 1142 1143 TP_printk("vcpu_id %d timer %d config 0x%llx host %d", 1144 __entry->vcpu_id, __entry->timer_index, __entry->config, 1145 __entry->host) 1146 ); 1147 1148 /* 1149 * Tracepoint for stimer_set_count. 1150 */ 1151 TRACE_EVENT(kvm_hv_stimer_set_count, 1152 TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host), 1153 TP_ARGS(vcpu_id, timer_index, count, host), 1154 1155 TP_STRUCT__entry( 1156 __field(int, vcpu_id) 1157 __field(int, timer_index) 1158 __field(u64, count) 1159 __field(bool, host) 1160 ), 1161 1162 TP_fast_assign( 1163 __entry->vcpu_id = vcpu_id; 1164 __entry->timer_index = timer_index; 1165 __entry->count = count; 1166 __entry->host = host; 1167 ), 1168 1169 TP_printk("vcpu_id %d timer %d count %llu host %d", 1170 __entry->vcpu_id, __entry->timer_index, __entry->count, 1171 __entry->host) 1172 ); 1173 1174 /* 1175 * Tracepoint for stimer_start(periodic timer case). 1176 */ 1177 TRACE_EVENT(kvm_hv_stimer_start_periodic, 1178 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time), 1179 TP_ARGS(vcpu_id, timer_index, time_now, exp_time), 1180 1181 TP_STRUCT__entry( 1182 __field(int, vcpu_id) 1183 __field(int, timer_index) 1184 __field(u64, time_now) 1185 __field(u64, exp_time) 1186 ), 1187 1188 TP_fast_assign( 1189 __entry->vcpu_id = vcpu_id; 1190 __entry->timer_index = timer_index; 1191 __entry->time_now = time_now; 1192 __entry->exp_time = exp_time; 1193 ), 1194 1195 TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu", 1196 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1197 __entry->exp_time) 1198 ); 1199 1200 /* 1201 * Tracepoint for stimer_start(one-shot timer case). 1202 */ 1203 TRACE_EVENT(kvm_hv_stimer_start_one_shot, 1204 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count), 1205 TP_ARGS(vcpu_id, timer_index, time_now, count), 1206 1207 TP_STRUCT__entry( 1208 __field(int, vcpu_id) 1209 __field(int, timer_index) 1210 __field(u64, time_now) 1211 __field(u64, count) 1212 ), 1213 1214 TP_fast_assign( 1215 __entry->vcpu_id = vcpu_id; 1216 __entry->timer_index = timer_index; 1217 __entry->time_now = time_now; 1218 __entry->count = count; 1219 ), 1220 1221 TP_printk("vcpu_id %d timer %d time_now %llu count %llu", 1222 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1223 __entry->count) 1224 ); 1225 1226 /* 1227 * Tracepoint for stimer_timer_callback. 1228 */ 1229 TRACE_EVENT(kvm_hv_stimer_callback, 1230 TP_PROTO(int vcpu_id, int timer_index), 1231 TP_ARGS(vcpu_id, timer_index), 1232 1233 TP_STRUCT__entry( 1234 __field(int, vcpu_id) 1235 __field(int, timer_index) 1236 ), 1237 1238 TP_fast_assign( 1239 __entry->vcpu_id = vcpu_id; 1240 __entry->timer_index = timer_index; 1241 ), 1242 1243 TP_printk("vcpu_id %d timer %d", 1244 __entry->vcpu_id, __entry->timer_index) 1245 ); 1246 1247 /* 1248 * Tracepoint for stimer_expiration. 1249 */ 1250 TRACE_EVENT(kvm_hv_stimer_expiration, 1251 TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), 1252 TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), 1253 1254 TP_STRUCT__entry( 1255 __field(int, vcpu_id) 1256 __field(int, timer_index) 1257 __field(int, direct) 1258 __field(int, msg_send_result) 1259 ), 1260 1261 TP_fast_assign( 1262 __entry->vcpu_id = vcpu_id; 1263 __entry->timer_index = timer_index; 1264 __entry->direct = direct; 1265 __entry->msg_send_result = msg_send_result; 1266 ), 1267 1268 TP_printk("vcpu_id %d timer %d direct %d send result %d", 1269 __entry->vcpu_id, __entry->timer_index, 1270 __entry->direct, __entry->msg_send_result) 1271 ); 1272 1273 /* 1274 * Tracepoint for stimer_cleanup. 1275 */ 1276 TRACE_EVENT(kvm_hv_stimer_cleanup, 1277 TP_PROTO(int vcpu_id, int timer_index), 1278 TP_ARGS(vcpu_id, timer_index), 1279 1280 TP_STRUCT__entry( 1281 __field(int, vcpu_id) 1282 __field(int, timer_index) 1283 ), 1284 1285 TP_fast_assign( 1286 __entry->vcpu_id = vcpu_id; 1287 __entry->timer_index = timer_index; 1288 ), 1289 1290 TP_printk("vcpu_id %d timer %d", 1291 __entry->vcpu_id, __entry->timer_index) 1292 ); 1293 1294 TRACE_EVENT(kvm_apicv_update_request, 1295 TP_PROTO(bool activate, unsigned long bit), 1296 TP_ARGS(activate, bit), 1297 1298 TP_STRUCT__entry( 1299 __field(bool, activate) 1300 __field(unsigned long, bit) 1301 ), 1302 1303 TP_fast_assign( 1304 __entry->activate = activate; 1305 __entry->bit = bit; 1306 ), 1307 1308 TP_printk("%s bit=%lu", 1309 __entry->activate ? "activate" : "deactivate", 1310 __entry->bit) 1311 ); 1312 1313 /* 1314 * Tracepoint for AMD AVIC 1315 */ 1316 TRACE_EVENT(kvm_avic_incomplete_ipi, 1317 TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index), 1318 TP_ARGS(vcpu, icrh, icrl, id, index), 1319 1320 TP_STRUCT__entry( 1321 __field(u32, vcpu) 1322 __field(u32, icrh) 1323 __field(u32, icrl) 1324 __field(u32, id) 1325 __field(u32, index) 1326 ), 1327 1328 TP_fast_assign( 1329 __entry->vcpu = vcpu; 1330 __entry->icrh = icrh; 1331 __entry->icrl = icrl; 1332 __entry->id = id; 1333 __entry->index = index; 1334 ), 1335 1336 TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u", 1337 __entry->vcpu, __entry->icrh, __entry->icrl, 1338 __entry->id, __entry->index) 1339 ); 1340 1341 TRACE_EVENT(kvm_avic_unaccelerated_access, 1342 TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec), 1343 TP_ARGS(vcpu, offset, ft, rw, vec), 1344 1345 TP_STRUCT__entry( 1346 __field(u32, vcpu) 1347 __field(u32, offset) 1348 __field(bool, ft) 1349 __field(bool, rw) 1350 __field(u32, vec) 1351 ), 1352 1353 TP_fast_assign( 1354 __entry->vcpu = vcpu; 1355 __entry->offset = offset; 1356 __entry->ft = ft; 1357 __entry->rw = rw; 1358 __entry->vec = vec; 1359 ), 1360 1361 TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x", 1362 __entry->vcpu, 1363 __entry->offset, 1364 __print_symbolic(__entry->offset, kvm_trace_symbol_apic), 1365 __entry->ft ? "trap" : "fault", 1366 __entry->rw ? "write" : "read", 1367 __entry->vec) 1368 ); 1369 1370 TRACE_EVENT(kvm_hv_timer_state, 1371 TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), 1372 TP_ARGS(vcpu_id, hv_timer_in_use), 1373 TP_STRUCT__entry( 1374 __field(unsigned int, vcpu_id) 1375 __field(unsigned int, hv_timer_in_use) 1376 ), 1377 TP_fast_assign( 1378 __entry->vcpu_id = vcpu_id; 1379 __entry->hv_timer_in_use = hv_timer_in_use; 1380 ), 1381 TP_printk("vcpu_id %x hv_timer %x", 1382 __entry->vcpu_id, 1383 __entry->hv_timer_in_use) 1384 ); 1385 1386 /* 1387 * Tracepoint for kvm_hv_flush_tlb. 1388 */ 1389 TRACE_EVENT(kvm_hv_flush_tlb, 1390 TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), 1391 TP_ARGS(processor_mask, address_space, flags), 1392 1393 TP_STRUCT__entry( 1394 __field(u64, processor_mask) 1395 __field(u64, address_space) 1396 __field(u64, flags) 1397 ), 1398 1399 TP_fast_assign( 1400 __entry->processor_mask = processor_mask; 1401 __entry->address_space = address_space; 1402 __entry->flags = flags; 1403 ), 1404 1405 TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", 1406 __entry->processor_mask, __entry->address_space, 1407 __entry->flags) 1408 ); 1409 1410 /* 1411 * Tracepoint for kvm_hv_flush_tlb_ex. 1412 */ 1413 TRACE_EVENT(kvm_hv_flush_tlb_ex, 1414 TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), 1415 TP_ARGS(valid_bank_mask, format, address_space, flags), 1416 1417 TP_STRUCT__entry( 1418 __field(u64, valid_bank_mask) 1419 __field(u64, format) 1420 __field(u64, address_space) 1421 __field(u64, flags) 1422 ), 1423 1424 TP_fast_assign( 1425 __entry->valid_bank_mask = valid_bank_mask; 1426 __entry->format = format; 1427 __entry->address_space = address_space; 1428 __entry->flags = flags; 1429 ), 1430 1431 TP_printk("valid_bank_mask 0x%llx format 0x%llx " 1432 "address_space 0x%llx flags 0x%llx", 1433 __entry->valid_bank_mask, __entry->format, 1434 __entry->address_space, __entry->flags) 1435 ); 1436 1437 /* 1438 * Tracepoints for kvm_hv_send_ipi. 1439 */ 1440 TRACE_EVENT(kvm_hv_send_ipi, 1441 TP_PROTO(u32 vector, u64 processor_mask), 1442 TP_ARGS(vector, processor_mask), 1443 1444 TP_STRUCT__entry( 1445 __field(u32, vector) 1446 __field(u64, processor_mask) 1447 ), 1448 1449 TP_fast_assign( 1450 __entry->vector = vector; 1451 __entry->processor_mask = processor_mask; 1452 ), 1453 1454 TP_printk("vector %x processor_mask 0x%llx", 1455 __entry->vector, __entry->processor_mask) 1456 ); 1457 1458 TRACE_EVENT(kvm_hv_send_ipi_ex, 1459 TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), 1460 TP_ARGS(vector, format, valid_bank_mask), 1461 1462 TP_STRUCT__entry( 1463 __field(u32, vector) 1464 __field(u64, format) 1465 __field(u64, valid_bank_mask) 1466 ), 1467 1468 TP_fast_assign( 1469 __entry->vector = vector; 1470 __entry->format = format; 1471 __entry->valid_bank_mask = valid_bank_mask; 1472 ), 1473 1474 TP_printk("vector %x format %llx valid_bank_mask 0x%llx", 1475 __entry->vector, __entry->format, 1476 __entry->valid_bank_mask) 1477 ); 1478 1479 TRACE_EVENT(kvm_pv_tlb_flush, 1480 TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), 1481 TP_ARGS(vcpu_id, need_flush_tlb), 1482 1483 TP_STRUCT__entry( 1484 __field( unsigned int, vcpu_id ) 1485 __field( bool, need_flush_tlb ) 1486 ), 1487 1488 TP_fast_assign( 1489 __entry->vcpu_id = vcpu_id; 1490 __entry->need_flush_tlb = need_flush_tlb; 1491 ), 1492 1493 TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, 1494 __entry->need_flush_tlb ? "true" : "false") 1495 ); 1496 1497 /* 1498 * Tracepoint for failed nested VMX VM-Enter. 1499 */ 1500 TRACE_EVENT(kvm_nested_vmenter_failed, 1501 TP_PROTO(const char *msg, u32 err), 1502 TP_ARGS(msg, err), 1503 1504 TP_STRUCT__entry( 1505 __field(const char *, msg) 1506 __field(u32, err) 1507 ), 1508 1509 TP_fast_assign( 1510 __entry->msg = msg; 1511 __entry->err = err; 1512 ), 1513 1514 TP_printk("%s%s", __entry->msg, !__entry->err ? "" : 1515 __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) 1516 ); 1517 1518 #endif /* _TRACE_KVM_H */ 1519 1520 #undef TRACE_INCLUDE_PATH 1521 #define TRACE_INCLUDE_PATH ../../arch/x86/kvm 1522 #undef TRACE_INCLUDE_FILE 1523 #define TRACE_INCLUDE_FILE trace 1524 1525 /* This part must be outside protection */ 1526 #include <trace/define_trace.h> 1527