1 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 2 #define _TRACE_KVM_H 3 4 #include <linux/tracepoint.h> 5 #include <asm/vmx.h> 6 #include <asm/svm.h> 7 #include <asm/clocksource.h> 8 9 #undef TRACE_SYSTEM 10 #define TRACE_SYSTEM kvm 11 12 /* 13 * Tracepoint for guest mode entry. 14 */ 15 TRACE_EVENT(kvm_entry, 16 TP_PROTO(unsigned int vcpu_id), 17 TP_ARGS(vcpu_id), 18 19 TP_STRUCT__entry( 20 __field( unsigned int, vcpu_id ) 21 ), 22 23 TP_fast_assign( 24 __entry->vcpu_id = vcpu_id; 25 ), 26 27 TP_printk("vcpu %u", __entry->vcpu_id) 28 ); 29 30 /* 31 * Tracepoint for hypercall. 32 */ 33 TRACE_EVENT(kvm_hypercall, 34 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 35 unsigned long a2, unsigned long a3), 36 TP_ARGS(nr, a0, a1, a2, a3), 37 38 TP_STRUCT__entry( 39 __field( unsigned long, nr ) 40 __field( unsigned long, a0 ) 41 __field( unsigned long, a1 ) 42 __field( unsigned long, a2 ) 43 __field( unsigned long, a3 ) 44 ), 45 46 TP_fast_assign( 47 __entry->nr = nr; 48 __entry->a0 = a0; 49 __entry->a1 = a1; 50 __entry->a2 = a2; 51 __entry->a3 = a3; 52 ), 53 54 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", 55 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 56 __entry->a3) 57 ); 58 59 /* 60 * Tracepoint for hypercall. 61 */ 62 TRACE_EVENT(kvm_hv_hypercall, 63 TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx, 64 __u64 ingpa, __u64 outgpa), 65 TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), 66 67 TP_STRUCT__entry( 68 __field( __u16, rep_cnt ) 69 __field( __u16, rep_idx ) 70 __field( __u64, ingpa ) 71 __field( __u64, outgpa ) 72 __field( __u16, code ) 73 __field( bool, fast ) 74 ), 75 76 TP_fast_assign( 77 __entry->rep_cnt = rep_cnt; 78 __entry->rep_idx = rep_idx; 79 __entry->ingpa = ingpa; 80 __entry->outgpa = outgpa; 81 __entry->code = code; 82 __entry->fast = fast; 83 ), 84 85 TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 86 __entry->code, __entry->fast ? "fast" : "slow", 87 __entry->rep_cnt, __entry->rep_idx, __entry->ingpa, 88 __entry->outgpa) 89 ); 90 91 /* 92 * Tracepoint for PIO. 93 */ 94 TRACE_EVENT(kvm_pio, 95 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 96 unsigned int count), 97 TP_ARGS(rw, port, size, count), 98 99 TP_STRUCT__entry( 100 __field( unsigned int, rw ) 101 __field( unsigned int, port ) 102 __field( unsigned int, size ) 103 __field( unsigned int, count ) 104 ), 105 106 TP_fast_assign( 107 __entry->rw = rw; 108 __entry->port = port; 109 __entry->size = size; 110 __entry->count = count; 111 ), 112 113 TP_printk("pio_%s at 0x%x size %d count %d", 114 __entry->rw ? "write" : "read", 115 __entry->port, __entry->size, __entry->count) 116 ); 117 118 /* 119 * Tracepoint for cpuid. 120 */ 121 TRACE_EVENT(kvm_cpuid, 122 TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx, 123 unsigned long rcx, unsigned long rdx), 124 TP_ARGS(function, rax, rbx, rcx, rdx), 125 126 TP_STRUCT__entry( 127 __field( unsigned int, function ) 128 __field( unsigned long, rax ) 129 __field( unsigned long, rbx ) 130 __field( unsigned long, rcx ) 131 __field( unsigned long, rdx ) 132 ), 133 134 TP_fast_assign( 135 __entry->function = function; 136 __entry->rax = rax; 137 __entry->rbx = rbx; 138 __entry->rcx = rcx; 139 __entry->rdx = rdx; 140 ), 141 142 TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx", 143 __entry->function, __entry->rax, 144 __entry->rbx, __entry->rcx, __entry->rdx) 145 ); 146 147 #define AREG(x) { APIC_##x, "APIC_" #x } 148 149 #define kvm_trace_symbol_apic \ 150 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ 151 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ 152 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ 153 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ 154 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \ 155 AREG(ECTRL) 156 /* 157 * Tracepoint for apic access. 158 */ 159 TRACE_EVENT(kvm_apic, 160 TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val), 161 TP_ARGS(rw, reg, val), 162 163 TP_STRUCT__entry( 164 __field( unsigned int, rw ) 165 __field( unsigned int, reg ) 166 __field( unsigned int, val ) 167 ), 168 169 TP_fast_assign( 170 __entry->rw = rw; 171 __entry->reg = reg; 172 __entry->val = val; 173 ), 174 175 TP_printk("apic_%s %s = 0x%x", 176 __entry->rw ? "write" : "read", 177 __print_symbolic(__entry->reg, kvm_trace_symbol_apic), 178 __entry->val) 179 ); 180 181 #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) 182 #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) 183 184 #define KVM_ISA_VMX 1 185 #define KVM_ISA_SVM 2 186 187 /* 188 * Tracepoint for kvm guest exit: 189 */ 190 TRACE_EVENT(kvm_exit, 191 TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), 192 TP_ARGS(exit_reason, vcpu, isa), 193 194 TP_STRUCT__entry( 195 __field( unsigned int, exit_reason ) 196 __field( unsigned long, guest_rip ) 197 __field( u32, isa ) 198 __field( u64, info1 ) 199 __field( u64, info2 ) 200 ), 201 202 TP_fast_assign( 203 __entry->exit_reason = exit_reason; 204 __entry->guest_rip = kvm_rip_read(vcpu); 205 __entry->isa = isa; 206 kvm_x86_ops->get_exit_info(vcpu, &__entry->info1, 207 &__entry->info2); 208 ), 209 210 TP_printk("reason %s rip 0x%lx info %llx %llx", 211 (__entry->isa == KVM_ISA_VMX) ? 212 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) : 213 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS), 214 __entry->guest_rip, __entry->info1, __entry->info2) 215 ); 216 217 /* 218 * Tracepoint for kvm interrupt injection: 219 */ 220 TRACE_EVENT(kvm_inj_virq, 221 TP_PROTO(unsigned int irq), 222 TP_ARGS(irq), 223 224 TP_STRUCT__entry( 225 __field( unsigned int, irq ) 226 ), 227 228 TP_fast_assign( 229 __entry->irq = irq; 230 ), 231 232 TP_printk("irq %u", __entry->irq) 233 ); 234 235 #define EXS(x) { x##_VECTOR, "#" #x } 236 237 #define kvm_trace_sym_exc \ 238 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \ 239 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \ 240 EXS(MF), EXS(MC) 241 242 /* 243 * Tracepoint for kvm interrupt injection: 244 */ 245 TRACE_EVENT(kvm_inj_exception, 246 TP_PROTO(unsigned exception, bool has_error, unsigned error_code), 247 TP_ARGS(exception, has_error, error_code), 248 249 TP_STRUCT__entry( 250 __field( u8, exception ) 251 __field( u8, has_error ) 252 __field( u32, error_code ) 253 ), 254 255 TP_fast_assign( 256 __entry->exception = exception; 257 __entry->has_error = has_error; 258 __entry->error_code = error_code; 259 ), 260 261 TP_printk("%s (0x%x)", 262 __print_symbolic(__entry->exception, kvm_trace_sym_exc), 263 /* FIXME: don't print error_code if not present */ 264 __entry->has_error ? __entry->error_code : 0) 265 ); 266 267 /* 268 * Tracepoint for page fault. 269 */ 270 TRACE_EVENT(kvm_page_fault, 271 TP_PROTO(unsigned long fault_address, unsigned int error_code), 272 TP_ARGS(fault_address, error_code), 273 274 TP_STRUCT__entry( 275 __field( unsigned long, fault_address ) 276 __field( unsigned int, error_code ) 277 ), 278 279 TP_fast_assign( 280 __entry->fault_address = fault_address; 281 __entry->error_code = error_code; 282 ), 283 284 TP_printk("address %lx error_code %x", 285 __entry->fault_address, __entry->error_code) 286 ); 287 288 /* 289 * Tracepoint for guest MSR access. 290 */ 291 TRACE_EVENT(kvm_msr, 292 TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), 293 TP_ARGS(write, ecx, data, exception), 294 295 TP_STRUCT__entry( 296 __field( unsigned, write ) 297 __field( u32, ecx ) 298 __field( u64, data ) 299 __field( u8, exception ) 300 ), 301 302 TP_fast_assign( 303 __entry->write = write; 304 __entry->ecx = ecx; 305 __entry->data = data; 306 __entry->exception = exception; 307 ), 308 309 TP_printk("msr_%s %x = 0x%llx%s", 310 __entry->write ? "write" : "read", 311 __entry->ecx, __entry->data, 312 __entry->exception ? " (#GP)" : "") 313 ); 314 315 #define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) 316 #define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) 317 #define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) 318 #define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) 319 320 /* 321 * Tracepoint for guest CR access. 322 */ 323 TRACE_EVENT(kvm_cr, 324 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), 325 TP_ARGS(rw, cr, val), 326 327 TP_STRUCT__entry( 328 __field( unsigned int, rw ) 329 __field( unsigned int, cr ) 330 __field( unsigned long, val ) 331 ), 332 333 TP_fast_assign( 334 __entry->rw = rw; 335 __entry->cr = cr; 336 __entry->val = val; 337 ), 338 339 TP_printk("cr_%s %x = 0x%lx", 340 __entry->rw ? "write" : "read", 341 __entry->cr, __entry->val) 342 ); 343 344 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 345 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 346 347 TRACE_EVENT(kvm_pic_set_irq, 348 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 349 TP_ARGS(chip, pin, elcr, imr, coalesced), 350 351 TP_STRUCT__entry( 352 __field( __u8, chip ) 353 __field( __u8, pin ) 354 __field( __u8, elcr ) 355 __field( __u8, imr ) 356 __field( bool, coalesced ) 357 ), 358 359 TP_fast_assign( 360 __entry->chip = chip; 361 __entry->pin = pin; 362 __entry->elcr = elcr; 363 __entry->imr = imr; 364 __entry->coalesced = coalesced; 365 ), 366 367 TP_printk("chip %u pin %u (%s%s)%s", 368 __entry->chip, __entry->pin, 369 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 370 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 371 __entry->coalesced ? " (coalesced)" : "") 372 ); 373 374 #define kvm_apic_dst_shorthand \ 375 {0x0, "dst"}, \ 376 {0x1, "self"}, \ 377 {0x2, "all"}, \ 378 {0x3, "all-but-self"} 379 380 TRACE_EVENT(kvm_apic_ipi, 381 TP_PROTO(__u32 icr_low, __u32 dest_id), 382 TP_ARGS(icr_low, dest_id), 383 384 TP_STRUCT__entry( 385 __field( __u32, icr_low ) 386 __field( __u32, dest_id ) 387 ), 388 389 TP_fast_assign( 390 __entry->icr_low = icr_low; 391 __entry->dest_id = dest_id; 392 ), 393 394 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 395 __entry->dest_id, (u8)__entry->icr_low, 396 __print_symbolic((__entry->icr_low >> 8 & 0x7), 397 kvm_deliver_mode), 398 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 399 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 400 (__entry->icr_low & (1<<15)) ? "level" : "edge", 401 __print_symbolic((__entry->icr_low >> 18 & 0x3), 402 kvm_apic_dst_shorthand)) 403 ); 404 405 TRACE_EVENT(kvm_apic_accept_irq, 406 TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced), 407 TP_ARGS(apicid, dm, tm, vec, coalesced), 408 409 TP_STRUCT__entry( 410 __field( __u32, apicid ) 411 __field( __u16, dm ) 412 __field( __u8, tm ) 413 __field( __u8, vec ) 414 __field( bool, coalesced ) 415 ), 416 417 TP_fast_assign( 418 __entry->apicid = apicid; 419 __entry->dm = dm; 420 __entry->tm = tm; 421 __entry->vec = vec; 422 __entry->coalesced = coalesced; 423 ), 424 425 TP_printk("apicid %x vec %u (%s|%s)%s", 426 __entry->apicid, __entry->vec, 427 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 428 __entry->tm ? "level" : "edge", 429 __entry->coalesced ? " (coalesced)" : "") 430 ); 431 432 TRACE_EVENT(kvm_eoi, 433 TP_PROTO(struct kvm_lapic *apic, int vector), 434 TP_ARGS(apic, vector), 435 436 TP_STRUCT__entry( 437 __field( __u32, apicid ) 438 __field( int, vector ) 439 ), 440 441 TP_fast_assign( 442 __entry->apicid = apic->vcpu->vcpu_id; 443 __entry->vector = vector; 444 ), 445 446 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 447 ); 448 449 TRACE_EVENT(kvm_pv_eoi, 450 TP_PROTO(struct kvm_lapic *apic, int vector), 451 TP_ARGS(apic, vector), 452 453 TP_STRUCT__entry( 454 __field( __u32, apicid ) 455 __field( int, vector ) 456 ), 457 458 TP_fast_assign( 459 __entry->apicid = apic->vcpu->vcpu_id; 460 __entry->vector = vector; 461 ), 462 463 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 464 ); 465 466 /* 467 * Tracepoint for nested VMRUN 468 */ 469 TRACE_EVENT(kvm_nested_vmrun, 470 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 471 __u32 event_inj, bool npt), 472 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 473 474 TP_STRUCT__entry( 475 __field( __u64, rip ) 476 __field( __u64, vmcb ) 477 __field( __u64, nested_rip ) 478 __field( __u32, int_ctl ) 479 __field( __u32, event_inj ) 480 __field( bool, npt ) 481 ), 482 483 TP_fast_assign( 484 __entry->rip = rip; 485 __entry->vmcb = vmcb; 486 __entry->nested_rip = nested_rip; 487 __entry->int_ctl = int_ctl; 488 __entry->event_inj = event_inj; 489 __entry->npt = npt; 490 ), 491 492 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 493 "event_inj: 0x%08x npt: %s", 494 __entry->rip, __entry->vmcb, __entry->nested_rip, 495 __entry->int_ctl, __entry->event_inj, 496 __entry->npt ? "on" : "off") 497 ); 498 499 TRACE_EVENT(kvm_nested_intercepts, 500 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), 501 TP_ARGS(cr_read, cr_write, exceptions, intercept), 502 503 TP_STRUCT__entry( 504 __field( __u16, cr_read ) 505 __field( __u16, cr_write ) 506 __field( __u32, exceptions ) 507 __field( __u64, intercept ) 508 ), 509 510 TP_fast_assign( 511 __entry->cr_read = cr_read; 512 __entry->cr_write = cr_write; 513 __entry->exceptions = exceptions; 514 __entry->intercept = intercept; 515 ), 516 517 TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", 518 __entry->cr_read, __entry->cr_write, __entry->exceptions, 519 __entry->intercept) 520 ); 521 /* 522 * Tracepoint for #VMEXIT while nested 523 */ 524 TRACE_EVENT(kvm_nested_vmexit, 525 TP_PROTO(__u64 rip, __u32 exit_code, 526 __u64 exit_info1, __u64 exit_info2, 527 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 528 TP_ARGS(rip, exit_code, exit_info1, exit_info2, 529 exit_int_info, exit_int_info_err, isa), 530 531 TP_STRUCT__entry( 532 __field( __u64, rip ) 533 __field( __u32, exit_code ) 534 __field( __u64, exit_info1 ) 535 __field( __u64, exit_info2 ) 536 __field( __u32, exit_int_info ) 537 __field( __u32, exit_int_info_err ) 538 __field( __u32, isa ) 539 ), 540 541 TP_fast_assign( 542 __entry->rip = rip; 543 __entry->exit_code = exit_code; 544 __entry->exit_info1 = exit_info1; 545 __entry->exit_info2 = exit_info2; 546 __entry->exit_int_info = exit_int_info; 547 __entry->exit_int_info_err = exit_int_info_err; 548 __entry->isa = isa; 549 ), 550 TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " 551 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 552 __entry->rip, 553 (__entry->isa == KVM_ISA_VMX) ? 554 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : 555 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), 556 __entry->exit_info1, __entry->exit_info2, 557 __entry->exit_int_info, __entry->exit_int_info_err) 558 ); 559 560 /* 561 * Tracepoint for #VMEXIT reinjected to the guest 562 */ 563 TRACE_EVENT(kvm_nested_vmexit_inject, 564 TP_PROTO(__u32 exit_code, 565 __u64 exit_info1, __u64 exit_info2, 566 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 567 TP_ARGS(exit_code, exit_info1, exit_info2, 568 exit_int_info, exit_int_info_err, isa), 569 570 TP_STRUCT__entry( 571 __field( __u32, exit_code ) 572 __field( __u64, exit_info1 ) 573 __field( __u64, exit_info2 ) 574 __field( __u32, exit_int_info ) 575 __field( __u32, exit_int_info_err ) 576 __field( __u32, isa ) 577 ), 578 579 TP_fast_assign( 580 __entry->exit_code = exit_code; 581 __entry->exit_info1 = exit_info1; 582 __entry->exit_info2 = exit_info2; 583 __entry->exit_int_info = exit_int_info; 584 __entry->exit_int_info_err = exit_int_info_err; 585 __entry->isa = isa; 586 ), 587 588 TP_printk("reason: %s ext_inf1: 0x%016llx " 589 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 590 (__entry->isa == KVM_ISA_VMX) ? 591 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : 592 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), 593 __entry->exit_info1, __entry->exit_info2, 594 __entry->exit_int_info, __entry->exit_int_info_err) 595 ); 596 597 /* 598 * Tracepoint for nested #vmexit because of interrupt pending 599 */ 600 TRACE_EVENT(kvm_nested_intr_vmexit, 601 TP_PROTO(__u64 rip), 602 TP_ARGS(rip), 603 604 TP_STRUCT__entry( 605 __field( __u64, rip ) 606 ), 607 608 TP_fast_assign( 609 __entry->rip = rip 610 ), 611 612 TP_printk("rip: 0x%016llx", __entry->rip) 613 ); 614 615 /* 616 * Tracepoint for nested #vmexit because of interrupt pending 617 */ 618 TRACE_EVENT(kvm_invlpga, 619 TP_PROTO(__u64 rip, int asid, u64 address), 620 TP_ARGS(rip, asid, address), 621 622 TP_STRUCT__entry( 623 __field( __u64, rip ) 624 __field( int, asid ) 625 __field( __u64, address ) 626 ), 627 628 TP_fast_assign( 629 __entry->rip = rip; 630 __entry->asid = asid; 631 __entry->address = address; 632 ), 633 634 TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", 635 __entry->rip, __entry->asid, __entry->address) 636 ); 637 638 /* 639 * Tracepoint for nested #vmexit because of interrupt pending 640 */ 641 TRACE_EVENT(kvm_skinit, 642 TP_PROTO(__u64 rip, __u32 slb), 643 TP_ARGS(rip, slb), 644 645 TP_STRUCT__entry( 646 __field( __u64, rip ) 647 __field( __u32, slb ) 648 ), 649 650 TP_fast_assign( 651 __entry->rip = rip; 652 __entry->slb = slb; 653 ), 654 655 TP_printk("rip: 0x%016llx slb: 0x%08x", 656 __entry->rip, __entry->slb) 657 ); 658 659 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 660 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 661 #define KVM_EMUL_INSN_F_CS_D (1 << 2) 662 #define KVM_EMUL_INSN_F_CS_L (1 << 3) 663 664 #define kvm_trace_symbol_emul_flags \ 665 { 0, "real" }, \ 666 { KVM_EMUL_INSN_F_CR0_PE \ 667 | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \ 668 { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \ 669 { KVM_EMUL_INSN_F_CR0_PE \ 670 | KVM_EMUL_INSN_F_CS_D, "prot32" }, \ 671 { KVM_EMUL_INSN_F_CR0_PE \ 672 | KVM_EMUL_INSN_F_CS_L, "prot64" } 673 674 #define kei_decode_mode(mode) ({ \ 675 u8 flags = 0xff; \ 676 switch (mode) { \ 677 case X86EMUL_MODE_REAL: \ 678 flags = 0; \ 679 break; \ 680 case X86EMUL_MODE_VM86: \ 681 flags = KVM_EMUL_INSN_F_EFL_VM; \ 682 break; \ 683 case X86EMUL_MODE_PROT16: \ 684 flags = KVM_EMUL_INSN_F_CR0_PE; \ 685 break; \ 686 case X86EMUL_MODE_PROT32: \ 687 flags = KVM_EMUL_INSN_F_CR0_PE \ 688 | KVM_EMUL_INSN_F_CS_D; \ 689 break; \ 690 case X86EMUL_MODE_PROT64: \ 691 flags = KVM_EMUL_INSN_F_CR0_PE \ 692 | KVM_EMUL_INSN_F_CS_L; \ 693 break; \ 694 } \ 695 flags; \ 696 }) 697 698 TRACE_EVENT(kvm_emulate_insn, 699 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 700 TP_ARGS(vcpu, failed), 701 702 TP_STRUCT__entry( 703 __field( __u64, rip ) 704 __field( __u32, csbase ) 705 __field( __u8, len ) 706 __array( __u8, insn, 15 ) 707 __field( __u8, flags ) 708 __field( __u8, failed ) 709 ), 710 711 TP_fast_assign( 712 __entry->rip = vcpu->arch.emulate_ctxt.fetch.start; 713 __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); 714 __entry->len = vcpu->arch.emulate_ctxt._eip 715 - vcpu->arch.emulate_ctxt.fetch.start; 716 memcpy(__entry->insn, 717 vcpu->arch.emulate_ctxt.fetch.data, 718 15); 719 __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode); 720 __entry->failed = failed; 721 ), 722 723 TP_printk("%x:%llx:%s (%s)%s", 724 __entry->csbase, __entry->rip, 725 __print_hex(__entry->insn, __entry->len), 726 __print_symbolic(__entry->flags, 727 kvm_trace_symbol_emul_flags), 728 __entry->failed ? " failed" : "" 729 ) 730 ); 731 732 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 733 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 734 735 TRACE_EVENT( 736 vcpu_match_mmio, 737 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 738 TP_ARGS(gva, gpa, write, gpa_match), 739 740 TP_STRUCT__entry( 741 __field(gva_t, gva) 742 __field(gpa_t, gpa) 743 __field(bool, write) 744 __field(bool, gpa_match) 745 ), 746 747 TP_fast_assign( 748 __entry->gva = gva; 749 __entry->gpa = gpa; 750 __entry->write = write; 751 __entry->gpa_match = gpa_match 752 ), 753 754 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 755 __entry->write ? "Write" : "Read", 756 __entry->gpa_match ? "GPA" : "GVA") 757 ); 758 759 #ifdef CONFIG_X86_64 760 761 #define host_clocks \ 762 {VCLOCK_NONE, "none"}, \ 763 {VCLOCK_TSC, "tsc"}, \ 764 {VCLOCK_HPET, "hpet"} \ 765 766 TRACE_EVENT(kvm_update_master_clock, 767 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 768 TP_ARGS(use_master_clock, host_clock, offset_matched), 769 770 TP_STRUCT__entry( 771 __field( bool, use_master_clock ) 772 __field( unsigned int, host_clock ) 773 __field( bool, offset_matched ) 774 ), 775 776 TP_fast_assign( 777 __entry->use_master_clock = use_master_clock; 778 __entry->host_clock = host_clock; 779 __entry->offset_matched = offset_matched; 780 ), 781 782 TP_printk("masterclock %d hostclock %s offsetmatched %u", 783 __entry->use_master_clock, 784 __print_symbolic(__entry->host_clock, host_clocks), 785 __entry->offset_matched) 786 ); 787 788 TRACE_EVENT(kvm_track_tsc, 789 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 790 unsigned int online_vcpus, bool use_master_clock, 791 unsigned int host_clock), 792 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 793 host_clock), 794 795 TP_STRUCT__entry( 796 __field( unsigned int, vcpu_id ) 797 __field( unsigned int, nr_vcpus_matched_tsc ) 798 __field( unsigned int, online_vcpus ) 799 __field( bool, use_master_clock ) 800 __field( unsigned int, host_clock ) 801 ), 802 803 TP_fast_assign( 804 __entry->vcpu_id = vcpu_id; 805 __entry->nr_vcpus_matched_tsc = nr_matched; 806 __entry->online_vcpus = online_vcpus; 807 __entry->use_master_clock = use_master_clock; 808 __entry->host_clock = host_clock; 809 ), 810 811 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 812 " hostclock %s", 813 __entry->vcpu_id, __entry->use_master_clock, 814 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 815 __print_symbolic(__entry->host_clock, host_clocks)) 816 ); 817 818 #endif /* CONFIG_X86_64 */ 819 820 #endif /* _TRACE_KVM_H */ 821 822 #undef TRACE_INCLUDE_PATH 823 #define TRACE_INCLUDE_PATH arch/x86/kvm 824 #undef TRACE_INCLUDE_FILE 825 #define TRACE_INCLUDE_FILE trace 826 827 /* This part must be outside protection */ 828 #include <trace/define_trace.h> 829