/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

/*
 * Tracepoint definitions for MIPS KVM.
 *
 * TRACE_HEADER_MULTI_READ lets <trace/define_trace.h> re-include this
 * header so the TRACE_EVENT()/DECLARE_EVENT_CLASS() invocations below can
 * be expanded multiple times with different macro definitions.
 */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace

/*
 * Tracepoints for VM enters
 */
/*
 * Common event class for guest-mode transitions: records only the guest PC
 * (vcpu->arch.pc) at the point of the transition.
 */
DECLARE_EVENT_CLASS(kvm_transition,
	    TP_PROTO(struct kvm_vcpu *vcpu),
	    TP_ARGS(vcpu),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
	    ),

	    TP_printk("PC: 0x%08lx",
		      __entry->pc)
);

/* Guest PC on first entry to the guest. */
DEFINE_EVENT(kvm_transition, kvm_enter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

/* Guest PC on re-entry to the guest (e.g. after handling an exit). */
DEFINE_EVENT(kvm_transition, kvm_reenter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

/* Guest PC at the time control leaves the guest. */
DEFINE_EVENT(kvm_transition, kvm_out,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

/* The first 32 exit reasons correspond to Cause.ExcCode */
#define KVM_TRACE_EXIT_INT		 0
#define KVM_TRACE_EXIT_TLBMOD		 1
#define KVM_TRACE_EXIT_TLBMISS_LD	 2
#define KVM_TRACE_EXIT_TLBMISS_ST	 3
#define KVM_TRACE_EXIT_ADDRERR_LD	 4
#define KVM_TRACE_EXIT_ADDRERR_ST	 5
#define KVM_TRACE_EXIT_SYSCALL		 8
#define KVM_TRACE_EXIT_BREAK_INST	 9
#define KVM_TRACE_EXIT_RESVD_INST	10
#define KVM_TRACE_EXIT_COP_UNUSABLE	11
#define KVM_TRACE_EXIT_TRAP_INST	13
#define KVM_TRACE_EXIT_MSA_FPE		14
#define KVM_TRACE_EXIT_FPE		15
#define KVM_TRACE_EXIT_MSA_DISABLED	21
#define KVM_TRACE_EXIT_GUEST_EXIT	27
/* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT		32
#define KVM_TRACE_EXIT_CACHE		33
#define KVM_TRACE_EXIT_SIGNAL		34
/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
/*
 * VZ exit reasons: value = KVM_TRACE_EXIT_GEXCCODE_BASE + GuestCtl0.GExcCode.
 * The trailing comments give the raw GExcCode for each value.
 */
#define KVM_TRACE_EXIT_GEXCCODE_BASE	64
#define KVM_TRACE_EXIT_GPSI		64	/*  0 */
#define KVM_TRACE_EXIT_GSFC		65	/*  1 */
#define KVM_TRACE_EXIT_HC		66	/*  2 */
#define KVM_TRACE_EXIT_GRR		67	/*  3 */
#define KVM_TRACE_EXIT_GVA		72	/*  8 */
#define KVM_TRACE_EXIT_GHFC		73	/*  9 */
#define KVM_TRACE_EXIT_GPA		74	/* 10 */

/* Tracepoints for VM exits */
/*
 * Symbol table for __print_symbolic() in the kvm_exit tracepoint: maps each
 * KVM_TRACE_EXIT_* code above to a human-readable name.
 * NOTE(review): "Address Err (ST)" is abbreviated while "Address Error (LD)"
 * is not; left as-is because the format strings are visible to userspace
 * trace tooling.
 */
#define kvm_trace_symbol_exit_types				\
	{ KVM_TRACE_EXIT_INT,		"Interrupt" },		\
	{ KVM_TRACE_EXIT_TLBMOD,	"TLB Mod" },		\
	{ KVM_TRACE_EXIT_TLBMISS_LD,	"TLB Miss (LD)" },	\
	{ KVM_TRACE_EXIT_TLBMISS_ST,	"TLB Miss (ST)" },	\
	{ KVM_TRACE_EXIT_ADDRERR_LD,	"Address Error (LD)" },	\
	{ KVM_TRACE_EXIT_ADDRERR_ST,	"Address Err (ST)" },	\
	{ KVM_TRACE_EXIT_SYSCALL,	"System Call" },	\
	{ KVM_TRACE_EXIT_BREAK_INST,	"Break Inst" },		\
	{ KVM_TRACE_EXIT_RESVD_INST,	"Reserved Inst" },	\
	{ KVM_TRACE_EXIT_COP_UNUSABLE,	"COP0/1 Unusable" },	\
	{ KVM_TRACE_EXIT_TRAP_INST,	"Trap Inst" },		\
	{ KVM_TRACE_EXIT_MSA_FPE,	"MSA FPE" },		\
	{ KVM_TRACE_EXIT_FPE,		"FPE" },		\
	{ KVM_TRACE_EXIT_MSA_DISABLED,	"MSA Disabled" },	\
	{ KVM_TRACE_EXIT_GUEST_EXIT,	"Guest Exit" },		\
	{ KVM_TRACE_EXIT_WAIT,		"WAIT" },		\
	{ KVM_TRACE_EXIT_CACHE,		"CACHE" },		\
	{ KVM_TRACE_EXIT_SIGNAL,	"Signal" },		\
	{ KVM_TRACE_EXIT_GPSI,		"GPSI" },		\
	{ KVM_TRACE_EXIT_GSFC,		"GSFC" },		\
	{ KVM_TRACE_EXIT_HC,		"HC" },			\
	{ KVM_TRACE_EXIT_GRR,		"GRR" },		\
	{ KVM_TRACE_EXIT_GVA,		"GVA" },		\
	{ KVM_TRACE_EXIT_GHFC,		"GHFC" },		\
	{ KVM_TRACE_EXIT_GPA,		"GPA" }

/*
 * kvm_exit: guest PC and exit reason (one of KVM_TRACE_EXIT_*) at the
 * time of a VM exit; the reason is printed symbolically.
 */
TRACE_EVENT(kvm_exit,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	    TP_ARGS(vcpu, reason),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(unsigned int, reason)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->reason = reason;
	    ),

	    TP_printk("[%s]PC: 0x%08lx",
		      __print_symbolic(__entry->reason,
				       kvm_trace_symbol_exit_types),
		      __entry->pc)
);

/* Operation codes for the kvm_hwr tracepoint's "op" field. */
#define KVM_TRACE_MFC0		0
#define KVM_TRACE_MTC0		1
#define KVM_TRACE_DMFC0		2
#define KVM_TRACE_DMTC0		3
#define KVM_TRACE_RDHWR		4

/* Register namespace selector: coprocessor 0 vs. hardware registers. */
#define KVM_TRACE_HWR_COP0	0
#define KVM_TRACE_HWR_HWR	1

/*
 * Pack a register reference into the kvm_hwr "reg" field:
 * bit 8 = namespace (COP0/HWR), bits [7:3] = register number,
 * bits [2:0] = select. Decoded by the shifts in kvm_hwr's TP_printk().
 */
#define KVM_TRACE_COP0(REG, SEL)	((KVM_TRACE_HWR_COP0 << 8) |	\
					 ((REG) << 3) | (SEL))
#define KVM_TRACE_HWR(REG, SEL)		((KVM_TRACE_HWR_HWR  << 8) |	\
					 ((REG) << 3) | (SEL))

/* Symbol table for the kvm_hwr "op" field. */
#define kvm_trace_symbol_hwr_ops	\
	{ KVM_TRACE_MFC0,	"MFC0" },	\
	{ KVM_TRACE_MTC0,	"MTC0" },	\
	{ KVM_TRACE_DMFC0,	"DMFC0" },	\
	{ KVM_TRACE_DMTC0,	"DMTC0" },	\
	{ KVM_TRACE_RDHWR,	"RDHWR" }

/* Symbol table for the namespace bit of the kvm_hwr "reg" field. */
#define kvm_trace_symbol_hwr_cop	\
	{ KVM_TRACE_HWR_COP0,	"COP0" },	\
	{ KVM_TRACE_HWR_HWR,	"HWR" }

/*
 * Symbol table mapping packed (namespace, reg, sel) values to register
 * names, for the full decode in kvm_hwr's TP_printk().
 */
#define kvm_trace_symbol_hwr_regs	\
	{ KVM_TRACE_COP0( 0, 0),	"Index" },	\
	{ KVM_TRACE_COP0( 2, 0),	"EntryLo0" },	\
	{ KVM_TRACE_COP0( 3, 0),	"EntryLo1" },	\
	{ KVM_TRACE_COP0( 4, 0),	"Context" },	\
	{ KVM_TRACE_COP0( 4, 2),	"UserLocal" },	\
	{ KVM_TRACE_COP0( 5, 0),	"PageMask" },	\
	{ KVM_TRACE_COP0( 6, 0),	"Wired" },	\
	{ KVM_TRACE_COP0( 7, 0),	"HWREna" },	\
	{ KVM_TRACE_COP0( 8, 0),	"BadVAddr" },	\
	{ KVM_TRACE_COP0( 9, 0),	"Count" },	\
	{ KVM_TRACE_COP0(10, 0),	"EntryHi" },	\
	{ KVM_TRACE_COP0(11, 0),	"Compare" },	\
	{ KVM_TRACE_COP0(12, 0),	"Status" },	\
	{ KVM_TRACE_COP0(12, 1),	"IntCtl" },	\
	{ KVM_TRACE_COP0(12, 2),	"SRSCtl" },	\
	{ KVM_TRACE_COP0(13, 0),	"Cause" },	\
	{ KVM_TRACE_COP0(14, 0),	"EPC" },	\
	{ KVM_TRACE_COP0(15, 0),	"PRId" },	\
	{ KVM_TRACE_COP0(15, 1),	"EBase" },	\
	{ KVM_TRACE_COP0(16, 0),	"Config" },	\
	{ KVM_TRACE_COP0(16, 1),	"Config1" },	\
	{ KVM_TRACE_COP0(16, 2),	"Config2" },	\
	{ KVM_TRACE_COP0(16, 3),	"Config3" },	\
	{ KVM_TRACE_COP0(16, 4),	"Config4" },	\
	{ KVM_TRACE_COP0(16, 5),	"Config5" },	\
	{ KVM_TRACE_COP0(16, 7),	"Config7" },	\
	{ KVM_TRACE_COP0(17, 1),	"MAAR" },	\
	{ KVM_TRACE_COP0(17, 2),	"MAARI" },	\
	{ KVM_TRACE_COP0(26, 0),	"ECC" },	\
	{ KVM_TRACE_COP0(30, 0),	"ErrorEPC" },	\
	{ KVM_TRACE_COP0(31, 2),	"KScratch1" },	\
	{ KVM_TRACE_COP0(31, 3),	"KScratch2" },	\
	{ KVM_TRACE_COP0(31, 4),	"KScratch3" },	\
	{ KVM_TRACE_COP0(31, 5),	"KScratch4" },	\
	{ KVM_TRACE_COP0(31, 6),	"KScratch5" },	\
	{ KVM_TRACE_COP0(31, 7),	"KScratch6" },	\
	{ KVM_TRACE_HWR( 0, 0),		"CPUNum" },	\
	{ KVM_TRACE_HWR( 1, 0),		"SYNCI_Step" },	\
	{ KVM_TRACE_HWR( 2, 0),		"CC" },		\
	{ KVM_TRACE_HWR( 3, 0),		"CCRes" },	\
	{ KVM_TRACE_HWR(29, 0),		"ULR" }

/*
 * kvm_hwr: emulated hardware-register access. "op" is a KVM_TRACE_* op
 * code, "reg" is a packed KVM_TRACE_COP0()/KVM_TRACE_HWR() value, "val"
 * is the value read or written. TP_printk() decodes reg three ways:
 * symbolic register name, namespace (bit 8), reg number (bits 7:3) and
 * select (bits 2:0).
 */
TRACE_EVENT(kvm_hwr,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
		     unsigned long val),
	    TP_ARGS(vcpu, op, reg, val),
	    TP_STRUCT__entry(
			__field(unsigned long, val)
			__field(u16, reg)
			__field(u8, op)
	    ),

	    TP_fast_assign(
			__entry->val = val;
			__entry->reg = reg;
			__entry->op = op;
	    ),

	    TP_printk("%s %s (%s:%u:%u) 0x%08lx",
		      __print_symbolic(__entry->op,
				       kvm_trace_symbol_hwr_ops),
		      __print_symbolic(__entry->reg,
				       kvm_trace_symbol_hwr_regs),
		      __print_symbolic(__entry->reg >> 8,
				       kvm_trace_symbol_hwr_cop),
		      (__entry->reg >> 3) & 0x1f,
		      __entry->reg & 0x7,
		      __entry->val)
);

/* Operations on auxiliary (FPU/MSA) guest state, for kvm_aux's "op". */
#define KVM_TRACE_AUX_RESTORE		0
#define KVM_TRACE_AUX_SAVE		1
#define KVM_TRACE_AUX_ENABLE		2
#define KVM_TRACE_AUX_DISABLE		3
#define KVM_TRACE_AUX_DISCARD		4

/* Which auxiliary state the operation affects, for kvm_aux's "state". */
#define KVM_TRACE_AUX_FPU		1
#define KVM_TRACE_AUX_MSA		2
#define KVM_TRACE_AUX_FPU_MSA		3

/* Symbol table for the kvm_aux "op" field. */
#define kvm_trace_symbol_aux_op		\
	{ KVM_TRACE_AUX_RESTORE, "restore" },	\
	{ KVM_TRACE_AUX_SAVE,    "save" },	\
	{ KVM_TRACE_AUX_ENABLE,  "enable" },	\
	{ KVM_TRACE_AUX_DISABLE, "disable" },	\
	{ KVM_TRACE_AUX_DISCARD, "discard" }

/* Symbol table for the kvm_aux "state" field. */
#define kvm_trace_symbol_aux_state	\
	{ KVM_TRACE_AUX_FPU,     "FPU" },	\
	{ KVM_TRACE_AUX_MSA,     "MSA" },	\
	{ KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }

/*
 * kvm_aux: an auxiliary-state operation (op) applied to FPU and/or MSA
 * state (state), together with the guest PC. Both op and state are
 * stored as u8 and printed symbolically.
 */
TRACE_EVENT(kvm_aux,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
		     unsigned int state),
	    TP_ARGS(vcpu, op, state),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(u8, op)
			__field(u8, state)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->op = op;
			__entry->state = state;
	    ),

	    TP_printk("%s %s PC: 0x%08lx",
		      __print_symbolic(__entry->op,
				       kvm_trace_symbol_aux_op),
		      __print_symbolic(__entry->state,
				       kvm_trace_symbol_aux_state),
		      __entry->pc)
);

/*
 * kvm_asid_change: guest PC plus the old and new ASID. Note the ASIDs
 * are truncated to 8 bits by the u8 fields below.
 */
TRACE_EVENT(kvm_asid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
		     unsigned int new_asid),
	    TP_ARGS(vcpu, old_asid, new_asid),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(u8, old_asid)
			__field(u8, new_asid)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->old_asid = old_asid;
			__entry->new_asid = new_asid;
	    ),

	    TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
		      __entry->pc,
		      __entry->old_asid,
		      __entry->new_asid)
);

/* kvm_guestid_change: the newly assigned GuestID (VZ). */
TRACE_EVENT(kvm_guestid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
	    TP_ARGS(vcpu, guestid),
	    TP_STRUCT__entry(
			__field(unsigned int, guestid)
	    ),

	    TP_fast_assign(
			__entry->guestid = guestid;
	    ),

	    TP_printk("GuestID: 0x%02x",
		      __entry->guestid)
);

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>