// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt_insn_decoder.c: Intel Processor Trace support
 * Copyright (c) 2013-2014, Intel Corporation.
 */

#include <linux/kernel.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
#include "../../../arch/x86/include/asm/insn.h"

#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"

#include "event.h"

#include "intel-pt-insn-decoder.h"
#include "dump-insn.h"
#include "util/sample.h"

#if INTEL_PT_INSN_BUF_SZ < MAX_INSN_SIZE || INTEL_PT_INSN_BUF_SZ > MAX_INSN
#error Instruction buffer size too small
#endif

/* Based on branch_type() from arch/x86/events/intel/lbr.c */
static void intel_pt_insn_decoder(struct insn *insn,
				  struct intel_pt_insn *intel_pt_insn)
{
	enum intel_pt_insn_op op = INTEL_PT_OP_OTHER;
	enum intel_pt_insn_branch branch = INTEL_PT_BR_NO_BRANCH;
	int ext;

	intel_pt_insn->rel = 0;
	intel_pt_insn->emulated_ptwrite = false;

	if (insn_is_avx(insn)) {
		intel_pt_insn->op = INTEL_PT_OP_OTHER;
		intel_pt_insn->branch = INTEL_PT_BR_NO_BRANCH;
		intel_pt_insn->length = insn->length;
		return;
	}

	switch (insn->opcode.bytes[0]) {
	case 0xf:
		switch (insn->opcode.bytes[1]) {
		case 0x01:
			switch (insn->modrm.bytes[0]) {
			case 0xc2: /* vmlaunch */
			case 0xc3: /* vmresume */
				op = INTEL_PT_OP_VMENTRY;
				branch = INTEL_PT_BR_INDIRECT;
				break;
			case 0xca:
				switch (insn->prefixes.bytes[3]) {
				case 0xf2: /* erets */
					op = INTEL_PT_OP_ERETS;
					branch = INTEL_PT_BR_INDIRECT;
					break;
				case 0xf3: /* eretu */
					op = INTEL_PT_OP_ERETU;
					branch = INTEL_PT_BR_INDIRECT;
					break;
				default:
					break;
				}
				break;
			default:
				break;
			}
			break;
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			op = INTEL_PT_OP_SYSCALL;
			branch = INTEL_PT_BR_INDIRECT;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			op = INTEL_PT_OP_SYSRET;
			branch = INTEL_PT_BR_INDIRECT;
			break;
		case 0x80 ... 0x8f: /* jcc */
			op = INTEL_PT_OP_JCC;
			branch = INTEL_PT_BR_CONDITIONAL;
			break;
		default:
			break;
		}
		break;
	case 0x70 ... 0x7f: /* jcc */
		op = INTEL_PT_OP_JCC;
		branch = INTEL_PT_BR_CONDITIONAL;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		op = INTEL_PT_OP_RET;
		branch = INTEL_PT_BR_INDIRECT;
		break;
	case 0xcf: /* iret */
		op = INTEL_PT_OP_IRET;
		branch = INTEL_PT_BR_INDIRECT;
		break;
	case 0xcc ... 0xce: /* int */
		op = INTEL_PT_OP_INT;
		branch = INTEL_PT_BR_INDIRECT;
		break;
	case 0xe8: /* call near rel */
		op = INTEL_PT_OP_CALL;
		branch = INTEL_PT_BR_UNCONDITIONAL;
		break;
	case 0x9a: /* call far absolute */
		op = INTEL_PT_OP_CALL;
		branch = INTEL_PT_BR_INDIRECT;
		break;
	case 0xe0 ... 0xe2: /* loop */
		op = INTEL_PT_OP_LOOP;
		branch = INTEL_PT_BR_CONDITIONAL;
		break;
	case 0xe3: /* jcc */
		op = INTEL_PT_OP_JCC;
		branch = INTEL_PT_BR_CONDITIONAL;
		break;
	case 0xe9: /* jmp */
	case 0xeb: /* jmp */
		op = INTEL_PT_OP_JMP;
		branch = INTEL_PT_BR_UNCONDITIONAL;
		break;
	case 0xea: /* far jmp */
		op = INTEL_PT_OP_JMP;
		branch = INTEL_PT_BR_INDIRECT;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		ext = (insn->modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			op = INTEL_PT_OP_CALL;
			branch = INTEL_PT_BR_INDIRECT;
			break;
		case 4:
		case 5:
			op = INTEL_PT_OP_JMP;
			branch = INTEL_PT_BR_INDIRECT;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	intel_pt_insn->op = op;
	intel_pt_insn->branch = branch;
	intel_pt_insn->length = insn->length;

	if (branch == INTEL_PT_BR_CONDITIONAL ||
	    branch == INTEL_PT_BR_UNCONDITIONAL) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		switch (insn->immediate.nbytes) {
		case 1:
			intel_pt_insn->rel = insn->immediate.value;
			break;
		case 2:
			intel_pt_insn->rel =
					bswap_16((short)insn->immediate.value);
			break;
		case 4:
			intel_pt_insn->rel = bswap_32(insn->immediate.value);
			break;
		default:
			intel_pt_insn->rel = 0;
			break;
		}
#else
		intel_pt_insn->rel = insn->immediate.value;
#endif
	}
}

int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
		      struct intel_pt_insn *intel_pt_insn)
{
	struct insn insn;
	int ret;

	ret = insn_decode(&insn, buf, len,
			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
	if (ret < 0 || insn.length > len)
		return -1;

	intel_pt_insn_decoder(&insn, intel_pt_insn);
	if (insn.length < INTEL_PT_INSN_BUF_SZ)
		memcpy(intel_pt_insn->buf, buf, insn.length);
	else
		memcpy(intel_pt_insn->buf, buf, INTEL_PT_INSN_BUF_SZ);
	return 0;
}

int arch_is_branch(const unsigned char *buf, size_t len, int x86_64)
{
	struct intel_pt_insn in;
	if (intel_pt_get_insn(buf, len, x86_64, &in) < 0)
		return -1;
	return in.branch != INTEL_PT_BR_NO_BRANCH;
}

const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused,
		      u8 *inbuf, int inlen, int *lenp)
{
	struct insn insn;
	int n, i, ret;
	int left;

	ret = insn_decode(&insn, inbuf, inlen,
			  x->is64bit ? INSN_MODE_64 : INSN_MODE_32);

	if (ret < 0 || insn.length > inlen)
		return "<bad>";
	if (lenp)
		*lenp = insn.length;
	left = sizeof(x->out);
	n = snprintf(x->out, left, "insn: ");
	left -= n;
	for (i = 0; i < insn.length; i++) {
		n += snprintf(x->out + n, left, "%02x ", inbuf[i]);
		left -= n;
	}
	return x->out;
}

const char *branch_name[] = {
	[INTEL_PT_OP_OTHER]	= "Other",
	[INTEL_PT_OP_CALL]	= "Call",
	[INTEL_PT_OP_RET]	= "Ret",
	[INTEL_PT_OP_JCC]	= "Jcc",
	[INTEL_PT_OP_JMP]	= "Jmp",
	[INTEL_PT_OP_LOOP]	= "Loop",
	[INTEL_PT_OP_IRET]	= "IRet",
	[INTEL_PT_OP_INT]	= "Int",
	[INTEL_PT_OP_SYSCALL]	= "Syscall",
	[INTEL_PT_OP_SYSRET]	= "Sysret",
	[INTEL_PT_OP_VMENTRY]	= "VMentry",
	[INTEL_PT_OP_ERETS]	= "Erets",
	[INTEL_PT_OP_ERETU]	= "Eretu",
};

const char *intel_pt_insn_name(enum intel_pt_insn_op op)
{
	return branch_name[op];
}

int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf,
		       size_t buf_len)
{
	switch (intel_pt_insn->branch) {
	case INTEL_PT_BR_CONDITIONAL:
	case INTEL_PT_BR_UNCONDITIONAL:
		return snprintf(buf, buf_len, "%s %s%d",
				intel_pt_insn_name(intel_pt_insn->op),
				intel_pt_insn->rel > 0 ? "+" : "",
				intel_pt_insn->rel);
	case INTEL_PT_BR_NO_BRANCH:
	case INTEL_PT_BR_INDIRECT:
		return snprintf(buf, buf_len, "%s",
				intel_pt_insn_name(intel_pt_insn->op));
	default:
		break;
	}
	return 0;
}

int intel_pt_insn_type(enum intel_pt_insn_op op)
{
	switch (op) {
	case INTEL_PT_OP_OTHER:
		return 0;
	case INTEL_PT_OP_CALL:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL;
	case INTEL_PT_OP_RET:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN;
	case INTEL_PT_OP_JCC:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL;
	case INTEL_PT_OP_JMP:
		return PERF_IP_FLAG_BRANCH;
	case INTEL_PT_OP_LOOP:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL;
	case INTEL_PT_OP_IRET:
	case INTEL_PT_OP_ERETS:
	case INTEL_PT_OP_ERETU:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
		       PERF_IP_FLAG_INTERRUPT;
	case INTEL_PT_OP_INT:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
		       PERF_IP_FLAG_INTERRUPT;
	case INTEL_PT_OP_SYSCALL:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
		       PERF_IP_FLAG_SYSCALLRET;
	case INTEL_PT_OP_SYSRET:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
		       PERF_IP_FLAG_SYSCALLRET;
	case INTEL_PT_OP_VMENTRY:
		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
		       PERF_IP_FLAG_VMENTRY;
	default:
		return 0;
	}
}
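
/*
 * Illustrative sketch only, not part of the upstream file: a minimal,
 * hypothetical caller showing how the entry points above fit together.
 * The function name and buffer sizes below are made up for illustration,
 * and the block is guarded with "#if 0" so it never affects the build.
 */
#if 0
static void intel_pt_insn_decoder_example(void)
{
	/* 0xeb 0x10 encodes "jmp short" with an 8-bit relative offset of 16 */
	const unsigned char code[] = { 0xeb, 0x10 };
	struct intel_pt_insn ipi;
	char desc[32];

	if (intel_pt_get_insn(code, sizeof(code), /* x86_64 */ 1, &ipi))
		return;

	/*
	 * The decoder classifies this as op INTEL_PT_OP_JMP with
	 * branch INTEL_PT_BR_UNCONDITIONAL, length 2 and rel 16,
	 * so desc reads "Jmp +16".
	 */
	intel_pt_insn_desc(&ipi, desc, sizeof(desc));

	/* For an unconditional jump this maps to PERF_IP_FLAG_BRANCH */
	(void)intel_pt_insn_type(ipi.op);
}
#endif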