1 /****************************************************************************** 2 * emulate.c 3 * 4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. 5 * 6 * Copyright (c) 2005 Keir Fraser 7 * 8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode 9 * privileged instructions: 10 * 11 * Copyright (C) 2006 Qumranet 12 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 13 * 14 * Avi Kivity <avi@qumranet.com> 15 * Yaniv Kamay <yaniv@qumranet.com> 16 * 17 * This work is licensed under the terms of the GNU GPL, version 2. See 18 * the COPYING file in the top-level directory. 19 * 20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 21 */ 22 23 #include <linux/kvm_host.h> 24 #include "kvm_cache_regs.h" 25 #include <linux/module.h> 26 #include <asm/kvm_emulate.h> 27 #include <linux/stringify.h> 28 29 #include "x86.h" 30 #include "tss.h" 31 32 /* 33 * Operand types 34 */ 35 #define OpNone 0ull 36 #define OpImplicit 1ull /* No generic decode */ 37 #define OpReg 2ull /* Register */ 38 #define OpMem 3ull /* Memory */ 39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ 40 #define OpDI 5ull /* ES:DI/EDI/RDI */ 41 #define OpMem64 6ull /* Memory, 64-bit */ 42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ 43 #define OpDX 8ull /* DX register */ 44 #define OpCL 9ull /* CL register (for shifts) */ 45 #define OpImmByte 10ull /* 8-bit sign extended immediate */ 46 #define OpOne 11ull /* Implied 1 */ 47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */ 48 #define OpMem16 13ull /* Memory operand (16-bit). */ 49 #define OpMem32 14ull /* Memory operand (32-bit). */ 50 #define OpImmU 15ull /* Immediate operand, zero extended */ 51 #define OpSI 16ull /* SI/ESI/RSI */ 52 #define OpImmFAddr 17ull /* Immediate far address */ 53 #define OpMemFAddr 18ull /* Far address in memory */ 54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ 55 #define OpES 20ull /* ES */ 56 #define OpCS 21ull /* CS */ 57 #define OpSS 22ull /* SS */ 58 #define OpDS 23ull /* DS */ 59 #define OpFS 24ull /* FS */ 60 #define OpGS 25ull /* GS */ 61 #define OpMem8 26ull /* 8-bit zero extended memory operand */ 62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ 63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ 64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */ 65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */ 66 67 #define OpBits 5 /* Width of operand field */ 68 #define OpMask ((1ull << OpBits) - 1) 69 70 /* 71 * Opcode effective-address decode tables. 72 * Note that we only emulate instructions that have at least one memory 73 * operand (excluding implicit stack references). We assume that stack 74 * references and instruction fetches will never occur in special memory 75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need 76 * not be handled. 77 */ 78 79 /* Operand sizes: 8-bit operands or specified/overridden size. */ 80 #define ByteOp (1<<0) /* 8-bit operands. */ 81 /* Destination operand type. */ 82 #define DstShift 1 83 #define ImplicitOps (OpImplicit << DstShift) 84 #define DstReg (OpReg << DstShift) 85 #define DstMem (OpMem << DstShift) 86 #define DstAcc (OpAcc << DstShift) 87 #define DstDI (OpDI << DstShift) 88 #define DstMem64 (OpMem64 << DstShift) 89 #define DstImmUByte (OpImmUByte << DstShift) 90 #define DstDX (OpDX << DstShift) 91 #define DstAccLo (OpAccLo << DstShift) 92 #define DstMask (OpMask << DstShift) 93 /* Source operand type. */ 94 #define SrcShift 6 95 #define SrcNone (OpNone << SrcShift) 96 #define SrcReg (OpReg << SrcShift) 97 #define SrcMem (OpMem << SrcShift) 98 #define SrcMem16 (OpMem16 << SrcShift) 99 #define SrcMem32 (OpMem32 << SrcShift) 100 #define SrcImm (OpImm << SrcShift) 101 #define SrcImmByte (OpImmByte << SrcShift) 102 #define SrcOne (OpOne << SrcShift) 103 #define SrcImmUByte (OpImmUByte << SrcShift) 104 #define SrcImmU (OpImmU << SrcShift) 105 #define SrcSI (OpSI << SrcShift) 106 #define SrcXLat (OpXLat << SrcShift) 107 #define SrcImmFAddr (OpImmFAddr << SrcShift) 108 #define SrcMemFAddr (OpMemFAddr << SrcShift) 109 #define SrcAcc (OpAcc << SrcShift) 110 #define SrcImmU16 (OpImmU16 << SrcShift) 111 #define SrcImm64 (OpImm64 << SrcShift) 112 #define SrcDX (OpDX << SrcShift) 113 #define SrcMem8 (OpMem8 << SrcShift) 114 #define SrcAccHi (OpAccHi << SrcShift) 115 #define SrcMask (OpMask << SrcShift) 116 #define BitOp (1<<11) 117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */ 118 #define String (1<<13) /* String instruction (rep capable) */ 119 #define Stack (1<<14) /* Stack instruction (push/pop) */ 120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ 121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ 122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ 123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ 124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ 125 #define Escape (5<<15) /* Escape to coprocessor instruction */ 126 #define Sse (1<<18) /* SSE Vector instruction */ 127 /* Generic ModRM decode. */ 128 #define ModRM (1<<19) 129 /* Destination is only written; never read. */ 130 #define Mov (1<<20) 131 /* Misc flags */ 132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ 133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ 134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ 135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ 136 #define Undefined (1<<25) /* No Such Instruction */ 137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */ 138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ 139 #define No64 (1<<28) 140 #define PageTable (1 << 29) /* instruction used to write page table */ 141 #define NotImpl (1 << 30) /* instruction is not implemented */ 142 /* Source 2 operand type */ 143 #define Src2Shift (31) 144 #define Src2None (OpNone << Src2Shift) 145 #define Src2Mem (OpMem << Src2Shift) 146 #define Src2CL (OpCL << Src2Shift) 147 #define Src2ImmByte (OpImmByte << Src2Shift) 148 #define Src2One (OpOne << Src2Shift) 149 #define Src2Imm (OpImm << Src2Shift) 150 #define Src2ES (OpES << Src2Shift) 151 #define Src2CS (OpCS << Src2Shift) 152 #define Src2SS (OpSS << Src2Shift) 153 #define Src2DS (OpDS << Src2Shift) 154 #define Src2FS (OpFS << Src2Shift) 155 #define Src2GS (OpGS << Src2Shift) 156 #define Src2Mask (OpMask << Src2Shift) 157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ 158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ 159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */ 160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ 161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ 162 #define NoWrite ((u64)1 << 45) /* No writeback */ 163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */ 164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */ 165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */ 166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ 167 #define NoBigReal ((u64)1 << 50) /* No big real mode */ 168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ 169 170 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) 171 172 #define X2(x...) x, x 173 #define X3(x...) X2(x), x 174 #define X4(x...) X2(x), X2(x) 175 #define X5(x...) X4(x), x 176 #define X6(x...) X4(x), X2(x) 177 #define X7(x...) X4(x), X3(x) 178 #define X8(x...) X4(x), X4(x) 179 #define X16(x...) X8(x), X8(x) 180 181 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) 182 #define FASTOP_SIZE 8 183 184 /* 185 * fastop functions have a special calling convention: 186 * 187 * dst: rax (in/out) 188 * src: rdx (in/out) 189 * src2: rcx (in) 190 * flags: rflags (in/out) 191 * ex: rsi (in:fastop pointer, out:zero if exception) 192 * 193 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for 194 * different operand sizes can be reached by calculation, rather than a jump 195 * table (which would be bigger than the code). 196 * 197 * fastop functions are declared as taking a never-defined fastop parameter, 198 * so they can't be called from C directly. 199 */ 200 201 struct fastop; 202 203 struct opcode { 204 u64 flags : 56; 205 u64 intercept : 8; 206 union { 207 int (*execute)(struct x86_emulate_ctxt *ctxt); 208 const struct opcode *group; 209 const struct group_dual *gdual; 210 const struct gprefix *gprefix; 211 const struct escape *esc; 212 void (*fastop)(struct fastop *fake); 213 } u; 214 int (*check_perm)(struct x86_emulate_ctxt *ctxt); 215 }; 216 217 struct group_dual { 218 struct opcode mod012[8]; 219 struct opcode mod3[8]; 220 }; 221 222 struct gprefix { 223 struct opcode pfx_no; 224 struct opcode pfx_66; 225 struct opcode pfx_f2; 226 struct opcode pfx_f3; 227 }; 228 229 struct escape { 230 struct opcode op[8]; 231 struct opcode high[64]; 232 }; 233 234 /* EFLAGS bit definitions. */ 235 #define EFLG_ID (1<<21) 236 #define EFLG_VIP (1<<20) 237 #define EFLG_VIF (1<<19) 238 #define EFLG_AC (1<<18) 239 #define EFLG_VM (1<<17) 240 #define EFLG_RF (1<<16) 241 #define EFLG_IOPL (3<<12) 242 #define EFLG_NT (1<<14) 243 #define EFLG_OF (1<<11) 244 #define EFLG_DF (1<<10) 245 #define EFLG_IF (1<<9) 246 #define EFLG_TF (1<<8) 247 #define EFLG_SF (1<<7) 248 #define EFLG_ZF (1<<6) 249 #define EFLG_AF (1<<4) 250 #define EFLG_PF (1<<2) 251 #define EFLG_CF (1<<0) 252 253 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a 254 #define EFLG_RESERVED_ONE_MASK 2 255 256 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) 257 { 258 if (!(ctxt->regs_valid & (1 << nr))) { 259 ctxt->regs_valid |= 1 << nr; 260 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); 261 } 262 return ctxt->_regs[nr]; 263 } 264 265 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) 266 { 267 ctxt->regs_valid |= 1 << nr; 268 ctxt->regs_dirty |= 1 << nr; 269 return &ctxt->_regs[nr]; 270 } 271 272 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) 273 { 274 reg_read(ctxt, nr); 275 return reg_write(ctxt, nr); 276 } 277 278 static void writeback_registers(struct x86_emulate_ctxt *ctxt) 279 { 280 unsigned reg; 281 282 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) 283 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); 284 } 285 286 static void invalidate_registers(struct x86_emulate_ctxt *ctxt) 287 { 288 ctxt->regs_dirty = 0; 289 ctxt->regs_valid = 0; 290 } 291 292 /* 293 * These EFLAGS bits are restored from saved value during emulation, and 294 * any changes are written back to the saved value after emulation. 295 */ 296 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) 297 298 #ifdef CONFIG_X86_64 299 #define ON64(x) x 300 #else 301 #define ON64(x) 302 #endif 303 304 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); 305 306 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" 307 #define FOP_RET "ret \n\t" 308 309 #define FOP_START(op) \ 310 extern void em_##op(struct fastop *fake); \ 311 asm(".pushsection .text, \"ax\" \n\t" \ 312 ".global em_" #op " \n\t" \ 313 FOP_ALIGN \ 314 "em_" #op ": \n\t" 315 316 #define FOP_END \ 317 ".popsection") 318 319 #define FOPNOP() FOP_ALIGN FOP_RET 320 321 #define FOP1E(op, dst) \ 322 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET 323 324 #define FOP1EEX(op, dst) \ 325 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception) 326 327 #define FASTOP1(op) \ 328 FOP_START(op) \ 329 FOP1E(op##b, al) \ 330 FOP1E(op##w, ax) \ 331 FOP1E(op##l, eax) \ 332 ON64(FOP1E(op##q, rax)) \ 333 FOP_END 334 335 /* 1-operand, using src2 (for MUL/DIV r/m) */ 336 #define FASTOP1SRC2(op, name) \ 337 FOP_START(name) \ 338 FOP1E(op, cl) \ 339 FOP1E(op, cx) \ 340 FOP1E(op, ecx) \ 341 ON64(FOP1E(op, rcx)) \ 342 FOP_END 343 344 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ 345 #define FASTOP1SRC2EX(op, name) \ 346 FOP_START(name) \ 347 FOP1EEX(op, cl) \ 348 FOP1EEX(op, cx) \ 349 FOP1EEX(op, ecx) \ 350 ON64(FOP1EEX(op, rcx)) \ 351 FOP_END 352 353 #define FOP2E(op, dst, src) \ 354 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET 355 356 #define FASTOP2(op) \ 357 FOP_START(op) \ 358 FOP2E(op##b, al, dl) \ 359 FOP2E(op##w, ax, dx) \ 360 FOP2E(op##l, eax, edx) \ 361 ON64(FOP2E(op##q, rax, rdx)) \ 362 FOP_END 363 364 /* 2 operand, word only */ 365 #define FASTOP2W(op) \ 366 FOP_START(op) \ 367 FOPNOP() \ 368 FOP2E(op##w, ax, dx) \ 369 FOP2E(op##l, eax, edx) \ 370 ON64(FOP2E(op##q, rax, rdx)) \ 371 FOP_END 372 373 /* 2 operand, src is CL */ 374 #define FASTOP2CL(op) \ 375 FOP_START(op) \ 376 FOP2E(op##b, al, cl) \ 377 FOP2E(op##w, ax, cl) \ 378 FOP2E(op##l, eax, cl) \ 379 ON64(FOP2E(op##q, rax, cl)) \ 380 FOP_END 381 382 #define FOP3E(op, dst, src, src2) \ 383 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET 384 385 /* 3-operand, word-only, src2=cl */ 386 #define FASTOP3WCL(op) \ 387 FOP_START(op) \ 388 FOPNOP() \ 389 FOP3E(op##w, ax, dx, cl) \ 390 FOP3E(op##l, eax, edx, cl) \ 391 ON64(FOP3E(op##q, rax, rdx, cl)) \ 392 FOP_END 393 394 /* Special case for SETcc - 1 instruction per cc */ 395 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" 396 397 asm(".global kvm_fastop_exception \n" 398 "kvm_fastop_exception: xor %esi, %esi; ret"); 399 400 FOP_START(setcc) 401 FOP_SETCC(seto) 402 FOP_SETCC(setno) 403 FOP_SETCC(setc) 404 FOP_SETCC(setnc) 405 FOP_SETCC(setz) 406 FOP_SETCC(setnz) 407 FOP_SETCC(setbe) 408 FOP_SETCC(setnbe) 409 FOP_SETCC(sets) 410 FOP_SETCC(setns) 411 FOP_SETCC(setp) 412 FOP_SETCC(setnp) 413 FOP_SETCC(setl) 414 FOP_SETCC(setnl) 415 FOP_SETCC(setle) 416 FOP_SETCC(setnle) 417 FOP_END; 418 419 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET 420 FOP_END; 421 422 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, 423 enum x86_intercept intercept, 424 enum x86_intercept_stage stage) 425 { 426 struct x86_instruction_info info = { 427 .intercept = intercept, 428 .rep_prefix = ctxt->rep_prefix, 429 .modrm_mod = ctxt->modrm_mod, 430 .modrm_reg = ctxt->modrm_reg, 431 .modrm_rm = ctxt->modrm_rm, 432 .src_val = ctxt->src.val64, 433 .dst_val = ctxt->dst.val64, 434 .src_bytes = ctxt->src.bytes, 435 .dst_bytes = ctxt->dst.bytes, 436 .ad_bytes = ctxt->ad_bytes, 437 .next_rip = ctxt->eip, 438 }; 439 440 return ctxt->ops->intercept(ctxt, &info, stage); 441 } 442 443 static void assign_masked(ulong *dest, ulong src, ulong mask) 444 { 445 *dest = (*dest & ~mask) | (src & mask); 446 } 447 448 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) 449 { 450 return (1UL << (ctxt->ad_bytes << 3)) - 1; 451 } 452 453 static ulong stack_mask(struct x86_emulate_ctxt *ctxt) 454 { 455 u16 sel; 456 struct desc_struct ss; 457 458 if (ctxt->mode == X86EMUL_MODE_PROT64) 459 return ~0UL; 460 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); 461 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ 462 } 463 464 static int stack_size(struct x86_emulate_ctxt *ctxt) 465 { 466 return (__fls(stack_mask(ctxt)) + 1) >> 3; 467 } 468 469 /* Access/update address held in a register, based on addressing mode. */ 470 static inline unsigned long 471 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) 472 { 473 if (ctxt->ad_bytes == sizeof(unsigned long)) 474 return reg; 475 else 476 return reg & ad_mask(ctxt); 477 } 478 479 static inline unsigned long 480 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) 481 { 482 return address_mask(ctxt, reg); 483 } 484 485 static void masked_increment(ulong *reg, ulong mask, int inc) 486 { 487 assign_masked(reg, *reg + inc, mask); 488 } 489 490 static inline void 491 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) 492 { 493 ulong mask; 494 495 if (ctxt->ad_bytes == sizeof(unsigned long)) 496 mask = ~0UL; 497 else 498 mask = ad_mask(ctxt); 499 masked_increment(reg, mask, inc); 500 } 501 502 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) 503 { 504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); 505 } 506 507 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) 508 { 509 register_address_increment(ctxt, &ctxt->_eip, rel); 510 } 511 512 static u32 desc_limit_scaled(struct desc_struct *desc) 513 { 514 u32 limit = get_desc_limit(desc); 515 516 return desc->g ? (limit << 12) | 0xfff : limit; 517 } 518 519 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) 520 { 521 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) 522 return 0; 523 524 return ctxt->ops->get_cached_segment_base(ctxt, seg); 525 } 526 527 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, 528 u32 error, bool valid) 529 { 530 ctxt->exception.vector = vec; 531 ctxt->exception.error_code = error; 532 ctxt->exception.error_code_valid = valid; 533 return X86EMUL_PROPAGATE_FAULT; 534 } 535 536 static int emulate_db(struct x86_emulate_ctxt *ctxt) 537 { 538 return emulate_exception(ctxt, DB_VECTOR, 0, false); 539 } 540 541 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) 542 { 543 return emulate_exception(ctxt, GP_VECTOR, err, true); 544 } 545 546 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) 547 { 548 return emulate_exception(ctxt, SS_VECTOR, err, true); 549 } 550 551 static int emulate_ud(struct x86_emulate_ctxt *ctxt) 552 { 553 return emulate_exception(ctxt, UD_VECTOR, 0, false); 554 } 555 556 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) 557 { 558 return emulate_exception(ctxt, TS_VECTOR, err, true); 559 } 560 561 static int emulate_de(struct x86_emulate_ctxt *ctxt) 562 { 563 return emulate_exception(ctxt, DE_VECTOR, 0, false); 564 } 565 566 static int emulate_nm(struct x86_emulate_ctxt *ctxt) 567 { 568 return emulate_exception(ctxt, NM_VECTOR, 0, false); 569 } 570 571 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) 572 { 573 u16 selector; 574 struct desc_struct desc; 575 576 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); 577 return selector; 578 } 579 580 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, 581 unsigned seg) 582 { 583 u16 dummy; 584 u32 base3; 585 struct desc_struct desc; 586 587 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); 588 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); 589 } 590 591 /* 592 * x86 defines three classes of vector instructions: explicitly 593 * aligned, explicitly unaligned, and the rest, which change behaviour 594 * depending on whether they're AVX encoded or not. 595 * 596 * Also included is CMPXCHG16B which is not a vector instruction, yet it is 597 * subject to the same check. 598 */ 599 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) 600 { 601 if (likely(size < 16)) 602 return false; 603 604 if (ctxt->d & Aligned) 605 return true; 606 else if (ctxt->d & Unaligned) 607 return false; 608 else if (ctxt->d & Avx) 609 return false; 610 else 611 return true; 612 } 613 614 static int __linearize(struct x86_emulate_ctxt *ctxt, 615 struct segmented_address addr, 616 unsigned size, bool write, bool fetch, 617 ulong *linear) 618 { 619 struct desc_struct desc; 620 bool usable; 621 ulong la; 622 u32 lim; 623 u16 sel; 624 unsigned cpl; 625 626 la = seg_base(ctxt, addr.seg) + addr.ea; 627 switch (ctxt->mode) { 628 case X86EMUL_MODE_PROT64: 629 if (((signed long)la << 16) >> 16 != la) 630 return emulate_gp(ctxt, 0); 631 break; 632 default: 633 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, 634 addr.seg); 635 if (!usable) 636 goto bad; 637 /* code segment in protected mode or read-only data segment */ 638 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) 639 || !(desc.type & 2)) && write) 640 goto bad; 641 /* unreadable code segment */ 642 if (!fetch && (desc.type & 8) && !(desc.type & 2)) 643 goto bad; 644 lim = desc_limit_scaled(&desc); 645 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && 646 (ctxt->d & NoBigReal)) { 647 /* la is between zero and 0xffff */ 648 if (la > 0xffff || (u32)(la + size - 1) > 0xffff) 649 goto bad; 650 } else if ((desc.type & 8) || !(desc.type & 4)) { 651 /* expand-up segment */ 652 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) 653 goto bad; 654 } else { 655 /* expand-down segment */ 656 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) 657 goto bad; 658 lim = desc.d ? 0xffffffff : 0xffff; 659 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) 660 goto bad; 661 } 662 cpl = ctxt->ops->cpl(ctxt); 663 if (!(desc.type & 8)) { 664 /* data segment */ 665 if (cpl > desc.dpl) 666 goto bad; 667 } else if ((desc.type & 8) && !(desc.type & 4)) { 668 /* nonconforming code segment */ 669 if (cpl != desc.dpl) 670 goto bad; 671 } else if ((desc.type & 8) && (desc.type & 4)) { 672 /* conforming code segment */ 673 if (cpl < desc.dpl) 674 goto bad; 675 } 676 break; 677 } 678 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8) 679 la &= (u32)-1; 680 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) 681 return emulate_gp(ctxt, 0); 682 *linear = la; 683 return X86EMUL_CONTINUE; 684 bad: 685 if (addr.seg == VCPU_SREG_SS) 686 return emulate_ss(ctxt, sel); 687 else 688 return emulate_gp(ctxt, sel); 689 } 690 691 static int linearize(struct x86_emulate_ctxt *ctxt, 692 struct segmented_address addr, 693 unsigned size, bool write, 694 ulong *linear) 695 { 696 return __linearize(ctxt, addr, size, write, false, linear); 697 } 698 699 700 static int segmented_read_std(struct x86_emulate_ctxt *ctxt, 701 struct segmented_address addr, 702 void *data, 703 unsigned size) 704 { 705 int rc; 706 ulong linear; 707 708 rc = linearize(ctxt, addr, size, false, &linear); 709 if (rc != X86EMUL_CONTINUE) 710 return rc; 711 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); 712 } 713 714 /* 715 * Prefetch the remaining bytes of the instruction without crossing page 716 * boundary if they are not in fetch_cache yet. 717 */ 718 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) 719 { 720 int rc; 721 unsigned size; 722 unsigned long linear; 723 int cur_size = ctxt->fetch.end - ctxt->fetch.data; 724 struct segmented_address addr = { .seg = VCPU_SREG_CS, 725 .ea = ctxt->eip + cur_size }; 726 727 size = 15UL ^ cur_size; 728 rc = __linearize(ctxt, addr, size, false, true, &linear); 729 if (unlikely(rc != X86EMUL_CONTINUE)) 730 return rc; 731 732 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); 733 734 /* 735 * One instruction can only straddle two pages, 736 * and one has been loaded at the beginning of 737 * x86_decode_insn. So, if not enough bytes 738 * still, we must have hit the 15-byte boundary. 739 */ 740 if (unlikely(size < op_size)) 741 return X86EMUL_UNHANDLEABLE; 742 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, 743 size, &ctxt->exception); 744 if (unlikely(rc != X86EMUL_CONTINUE)) 745 return rc; 746 ctxt->fetch.end += size; 747 return X86EMUL_CONTINUE; 748 } 749 750 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, 751 unsigned size) 752 { 753 if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) 754 return __do_insn_fetch_bytes(ctxt, size); 755 else 756 return X86EMUL_CONTINUE; 757 } 758 759 /* Fetch next part of the instruction being emulated. */ 760 #define insn_fetch(_type, _ctxt) \ 761 ({ _type _x; \ 762 \ 763 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ 764 if (rc != X86EMUL_CONTINUE) \ 765 goto done; \ 766 ctxt->_eip += sizeof(_type); \ 767 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ 768 ctxt->fetch.ptr += sizeof(_type); \ 769 _x; \ 770 }) 771 772 #define insn_fetch_arr(_arr, _size, _ctxt) \ 773 ({ \ 774 rc = do_insn_fetch_bytes(_ctxt, _size); \ 775 if (rc != X86EMUL_CONTINUE) \ 776 goto done; \ 777 ctxt->_eip += (_size); \ 778 memcpy(_arr, ctxt->fetch.ptr, _size); \ 779 ctxt->fetch.ptr += (_size); \ 780 }) 781 782 /* 783 * Given the 'reg' portion of a ModRM byte, and a register block, return a 784 * pointer into the block that addresses the relevant register. 785 * @highbyte_regs specifies whether to decode AH,CH,DH,BH. 786 */ 787 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, 788 int byteop) 789 { 790 void *p; 791 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; 792 793 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) 794 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; 795 else 796 p = reg_rmw(ctxt, modrm_reg); 797 return p; 798 } 799 800 static int read_descriptor(struct x86_emulate_ctxt *ctxt, 801 struct segmented_address addr, 802 u16 *size, unsigned long *address, int op_bytes) 803 { 804 int rc; 805 806 if (op_bytes == 2) 807 op_bytes = 3; 808 *address = 0; 809 rc = segmented_read_std(ctxt, addr, size, 2); 810 if (rc != X86EMUL_CONTINUE) 811 return rc; 812 addr.ea += 2; 813 rc = segmented_read_std(ctxt, addr, address, op_bytes); 814 return rc; 815 } 816 817 FASTOP2(add); 818 FASTOP2(or); 819 FASTOP2(adc); 820 FASTOP2(sbb); 821 FASTOP2(and); 822 FASTOP2(sub); 823 FASTOP2(xor); 824 FASTOP2(cmp); 825 FASTOP2(test); 826 827 FASTOP1SRC2(mul, mul_ex); 828 FASTOP1SRC2(imul, imul_ex); 829 FASTOP1SRC2EX(div, div_ex); 830 FASTOP1SRC2EX(idiv, idiv_ex); 831 832 FASTOP3WCL(shld); 833 FASTOP3WCL(shrd); 834 835 FASTOP2W(imul); 836 837 FASTOP1(not); 838 FASTOP1(neg); 839 FASTOP1(inc); 840 FASTOP1(dec); 841 842 FASTOP2CL(rol); 843 FASTOP2CL(ror); 844 FASTOP2CL(rcl); 845 FASTOP2CL(rcr); 846 FASTOP2CL(shl); 847 FASTOP2CL(shr); 848 FASTOP2CL(sar); 849 850 FASTOP2W(bsf); 851 FASTOP2W(bsr); 852 FASTOP2W(bt); 853 FASTOP2W(bts); 854 FASTOP2W(btr); 855 FASTOP2W(btc); 856 857 FASTOP2(xadd); 858 859 static u8 test_cc(unsigned int condition, unsigned long flags) 860 { 861 u8 rc; 862 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); 863 864 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; 865 asm("push %[flags]; popf; call *%[fastop]" 866 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); 867 return rc; 868 } 869 870 static void fetch_register_operand(struct operand *op) 871 { 872 switch (op->bytes) { 873 case 1: 874 op->val = *(u8 *)op->addr.reg; 875 break; 876 case 2: 877 op->val = *(u16 *)op->addr.reg; 878 break; 879 case 4: 880 op->val = *(u32 *)op->addr.reg; 881 break; 882 case 8: 883 op->val = *(u64 *)op->addr.reg; 884 break; 885 } 886 } 887 888 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) 889 { 890 ctxt->ops->get_fpu(ctxt); 891 switch (reg) { 892 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; 893 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; 894 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; 895 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; 896 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; 897 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; 898 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break; 899 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; 900 #ifdef CONFIG_X86_64 901 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; 902 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; 903 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; 904 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; 905 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; 906 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; 907 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; 908 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; 909 #endif 910 default: BUG(); 911 } 912 ctxt->ops->put_fpu(ctxt); 913 } 914 915 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, 916 int reg) 917 { 918 ctxt->ops->get_fpu(ctxt); 919 switch (reg) { 920 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; 921 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; 922 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break; 923 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; 924 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; 925 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; 926 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; 927 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; 928 #ifdef CONFIG_X86_64 929 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; 930 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break; 931 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; 932 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; 933 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; 934 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; 935 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; 936 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; 937 #endif 938 default: BUG(); 939 } 940 ctxt->ops->put_fpu(ctxt); 941 } 942 943 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 944 { 945 ctxt->ops->get_fpu(ctxt); 946 switch (reg) { 947 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; 948 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; 949 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; 950 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; 951 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; 952 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; 953 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; 954 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; 955 default: BUG(); 956 } 957 ctxt->ops->put_fpu(ctxt); 958 } 959 960 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 961 { 962 ctxt->ops->get_fpu(ctxt); 963 switch (reg) { 964 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; 965 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; 966 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; 967 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; 968 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; 969 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; 970 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; 971 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; 972 default: BUG(); 973 } 974 ctxt->ops->put_fpu(ctxt); 975 } 976 977 static int em_fninit(struct x86_emulate_ctxt *ctxt) 978 { 979 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 980 return emulate_nm(ctxt); 981 982 ctxt->ops->get_fpu(ctxt); 983 asm volatile("fninit"); 984 ctxt->ops->put_fpu(ctxt); 985 return X86EMUL_CONTINUE; 986 } 987 988 static int em_fnstcw(struct x86_emulate_ctxt *ctxt) 989 { 990 u16 fcw; 991 992 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 993 return emulate_nm(ctxt); 994 995 ctxt->ops->get_fpu(ctxt); 996 asm volatile("fnstcw %0": "+m"(fcw)); 997 ctxt->ops->put_fpu(ctxt); 998 999 /* force 2 byte destination */ 1000 ctxt->dst.bytes = 2; 1001 ctxt->dst.val = fcw; 1002 1003 return X86EMUL_CONTINUE; 1004 } 1005 1006 static int em_fnstsw(struct x86_emulate_ctxt *ctxt) 1007 { 1008 u16 fsw; 1009 1010 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1011 return emulate_nm(ctxt); 1012 1013 ctxt->ops->get_fpu(ctxt); 1014 asm volatile("fnstsw %0": "+m"(fsw)); 1015 ctxt->ops->put_fpu(ctxt); 1016 1017 /* force 2 byte destination */ 1018 ctxt->dst.bytes = 2; 1019 ctxt->dst.val = fsw; 1020 1021 return X86EMUL_CONTINUE; 1022 } 1023 1024 static void decode_register_operand(struct x86_emulate_ctxt *ctxt, 1025 struct operand *op) 1026 { 1027 unsigned reg = ctxt->modrm_reg; 1028 1029 if (!(ctxt->d & ModRM)) 1030 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); 1031 1032 if (ctxt->d & Sse) { 1033 op->type = OP_XMM; 1034 op->bytes = 16; 1035 op->addr.xmm = reg; 1036 read_sse_reg(ctxt, &op->vec_val, reg); 1037 return; 1038 } 1039 if (ctxt->d & Mmx) { 1040 reg &= 7; 1041 op->type = OP_MM; 1042 op->bytes = 8; 1043 op->addr.mm = reg; 1044 return; 1045 } 1046 1047 op->type = OP_REG; 1048 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 1049 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); 1050 1051 fetch_register_operand(op); 1052 op->orig_val = op->val; 1053 } 1054 1055 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) 1056 { 1057 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) 1058 ctxt->modrm_seg = VCPU_SREG_SS; 1059 } 1060 1061 static int decode_modrm(struct x86_emulate_ctxt *ctxt, 1062 struct operand *op) 1063 { 1064 u8 sib; 1065 int index_reg, base_reg, scale; 1066 int rc = X86EMUL_CONTINUE; 1067 ulong modrm_ea = 0; 1068 1069 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ 1070 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ 1071 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ 1072 1073 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; 1074 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; 1075 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); 1076 ctxt->modrm_seg = VCPU_SREG_DS; 1077 1078 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { 1079 op->type = OP_REG; 1080 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 1081 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1082 ctxt->d & ByteOp); 1083 if (ctxt->d & Sse) { 1084 op->type = OP_XMM; 1085 op->bytes = 16; 1086 op->addr.xmm = ctxt->modrm_rm; 1087 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); 1088 return rc; 1089 } 1090 if (ctxt->d & Mmx) { 1091 op->type = OP_MM; 1092 op->bytes = 8; 1093 op->addr.mm = ctxt->modrm_rm & 7; 1094 return rc; 1095 } 1096 fetch_register_operand(op); 1097 return rc; 1098 } 1099 1100 op->type = OP_MEM; 1101 1102 if (ctxt->ad_bytes == 2) { 1103 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); 1104 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); 1105 unsigned si = reg_read(ctxt, VCPU_REGS_RSI); 1106 unsigned di = reg_read(ctxt, VCPU_REGS_RDI); 1107 1108 /* 16-bit ModR/M decode. */ 1109 switch (ctxt->modrm_mod) { 1110 case 0: 1111 if (ctxt->modrm_rm == 6) 1112 modrm_ea += insn_fetch(u16, ctxt); 1113 break; 1114 case 1: 1115 modrm_ea += insn_fetch(s8, ctxt); 1116 break; 1117 case 2: 1118 modrm_ea += insn_fetch(u16, ctxt); 1119 break; 1120 } 1121 switch (ctxt->modrm_rm) { 1122 case 0: 1123 modrm_ea += bx + si; 1124 break; 1125 case 1: 1126 modrm_ea += bx + di; 1127 break; 1128 case 2: 1129 modrm_ea += bp + si; 1130 break; 1131 case 3: 1132 modrm_ea += bp + di; 1133 break; 1134 case 4: 1135 modrm_ea += si; 1136 break; 1137 case 5: 1138 modrm_ea += di; 1139 break; 1140 case 6: 1141 if (ctxt->modrm_mod != 0) 1142 modrm_ea += bp; 1143 break; 1144 case 7: 1145 modrm_ea += bx; 1146 break; 1147 } 1148 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || 1149 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) 1150 ctxt->modrm_seg = VCPU_SREG_SS; 1151 modrm_ea = (u16)modrm_ea; 1152 } else { 1153 /* 32/64-bit ModR/M decode. */ 1154 if ((ctxt->modrm_rm & 7) == 4) { 1155 sib = insn_fetch(u8, ctxt); 1156 index_reg |= (sib >> 3) & 7; 1157 base_reg |= sib & 7; 1158 scale = sib >> 6; 1159 1160 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) 1161 modrm_ea += insn_fetch(s32, ctxt); 1162 else { 1163 modrm_ea += reg_read(ctxt, base_reg); 1164 adjust_modrm_seg(ctxt, base_reg); 1165 } 1166 if (index_reg != 4) 1167 modrm_ea += reg_read(ctxt, index_reg) << scale; 1168 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { 1169 if (ctxt->mode == X86EMUL_MODE_PROT64) 1170 ctxt->rip_relative = 1; 1171 } else { 1172 base_reg = ctxt->modrm_rm; 1173 modrm_ea += reg_read(ctxt, base_reg); 1174 adjust_modrm_seg(ctxt, base_reg); 1175 } 1176 switch (ctxt->modrm_mod) { 1177 case 0: 1178 if (ctxt->modrm_rm == 5) 1179 modrm_ea += insn_fetch(s32, ctxt); 1180 break; 1181 case 1: 1182 modrm_ea += insn_fetch(s8, ctxt); 1183 break; 1184 case 2: 1185 modrm_ea += insn_fetch(s32, ctxt); 1186 break; 1187 } 1188 } 1189 op->addr.mem.ea = modrm_ea; 1190 if (ctxt->ad_bytes != 8) 1191 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; 1192 1193 done: 1194 return rc; 1195 } 1196 1197 static int decode_abs(struct x86_emulate_ctxt *ctxt, 1198 struct operand *op) 1199 { 1200 int rc = X86EMUL_CONTINUE; 1201 1202 op->type = OP_MEM; 1203 switch (ctxt->ad_bytes) { 1204 case 2: 1205 op->addr.mem.ea = insn_fetch(u16, ctxt); 1206 break; 1207 case 4: 1208 op->addr.mem.ea = insn_fetch(u32, ctxt); 1209 break; 1210 case 8: 1211 op->addr.mem.ea = insn_fetch(u64, ctxt); 1212 break; 1213 } 1214 done: 1215 return rc; 1216 } 1217 1218 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) 1219 { 1220 long sv = 0, mask; 1221 1222 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { 1223 mask = ~((long)ctxt->dst.bytes * 8 - 1); 1224 1225 if (ctxt->src.bytes == 2) 1226 sv = (s16)ctxt->src.val & (s16)mask; 1227 else if (ctxt->src.bytes == 4) 1228 sv = (s32)ctxt->src.val & (s32)mask; 1229 else 1230 sv = (s64)ctxt->src.val & (s64)mask; 1231 1232 ctxt->dst.addr.mem.ea += (sv >> 3); 1233 } 1234 1235 /* only subword offset */ 1236 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; 1237 } 1238 1239 static int read_emulated(struct x86_emulate_ctxt *ctxt, 1240 unsigned long addr, void *dest, unsigned size) 1241 { 1242 int rc; 1243 struct read_cache *mc = &ctxt->mem_read; 1244 1245 if (mc->pos < mc->end) 1246 goto read_cached; 1247 1248 WARN_ON((mc->end + size) >= sizeof(mc->data)); 1249 1250 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, 1251 &ctxt->exception); 1252 if (rc != X86EMUL_CONTINUE) 1253 return rc; 1254 1255 mc->end += size; 1256 1257 read_cached: 1258 memcpy(dest, mc->data + mc->pos, size); 1259 mc->pos += size; 1260 return X86EMUL_CONTINUE; 1261 } 1262 1263 static int segmented_read(struct x86_emulate_ctxt *ctxt, 1264 struct segmented_address addr, 1265 void *data, 1266 unsigned size) 1267 { 1268 int rc; 1269 ulong linear; 1270 1271 rc = linearize(ctxt, addr, size, false, &linear); 1272 if (rc != X86EMUL_CONTINUE) 1273 return rc; 1274 return read_emulated(ctxt, linear, data, size); 1275 } 1276 1277 static int segmented_write(struct x86_emulate_ctxt *ctxt, 1278 struct segmented_address addr, 1279 const void *data, 1280 unsigned size) 1281 { 1282 int rc; 1283 ulong linear; 1284 1285 rc = linearize(ctxt, addr, size, true, &linear); 1286 if (rc != X86EMUL_CONTINUE) 1287 return rc; 1288 return ctxt->ops->write_emulated(ctxt, linear, data, size, 1289 &ctxt->exception); 1290 } 1291 1292 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, 1293 struct segmented_address addr, 1294 const void *orig_data, const void *data, 1295 unsigned size) 1296 { 1297 int rc; 1298 ulong linear; 1299 1300 rc = linearize(ctxt, addr, size, true, &linear); 1301 if (rc != X86EMUL_CONTINUE) 1302 return rc; 1303 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, 1304 size, &ctxt->exception); 1305 } 1306 1307 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, 1308 unsigned int size, unsigned short port, 1309 void *dest) 1310 { 1311 struct read_cache *rc = &ctxt->io_read; 1312 1313 if (rc->pos == rc->end) { /* refill pio read ahead */ 1314 unsigned int in_page, n; 1315 unsigned int count = ctxt->rep_prefix ? 1316 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; 1317 in_page = (ctxt->eflags & EFLG_DF) ? 1318 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : 1319 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); 1320 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); 1321 if (n == 0) 1322 n = 1; 1323 rc->pos = rc->end = 0; 1324 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) 1325 return 0; 1326 rc->end = n * size; 1327 } 1328 1329 if (ctxt->rep_prefix && (ctxt->d & String) && 1330 !(ctxt->eflags & EFLG_DF)) { 1331 ctxt->dst.data = rc->data + rc->pos; 1332 ctxt->dst.type = OP_MEM_STR; 1333 ctxt->dst.count = (rc->end - rc->pos) / size; 1334 rc->pos = rc->end; 1335 } else { 1336 memcpy(dest, rc->data + rc->pos, size); 1337 rc->pos += size; 1338 } 1339 return 1; 1340 } 1341 1342 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, 1343 u16 index, struct desc_struct *desc) 1344 { 1345 struct desc_ptr dt; 1346 ulong addr; 1347 1348 ctxt->ops->get_idt(ctxt, &dt); 1349 1350 if (dt.size < index * 8 + 7) 1351 return emulate_gp(ctxt, index << 3 | 0x2); 1352 1353 addr = dt.address + index * 8; 1354 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, 1355 &ctxt->exception); 1356 } 1357 1358 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, 1359 u16 selector, struct desc_ptr *dt) 1360 { 1361 const struct x86_emulate_ops *ops = ctxt->ops; 1362 u32 base3 = 0; 1363 1364 if (selector & 1 << 2) { 1365 struct desc_struct desc; 1366 u16 sel; 1367 1368 memset (dt, 0, sizeof *dt); 1369 if (!ops->get_segment(ctxt, &sel, &desc, &base3, 1370 VCPU_SREG_LDTR)) 1371 return; 1372 1373 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ 1374 dt->address = get_desc_base(&desc) | ((u64)base3 << 32); 1375 } else 1376 ops->get_gdt(ctxt, dt); 1377 } 1378 1379 /* allowed just for 8 bytes segments */ 1380 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1381 u16 selector, struct desc_struct *desc, 1382 ulong *desc_addr_p) 1383 { 1384 struct desc_ptr dt; 1385 u16 index = selector >> 3; 1386 ulong addr; 1387 1388 get_descriptor_table_ptr(ctxt, selector, &dt); 1389 1390 if (dt.size < index * 8 + 7) 1391 return emulate_gp(ctxt, selector & 0xfffc); 1392 1393 *desc_addr_p = addr = dt.address + index * 8; 1394 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, 1395 &ctxt->exception); 1396 } 1397 1398 /* allowed just for 8 bytes segments */ 1399 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1400 u16 selector, struct desc_struct *desc) 1401 { 1402 struct desc_ptr dt; 1403 u16 index = selector >> 3; 1404 ulong addr; 1405 1406 get_descriptor_table_ptr(ctxt, selector, &dt); 1407 1408 if (dt.size < index * 8 + 7) 1409 return emulate_gp(ctxt, selector & 0xfffc); 1410 1411 addr = dt.address + index * 8; 1412 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, 1413 &ctxt->exception); 1414 } 1415 1416 /* Does not support long mode */ 1417 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1418 u16 selector, int seg, u8 cpl, bool in_task_switch) 1419 { 1420 struct desc_struct seg_desc, old_desc; 1421 u8 dpl, rpl; 1422 unsigned err_vec = GP_VECTOR; 1423 u32 err_code = 0; 1424 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ 1425 ulong desc_addr; 1426 int ret; 1427 u16 dummy; 1428 u32 base3 = 0; 1429 1430 memset(&seg_desc, 0, sizeof seg_desc); 1431 1432 if (ctxt->mode == X86EMUL_MODE_REAL) { 1433 /* set real mode segment descriptor (keep limit etc. for 1434 * unreal mode) */ 1435 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); 1436 set_desc_base(&seg_desc, selector << 4); 1437 goto load; 1438 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { 1439 /* VM86 needs a clean new segment descriptor */ 1440 set_desc_base(&seg_desc, selector << 4); 1441 set_desc_limit(&seg_desc, 0xffff); 1442 seg_desc.type = 3; 1443 seg_desc.p = 1; 1444 seg_desc.s = 1; 1445 seg_desc.dpl = 3; 1446 goto load; 1447 } 1448 1449 rpl = selector & 3; 1450 1451 /* NULL selector is not valid for TR, CS and SS (except for long mode) */ 1452 if ((seg == VCPU_SREG_CS 1453 || (seg == VCPU_SREG_SS 1454 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) 1455 || seg == VCPU_SREG_TR) 1456 && null_selector) 1457 goto exception; 1458 1459 /* TR should be in GDT only */ 1460 if (seg == VCPU_SREG_TR && (selector & (1 << 2))) 1461 goto exception; 1462 1463 if (null_selector) /* for NULL selector skip all following checks */ 1464 goto load; 1465 1466 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); 1467 if (ret != X86EMUL_CONTINUE) 1468 return ret; 1469 1470 err_code = selector & 0xfffc; 1471 err_vec = GP_VECTOR; 1472 1473 /* can't load system descriptor into segment selector */ 1474 if (seg <= VCPU_SREG_GS && !seg_desc.s) 1475 goto exception; 1476 1477 if (!seg_desc.p) { 1478 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; 1479 goto exception; 1480 } 1481 1482 dpl = seg_desc.dpl; 1483 1484 switch (seg) { 1485 case VCPU_SREG_SS: 1486 /* 1487 * segment is not a writable data segment or segment 1488 * selector's RPL != CPL or segment selector's RPL != CPL 1489 */ 1490 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) 1491 goto exception; 1492 break; 1493 case VCPU_SREG_CS: 1494 if (in_task_switch && rpl != dpl) 1495 goto exception; 1496 1497 if (!(seg_desc.type & 8)) 1498 goto exception; 1499 1500 if (seg_desc.type & 4) { 1501 /* conforming */ 1502 if (dpl > cpl) 1503 goto exception; 1504 } else { 1505 /* nonconforming */ 1506 if (rpl > cpl || dpl != cpl) 1507 goto exception; 1508 } 1509 /* CS(RPL) <- CPL */ 1510 selector = (selector & 0xfffc) | cpl; 1511 break; 1512 case VCPU_SREG_TR: 1513 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) 1514 goto exception; 1515 old_desc = seg_desc; 1516 seg_desc.type |= 2; /* busy */ 1517 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, 1518 sizeof(seg_desc), &ctxt->exception); 1519 if (ret != X86EMUL_CONTINUE) 1520 return ret; 1521 break; 1522 case VCPU_SREG_LDTR: 1523 if (seg_desc.s || seg_desc.type != 2) 1524 goto exception; 1525 break; 1526 default: /* DS, ES, FS, or GS */ 1527 /* 1528 * segment is not a data or readable code segment or 1529 * ((segment is a data or nonconforming code segment) 1530 * and (both RPL and CPL > DPL)) 1531 */ 1532 if ((seg_desc.type & 0xa) == 0x8 || 1533 (((seg_desc.type & 0xc) != 0xc) && 1534 (rpl > dpl && cpl > dpl))) 1535 goto exception; 1536 break; 1537 } 1538 1539 if (seg_desc.s) { 1540 /* mark segment as accessed */ 1541 seg_desc.type |= 1; 1542 ret = write_segment_descriptor(ctxt, selector, &seg_desc); 1543 if (ret != X86EMUL_CONTINUE) 1544 return ret; 1545 } else if (ctxt->mode == X86EMUL_MODE_PROT64) { 1546 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, 1547 sizeof(base3), &ctxt->exception); 1548 if (ret != X86EMUL_CONTINUE) 1549 return ret; 1550 } 1551 load: 1552 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); 1553 return X86EMUL_CONTINUE; 1554 exception: 1555 emulate_exception(ctxt, err_vec, err_code, true); 1556 return X86EMUL_PROPAGATE_FAULT; 1557 } 1558 1559 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1560 u16 selector, int seg) 1561 { 1562 u8 cpl = ctxt->ops->cpl(ctxt); 1563 return __load_segment_descriptor(ctxt, selector, seg, cpl, false); 1564 } 1565 1566 static void write_register_operand(struct operand *op) 1567 { 1568 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ 1569 switch (op->bytes) { 1570 case 1: 1571 *(u8 *)op->addr.reg = (u8)op->val; 1572 break; 1573 case 2: 1574 *(u16 *)op->addr.reg = (u16)op->val; 1575 break; 1576 case 4: 1577 *op->addr.reg = (u32)op->val; 1578 break; /* 64b: zero-extend */ 1579 case 8: 1580 *op->addr.reg = op->val; 1581 break; 1582 } 1583 } 1584 1585 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) 1586 { 1587 switch (op->type) { 1588 case OP_REG: 1589 write_register_operand(op); 1590 break; 1591 case OP_MEM: 1592 if (ctxt->lock_prefix) 1593 return segmented_cmpxchg(ctxt, 1594 op->addr.mem, 1595 &op->orig_val, 1596 &op->val, 1597 op->bytes); 1598 else 1599 return segmented_write(ctxt, 1600 op->addr.mem, 1601 &op->val, 1602 op->bytes); 1603 break; 1604 case OP_MEM_STR: 1605 return segmented_write(ctxt, 1606 op->addr.mem, 1607 op->data, 1608 op->bytes * op->count); 1609 break; 1610 case OP_XMM: 1611 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); 1612 break; 1613 case OP_MM: 1614 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); 1615 break; 1616 case OP_NONE: 1617 /* no writeback */ 1618 break; 1619 default: 1620 break; 1621 } 1622 return X86EMUL_CONTINUE; 1623 } 1624 1625 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) 1626 { 1627 struct segmented_address addr; 1628 1629 rsp_increment(ctxt, -bytes); 1630 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); 1631 addr.seg = VCPU_SREG_SS; 1632 1633 return segmented_write(ctxt, addr, data, bytes); 1634 } 1635 1636 static int em_push(struct x86_emulate_ctxt *ctxt) 1637 { 1638 /* Disable writeback. */ 1639 ctxt->dst.type = OP_NONE; 1640 return push(ctxt, &ctxt->src.val, ctxt->op_bytes); 1641 } 1642 1643 static int emulate_pop(struct x86_emulate_ctxt *ctxt, 1644 void *dest, int len) 1645 { 1646 int rc; 1647 struct segmented_address addr; 1648 1649 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); 1650 addr.seg = VCPU_SREG_SS; 1651 rc = segmented_read(ctxt, addr, dest, len); 1652 if (rc != X86EMUL_CONTINUE) 1653 return rc; 1654 1655 rsp_increment(ctxt, len); 1656 return rc; 1657 } 1658 1659 static int em_pop(struct x86_emulate_ctxt *ctxt) 1660 { 1661 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 1662 } 1663 1664 static int emulate_popf(struct x86_emulate_ctxt *ctxt, 1665 void *dest, int len) 1666 { 1667 int rc; 1668 unsigned long val, change_mask; 1669 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; 1670 int cpl = ctxt->ops->cpl(ctxt); 1671 1672 rc = emulate_pop(ctxt, &val, len); 1673 if (rc != X86EMUL_CONTINUE) 1674 return rc; 1675 1676 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF 1677 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; 1678 1679 switch(ctxt->mode) { 1680 case X86EMUL_MODE_PROT64: 1681 case X86EMUL_MODE_PROT32: 1682 case X86EMUL_MODE_PROT16: 1683 if (cpl == 0) 1684 change_mask |= EFLG_IOPL; 1685 if (cpl <= iopl) 1686 change_mask |= EFLG_IF; 1687 break; 1688 case X86EMUL_MODE_VM86: 1689 if (iopl < 3) 1690 return emulate_gp(ctxt, 0); 1691 change_mask |= EFLG_IF; 1692 break; 1693 default: /* real mode */ 1694 change_mask |= (EFLG_IOPL | EFLG_IF); 1695 break; 1696 } 1697 1698 *(unsigned long *)dest = 1699 (ctxt->eflags & ~change_mask) | (val & change_mask); 1700 1701 return rc; 1702 } 1703 1704 static int em_popf(struct x86_emulate_ctxt *ctxt) 1705 { 1706 ctxt->dst.type = OP_REG; 1707 ctxt->dst.addr.reg = &ctxt->eflags; 1708 ctxt->dst.bytes = ctxt->op_bytes; 1709 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); 1710 } 1711 1712 static int em_enter(struct x86_emulate_ctxt *ctxt) 1713 { 1714 int rc; 1715 unsigned frame_size = ctxt->src.val; 1716 unsigned nesting_level = ctxt->src2.val & 31; 1717 ulong rbp; 1718 1719 if (nesting_level) 1720 return X86EMUL_UNHANDLEABLE; 1721 1722 rbp = reg_read(ctxt, VCPU_REGS_RBP); 1723 rc = push(ctxt, &rbp, stack_size(ctxt)); 1724 if (rc != X86EMUL_CONTINUE) 1725 return rc; 1726 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), 1727 stack_mask(ctxt)); 1728 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), 1729 reg_read(ctxt, VCPU_REGS_RSP) - frame_size, 1730 stack_mask(ctxt)); 1731 return X86EMUL_CONTINUE; 1732 } 1733 1734 static int em_leave(struct x86_emulate_ctxt *ctxt) 1735 { 1736 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), 1737 stack_mask(ctxt)); 1738 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes); 1739 } 1740 1741 static int em_push_sreg(struct x86_emulate_ctxt *ctxt) 1742 { 1743 int seg = ctxt->src2.val; 1744 1745 ctxt->src.val = get_segment_selector(ctxt, seg); 1746 1747 return em_push(ctxt); 1748 } 1749 1750 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) 1751 { 1752 int seg = ctxt->src2.val; 1753 unsigned long selector; 1754 int rc; 1755 1756 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); 1757 if (rc != X86EMUL_CONTINUE) 1758 return rc; 1759 1760 if (ctxt->modrm_reg == VCPU_SREG_SS) 1761 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 1762 1763 rc = load_segment_descriptor(ctxt, (u16)selector, seg); 1764 return rc; 1765 } 1766 1767 static int em_pusha(struct x86_emulate_ctxt *ctxt) 1768 { 1769 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); 1770 int rc = X86EMUL_CONTINUE; 1771 int reg = VCPU_REGS_RAX; 1772 1773 while (reg <= VCPU_REGS_RDI) { 1774 (reg == VCPU_REGS_RSP) ? 1775 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); 1776 1777 rc = em_push(ctxt); 1778 if (rc != X86EMUL_CONTINUE) 1779 return rc; 1780 1781 ++reg; 1782 } 1783 1784 return rc; 1785 } 1786 1787 static int em_pushf(struct x86_emulate_ctxt *ctxt) 1788 { 1789 ctxt->src.val = (unsigned long)ctxt->eflags; 1790 return em_push(ctxt); 1791 } 1792 1793 static int em_popa(struct x86_emulate_ctxt *ctxt) 1794 { 1795 int rc = X86EMUL_CONTINUE; 1796 int reg = VCPU_REGS_RDI; 1797 1798 while (reg >= VCPU_REGS_RAX) { 1799 if (reg == VCPU_REGS_RSP) { 1800 rsp_increment(ctxt, ctxt->op_bytes); 1801 --reg; 1802 } 1803 1804 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); 1805 if (rc != X86EMUL_CONTINUE) 1806 break; 1807 --reg; 1808 } 1809 return rc; 1810 } 1811 1812 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) 1813 { 1814 const struct x86_emulate_ops *ops = ctxt->ops; 1815 int rc; 1816 struct desc_ptr dt; 1817 gva_t cs_addr; 1818 gva_t eip_addr; 1819 u16 cs, eip; 1820 1821 /* TODO: Add limit checks */ 1822 ctxt->src.val = ctxt->eflags; 1823 rc = em_push(ctxt); 1824 if (rc != X86EMUL_CONTINUE) 1825 return rc; 1826 1827 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); 1828 1829 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); 1830 rc = em_push(ctxt); 1831 if (rc != X86EMUL_CONTINUE) 1832 return rc; 1833 1834 ctxt->src.val = ctxt->_eip; 1835 rc = em_push(ctxt); 1836 if (rc != X86EMUL_CONTINUE) 1837 return rc; 1838 1839 ops->get_idt(ctxt, &dt); 1840 1841 eip_addr = dt.address + (irq << 2); 1842 cs_addr = dt.address + (irq << 2) + 2; 1843 1844 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); 1845 if (rc != X86EMUL_CONTINUE) 1846 return rc; 1847 1848 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); 1849 if (rc != X86EMUL_CONTINUE) 1850 return rc; 1851 1852 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); 1853 if (rc != X86EMUL_CONTINUE) 1854 return rc; 1855 1856 ctxt->_eip = eip; 1857 1858 return rc; 1859 } 1860 1861 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) 1862 { 1863 int rc; 1864 1865 invalidate_registers(ctxt); 1866 rc = __emulate_int_real(ctxt, irq); 1867 if (rc == X86EMUL_CONTINUE) 1868 writeback_registers(ctxt); 1869 return rc; 1870 } 1871 1872 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) 1873 { 1874 switch(ctxt->mode) { 1875 case X86EMUL_MODE_REAL: 1876 return __emulate_int_real(ctxt, irq); 1877 case X86EMUL_MODE_VM86: 1878 case X86EMUL_MODE_PROT16: 1879 case X86EMUL_MODE_PROT32: 1880 case X86EMUL_MODE_PROT64: 1881 default: 1882 /* Protected mode interrupts unimplemented yet */ 1883 return X86EMUL_UNHANDLEABLE; 1884 } 1885 } 1886 1887 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) 1888 { 1889 int rc = X86EMUL_CONTINUE; 1890 unsigned long temp_eip = 0; 1891 unsigned long temp_eflags = 0; 1892 unsigned long cs = 0; 1893 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | 1894 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | 1895 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ 1896 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; 1897 1898 /* TODO: Add stack limit check */ 1899 1900 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); 1901 1902 if (rc != X86EMUL_CONTINUE) 1903 return rc; 1904 1905 if (temp_eip & ~0xffff) 1906 return emulate_gp(ctxt, 0); 1907 1908 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 1909 1910 if (rc != X86EMUL_CONTINUE) 1911 return rc; 1912 1913 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); 1914 1915 if (rc != X86EMUL_CONTINUE) 1916 return rc; 1917 1918 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); 1919 1920 if (rc != X86EMUL_CONTINUE) 1921 return rc; 1922 1923 ctxt->_eip = temp_eip; 1924 1925 1926 if (ctxt->op_bytes == 4) 1927 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); 1928 else if (ctxt->op_bytes == 2) { 1929 ctxt->eflags &= ~0xffff; 1930 ctxt->eflags |= temp_eflags; 1931 } 1932 1933 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ 1934 ctxt->eflags |= EFLG_RESERVED_ONE_MASK; 1935 1936 return rc; 1937 } 1938 1939 static int em_iret(struct x86_emulate_ctxt *ctxt) 1940 { 1941 switch(ctxt->mode) { 1942 case X86EMUL_MODE_REAL: 1943 return emulate_iret_real(ctxt); 1944 case X86EMUL_MODE_VM86: 1945 case X86EMUL_MODE_PROT16: 1946 case X86EMUL_MODE_PROT32: 1947 case X86EMUL_MODE_PROT64: 1948 default: 1949 /* iret from protected mode unimplemented yet */ 1950 return X86EMUL_UNHANDLEABLE; 1951 } 1952 } 1953 1954 static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 1955 { 1956 int rc; 1957 unsigned short sel; 1958 1959 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 1960 1961 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); 1962 if (rc != X86EMUL_CONTINUE) 1963 return rc; 1964 1965 ctxt->_eip = 0; 1966 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); 1967 return X86EMUL_CONTINUE; 1968 } 1969 1970 static int em_grp45(struct x86_emulate_ctxt *ctxt) 1971 { 1972 int rc = X86EMUL_CONTINUE; 1973 1974 switch (ctxt->modrm_reg) { 1975 case 2: /* call near abs */ { 1976 long int old_eip; 1977 old_eip = ctxt->_eip; 1978 ctxt->_eip = ctxt->src.val; 1979 ctxt->src.val = old_eip; 1980 rc = em_push(ctxt); 1981 break; 1982 } 1983 case 4: /* jmp abs */ 1984 ctxt->_eip = ctxt->src.val; 1985 break; 1986 case 5: /* jmp far */ 1987 rc = em_jmp_far(ctxt); 1988 break; 1989 case 6: /* push */ 1990 rc = em_push(ctxt); 1991 break; 1992 } 1993 return rc; 1994 } 1995 1996 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) 1997 { 1998 u64 old = ctxt->dst.orig_val64; 1999 2000 if (ctxt->dst.bytes == 16) 2001 return X86EMUL_UNHANDLEABLE; 2002 2003 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || 2004 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { 2005 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); 2006 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); 2007 ctxt->eflags &= ~EFLG_ZF; 2008 } else { 2009 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | 2010 (u32) reg_read(ctxt, VCPU_REGS_RBX); 2011 2012 ctxt->eflags |= EFLG_ZF; 2013 } 2014 return X86EMUL_CONTINUE; 2015 } 2016 2017 static int em_ret(struct x86_emulate_ctxt *ctxt) 2018 { 2019 ctxt->dst.type = OP_REG; 2020 ctxt->dst.addr.reg = &ctxt->_eip; 2021 ctxt->dst.bytes = ctxt->op_bytes; 2022 return em_pop(ctxt); 2023 } 2024 2025 static int em_ret_far(struct x86_emulate_ctxt *ctxt) 2026 { 2027 int rc; 2028 unsigned long cs; 2029 int cpl = ctxt->ops->cpl(ctxt); 2030 2031 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); 2032 if (rc != X86EMUL_CONTINUE) 2033 return rc; 2034 if (ctxt->op_bytes == 4) 2035 ctxt->_eip = (u32)ctxt->_eip; 2036 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2037 if (rc != X86EMUL_CONTINUE) 2038 return rc; 2039 /* Outer-privilege level return is not implemented */ 2040 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) 2041 return X86EMUL_UNHANDLEABLE; 2042 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); 2043 return rc; 2044 } 2045 2046 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) 2047 { 2048 int rc; 2049 2050 rc = em_ret_far(ctxt); 2051 if (rc != X86EMUL_CONTINUE) 2052 return rc; 2053 rsp_increment(ctxt, ctxt->src.val); 2054 return X86EMUL_CONTINUE; 2055 } 2056 2057 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2058 { 2059 /* Save real source value, then compare EAX against destination. */ 2060 ctxt->dst.orig_val = ctxt->dst.val; 2061 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); 2062 ctxt->src.orig_val = ctxt->src.val; 2063 ctxt->src.val = ctxt->dst.orig_val; 2064 fastop(ctxt, em_cmp); 2065 2066 if (ctxt->eflags & EFLG_ZF) { 2067 /* Success: write back to memory. */ 2068 ctxt->dst.val = ctxt->src.orig_val; 2069 } else { 2070 /* Failure: write the value we saw to EAX. */ 2071 ctxt->dst.type = OP_REG; 2072 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 2073 ctxt->dst.val = ctxt->dst.orig_val; 2074 } 2075 return X86EMUL_CONTINUE; 2076 } 2077 2078 static int em_lseg(struct x86_emulate_ctxt *ctxt) 2079 { 2080 int seg = ctxt->src2.val; 2081 unsigned short sel; 2082 int rc; 2083 2084 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2085 2086 rc = load_segment_descriptor(ctxt, sel, seg); 2087 if (rc != X86EMUL_CONTINUE) 2088 return rc; 2089 2090 ctxt->dst.val = ctxt->src.val; 2091 return rc; 2092 } 2093 2094 static void 2095 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, 2096 struct desc_struct *cs, struct desc_struct *ss) 2097 { 2098 cs->l = 0; /* will be adjusted later */ 2099 set_desc_base(cs, 0); /* flat segment */ 2100 cs->g = 1; /* 4kb granularity */ 2101 set_desc_limit(cs, 0xfffff); /* 4GB limit */ 2102 cs->type = 0x0b; /* Read, Execute, Accessed */ 2103 cs->s = 1; 2104 cs->dpl = 0; /* will be adjusted later */ 2105 cs->p = 1; 2106 cs->d = 1; 2107 cs->avl = 0; 2108 2109 set_desc_base(ss, 0); /* flat segment */ 2110 set_desc_limit(ss, 0xfffff); /* 4GB limit */ 2111 ss->g = 1; /* 4kb granularity */ 2112 ss->s = 1; 2113 ss->type = 0x03; /* Read/Write, Accessed */ 2114 ss->d = 1; /* 32bit stack segment */ 2115 ss->dpl = 0; 2116 ss->p = 1; 2117 ss->l = 0; 2118 ss->avl = 0; 2119 } 2120 2121 static bool vendor_intel(struct x86_emulate_ctxt *ctxt) 2122 { 2123 u32 eax, ebx, ecx, edx; 2124 2125 eax = ecx = 0; 2126 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2127 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 2128 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 2129 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; 2130 } 2131 2132 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) 2133 { 2134 const struct x86_emulate_ops *ops = ctxt->ops; 2135 u32 eax, ebx, ecx, edx; 2136 2137 /* 2138 * syscall should always be enabled in longmode - so only become 2139 * vendor specific (cpuid) if other modes are active... 2140 */ 2141 if (ctxt->mode == X86EMUL_MODE_PROT64) 2142 return true; 2143 2144 eax = 0x00000000; 2145 ecx = 0x00000000; 2146 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2147 /* 2148 * Intel ("GenuineIntel") 2149 * remark: Intel CPUs only support "syscall" in 64bit 2150 * longmode. Also an 64bit guest with a 2151 * 32bit compat-app running will #UD !! While this 2152 * behaviour can be fixed (by emulating) into AMD 2153 * response - CPUs of AMD can't behave like Intel. 2154 */ 2155 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && 2156 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && 2157 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) 2158 return false; 2159 2160 /* AMD ("AuthenticAMD") */ 2161 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && 2162 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && 2163 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) 2164 return true; 2165 2166 /* AMD ("AMDisbetter!") */ 2167 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && 2168 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && 2169 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) 2170 return true; 2171 2172 /* default: (not Intel, not AMD), apply Intel's stricter rules... */ 2173 return false; 2174 } 2175 2176 static int em_syscall(struct x86_emulate_ctxt *ctxt) 2177 { 2178 const struct x86_emulate_ops *ops = ctxt->ops; 2179 struct desc_struct cs, ss; 2180 u64 msr_data; 2181 u16 cs_sel, ss_sel; 2182 u64 efer = 0; 2183 2184 /* syscall is not available in real mode */ 2185 if (ctxt->mode == X86EMUL_MODE_REAL || 2186 ctxt->mode == X86EMUL_MODE_VM86) 2187 return emulate_ud(ctxt); 2188 2189 if (!(em_syscall_is_enabled(ctxt))) 2190 return emulate_ud(ctxt); 2191 2192 ops->get_msr(ctxt, MSR_EFER, &efer); 2193 setup_syscalls_segments(ctxt, &cs, &ss); 2194 2195 if (!(efer & EFER_SCE)) 2196 return emulate_ud(ctxt); 2197 2198 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2199 msr_data >>= 32; 2200 cs_sel = (u16)(msr_data & 0xfffc); 2201 ss_sel = (u16)(msr_data + 8); 2202 2203 if (efer & EFER_LMA) { 2204 cs.d = 0; 2205 cs.l = 1; 2206 } 2207 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2208 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2209 2210 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; 2211 if (efer & EFER_LMA) { 2212 #ifdef CONFIG_X86_64 2213 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; 2214 2215 ops->get_msr(ctxt, 2216 ctxt->mode == X86EMUL_MODE_PROT64 ? 2217 MSR_LSTAR : MSR_CSTAR, &msr_data); 2218 ctxt->_eip = msr_data; 2219 2220 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); 2221 ctxt->eflags &= ~msr_data; 2222 #endif 2223 } else { 2224 /* legacy mode */ 2225 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2226 ctxt->_eip = (u32)msr_data; 2227 2228 ctxt->eflags &= ~(EFLG_VM | EFLG_IF); 2229 } 2230 2231 return X86EMUL_CONTINUE; 2232 } 2233 2234 static int em_sysenter(struct x86_emulate_ctxt *ctxt) 2235 { 2236 const struct x86_emulate_ops *ops = ctxt->ops; 2237 struct desc_struct cs, ss; 2238 u64 msr_data; 2239 u16 cs_sel, ss_sel; 2240 u64 efer = 0; 2241 2242 ops->get_msr(ctxt, MSR_EFER, &efer); 2243 /* inject #GP if in real mode */ 2244 if (ctxt->mode == X86EMUL_MODE_REAL) 2245 return emulate_gp(ctxt, 0); 2246 2247 /* 2248 * Not recognized on AMD in compat mode (but is recognized in legacy 2249 * mode). 2250 */ 2251 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) 2252 && !vendor_intel(ctxt)) 2253 return emulate_ud(ctxt); 2254 2255 /* XXX sysenter/sysexit have not been tested in 64bit mode. 2256 * Therefore, we inject an #UD. 2257 */ 2258 if (ctxt->mode == X86EMUL_MODE_PROT64) 2259 return emulate_ud(ctxt); 2260 2261 setup_syscalls_segments(ctxt, &cs, &ss); 2262 2263 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2264 switch (ctxt->mode) { 2265 case X86EMUL_MODE_PROT32: 2266 if ((msr_data & 0xfffc) == 0x0) 2267 return emulate_gp(ctxt, 0); 2268 break; 2269 case X86EMUL_MODE_PROT64: 2270 if (msr_data == 0x0) 2271 return emulate_gp(ctxt, 0); 2272 break; 2273 default: 2274 break; 2275 } 2276 2277 ctxt->eflags &= ~(EFLG_VM | EFLG_IF); 2278 cs_sel = (u16)msr_data; 2279 cs_sel &= ~SELECTOR_RPL_MASK; 2280 ss_sel = cs_sel + 8; 2281 ss_sel &= ~SELECTOR_RPL_MASK; 2282 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { 2283 cs.d = 0; 2284 cs.l = 1; 2285 } 2286 2287 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2288 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2289 2290 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2291 ctxt->_eip = msr_data; 2292 2293 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2294 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; 2295 2296 return X86EMUL_CONTINUE; 2297 } 2298 2299 static int em_sysexit(struct x86_emulate_ctxt *ctxt) 2300 { 2301 const struct x86_emulate_ops *ops = ctxt->ops; 2302 struct desc_struct cs, ss; 2303 u64 msr_data; 2304 int usermode; 2305 u16 cs_sel = 0, ss_sel = 0; 2306 2307 /* inject #GP if in real mode or Virtual 8086 mode */ 2308 if (ctxt->mode == X86EMUL_MODE_REAL || 2309 ctxt->mode == X86EMUL_MODE_VM86) 2310 return emulate_gp(ctxt, 0); 2311 2312 setup_syscalls_segments(ctxt, &cs, &ss); 2313 2314 if ((ctxt->rex_prefix & 0x8) != 0x0) 2315 usermode = X86EMUL_MODE_PROT64; 2316 else 2317 usermode = X86EMUL_MODE_PROT32; 2318 2319 cs.dpl = 3; 2320 ss.dpl = 3; 2321 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2322 switch (usermode) { 2323 case X86EMUL_MODE_PROT32: 2324 cs_sel = (u16)(msr_data + 16); 2325 if ((msr_data & 0xfffc) == 0x0) 2326 return emulate_gp(ctxt, 0); 2327 ss_sel = (u16)(msr_data + 24); 2328 break; 2329 case X86EMUL_MODE_PROT64: 2330 cs_sel = (u16)(msr_data + 32); 2331 if (msr_data == 0x0) 2332 return emulate_gp(ctxt, 0); 2333 ss_sel = cs_sel + 8; 2334 cs.d = 0; 2335 cs.l = 1; 2336 break; 2337 } 2338 cs_sel |= SELECTOR_RPL_MASK; 2339 ss_sel |= SELECTOR_RPL_MASK; 2340 2341 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2342 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2343 2344 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); 2345 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); 2346 2347 return X86EMUL_CONTINUE; 2348 } 2349 2350 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) 2351 { 2352 int iopl; 2353 if (ctxt->mode == X86EMUL_MODE_REAL) 2354 return false; 2355 if (ctxt->mode == X86EMUL_MODE_VM86) 2356 return true; 2357 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; 2358 return ctxt->ops->cpl(ctxt) > iopl; 2359 } 2360 2361 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, 2362 u16 port, u16 len) 2363 { 2364 const struct x86_emulate_ops *ops = ctxt->ops; 2365 struct desc_struct tr_seg; 2366 u32 base3; 2367 int r; 2368 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; 2369 unsigned mask = (1 << len) - 1; 2370 unsigned long base; 2371 2372 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); 2373 if (!tr_seg.p) 2374 return false; 2375 if (desc_limit_scaled(&tr_seg) < 103) 2376 return false; 2377 base = get_desc_base(&tr_seg); 2378 #ifdef CONFIG_X86_64 2379 base |= ((u64)base3) << 32; 2380 #endif 2381 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); 2382 if (r != X86EMUL_CONTINUE) 2383 return false; 2384 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2385 return false; 2386 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); 2387 if (r != X86EMUL_CONTINUE) 2388 return false; 2389 if ((perm >> bit_idx) & mask) 2390 return false; 2391 return true; 2392 } 2393 2394 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, 2395 u16 port, u16 len) 2396 { 2397 if (ctxt->perm_ok) 2398 return true; 2399 2400 if (emulator_bad_iopl(ctxt)) 2401 if (!emulator_io_port_access_allowed(ctxt, port, len)) 2402 return false; 2403 2404 ctxt->perm_ok = true; 2405 2406 return true; 2407 } 2408 2409 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, 2410 struct tss_segment_16 *tss) 2411 { 2412 tss->ip = ctxt->_eip; 2413 tss->flag = ctxt->eflags; 2414 tss->ax = reg_read(ctxt, VCPU_REGS_RAX); 2415 tss->cx = reg_read(ctxt, VCPU_REGS_RCX); 2416 tss->dx = reg_read(ctxt, VCPU_REGS_RDX); 2417 tss->bx = reg_read(ctxt, VCPU_REGS_RBX); 2418 tss->sp = reg_read(ctxt, VCPU_REGS_RSP); 2419 tss->bp = reg_read(ctxt, VCPU_REGS_RBP); 2420 tss->si = reg_read(ctxt, VCPU_REGS_RSI); 2421 tss->di = reg_read(ctxt, VCPU_REGS_RDI); 2422 2423 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2424 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2425 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2426 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2427 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); 2428 } 2429 2430 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, 2431 struct tss_segment_16 *tss) 2432 { 2433 int ret; 2434 u8 cpl; 2435 2436 ctxt->_eip = tss->ip; 2437 ctxt->eflags = tss->flag | 2; 2438 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; 2439 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; 2440 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; 2441 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; 2442 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; 2443 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; 2444 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; 2445 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; 2446 2447 /* 2448 * SDM says that segment selectors are loaded before segment 2449 * descriptors 2450 */ 2451 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); 2452 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 2453 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 2454 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 2455 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 2456 2457 cpl = tss->cs & 3; 2458 2459 /* 2460 * Now load segment descriptors. If fault happens at this stage 2461 * it is handled in a context of new task 2462 */ 2463 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true); 2464 if (ret != X86EMUL_CONTINUE) 2465 return ret; 2466 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); 2467 if (ret != X86EMUL_CONTINUE) 2468 return ret; 2469 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); 2470 if (ret != X86EMUL_CONTINUE) 2471 return ret; 2472 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); 2473 if (ret != X86EMUL_CONTINUE) 2474 return ret; 2475 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); 2476 if (ret != X86EMUL_CONTINUE) 2477 return ret; 2478 2479 return X86EMUL_CONTINUE; 2480 } 2481 2482 static int task_switch_16(struct x86_emulate_ctxt *ctxt, 2483 u16 tss_selector, u16 old_tss_sel, 2484 ulong old_tss_base, struct desc_struct *new_desc) 2485 { 2486 const struct x86_emulate_ops *ops = ctxt->ops; 2487 struct tss_segment_16 tss_seg; 2488 int ret; 2489 u32 new_tss_base = get_desc_base(new_desc); 2490 2491 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2492 &ctxt->exception); 2493 if (ret != X86EMUL_CONTINUE) 2494 /* FIXME: need to provide precise fault address */ 2495 return ret; 2496 2497 save_state_to_tss16(ctxt, &tss_seg); 2498 2499 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2500 &ctxt->exception); 2501 if (ret != X86EMUL_CONTINUE) 2502 /* FIXME: need to provide precise fault address */ 2503 return ret; 2504 2505 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 2506 &ctxt->exception); 2507 if (ret != X86EMUL_CONTINUE) 2508 /* FIXME: need to provide precise fault address */ 2509 return ret; 2510 2511 if (old_tss_sel != 0xffff) { 2512 tss_seg.prev_task_link = old_tss_sel; 2513 2514 ret = ops->write_std(ctxt, new_tss_base, 2515 &tss_seg.prev_task_link, 2516 sizeof tss_seg.prev_task_link, 2517 &ctxt->exception); 2518 if (ret != X86EMUL_CONTINUE) 2519 /* FIXME: need to provide precise fault address */ 2520 return ret; 2521 } 2522 2523 return load_state_from_tss16(ctxt, &tss_seg); 2524 } 2525 2526 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, 2527 struct tss_segment_32 *tss) 2528 { 2529 /* CR3 and ldt selector are not saved intentionally */ 2530 tss->eip = ctxt->_eip; 2531 tss->eflags = ctxt->eflags; 2532 tss->eax = reg_read(ctxt, VCPU_REGS_RAX); 2533 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); 2534 tss->edx = reg_read(ctxt, VCPU_REGS_RDX); 2535 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); 2536 tss->esp = reg_read(ctxt, VCPU_REGS_RSP); 2537 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); 2538 tss->esi = reg_read(ctxt, VCPU_REGS_RSI); 2539 tss->edi = reg_read(ctxt, VCPU_REGS_RDI); 2540 2541 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2542 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2543 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2544 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2545 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); 2546 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); 2547 } 2548 2549 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, 2550 struct tss_segment_32 *tss) 2551 { 2552 int ret; 2553 u8 cpl; 2554 2555 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) 2556 return emulate_gp(ctxt, 0); 2557 ctxt->_eip = tss->eip; 2558 ctxt->eflags = tss->eflags | 2; 2559 2560 /* General purpose registers */ 2561 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; 2562 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; 2563 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; 2564 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; 2565 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; 2566 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; 2567 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; 2568 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; 2569 2570 /* 2571 * SDM says that segment selectors are loaded before segment 2572 * descriptors. This is important because CPL checks will 2573 * use CS.RPL. 2574 */ 2575 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); 2576 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 2577 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 2578 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 2579 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 2580 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); 2581 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); 2582 2583 /* 2584 * If we're switching between Protected Mode and VM86, we need to make 2585 * sure to update the mode before loading the segment descriptors so 2586 * that the selectors are interpreted correctly. 2587 */ 2588 if (ctxt->eflags & X86_EFLAGS_VM) { 2589 ctxt->mode = X86EMUL_MODE_VM86; 2590 cpl = 3; 2591 } else { 2592 ctxt->mode = X86EMUL_MODE_PROT32; 2593 cpl = tss->cs & 3; 2594 } 2595 2596 /* 2597 * Now load segment descriptors. If fault happenes at this stage 2598 * it is handled in a context of new task 2599 */ 2600 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true); 2601 if (ret != X86EMUL_CONTINUE) 2602 return ret; 2603 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); 2604 if (ret != X86EMUL_CONTINUE) 2605 return ret; 2606 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); 2607 if (ret != X86EMUL_CONTINUE) 2608 return ret; 2609 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); 2610 if (ret != X86EMUL_CONTINUE) 2611 return ret; 2612 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); 2613 if (ret != X86EMUL_CONTINUE) 2614 return ret; 2615 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true); 2616 if (ret != X86EMUL_CONTINUE) 2617 return ret; 2618 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true); 2619 if (ret != X86EMUL_CONTINUE) 2620 return ret; 2621 2622 return X86EMUL_CONTINUE; 2623 } 2624 2625 static int task_switch_32(struct x86_emulate_ctxt *ctxt, 2626 u16 tss_selector, u16 old_tss_sel, 2627 ulong old_tss_base, struct desc_struct *new_desc) 2628 { 2629 const struct x86_emulate_ops *ops = ctxt->ops; 2630 struct tss_segment_32 tss_seg; 2631 int ret; 2632 u32 new_tss_base = get_desc_base(new_desc); 2633 u32 eip_offset = offsetof(struct tss_segment_32, eip); 2634 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 2635 2636 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2637 &ctxt->exception); 2638 if (ret != X86EMUL_CONTINUE) 2639 /* FIXME: need to provide precise fault address */ 2640 return ret; 2641 2642 save_state_to_tss32(ctxt, &tss_seg); 2643 2644 /* Only GP registers and segment selectors are saved */ 2645 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, 2646 ldt_sel_offset - eip_offset, &ctxt->exception); 2647 if (ret != X86EMUL_CONTINUE) 2648 /* FIXME: need to provide precise fault address */ 2649 return ret; 2650 2651 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 2652 &ctxt->exception); 2653 if (ret != X86EMUL_CONTINUE) 2654 /* FIXME: need to provide precise fault address */ 2655 return ret; 2656 2657 if (old_tss_sel != 0xffff) { 2658 tss_seg.prev_task_link = old_tss_sel; 2659 2660 ret = ops->write_std(ctxt, new_tss_base, 2661 &tss_seg.prev_task_link, 2662 sizeof tss_seg.prev_task_link, 2663 &ctxt->exception); 2664 if (ret != X86EMUL_CONTINUE) 2665 /* FIXME: need to provide precise fault address */ 2666 return ret; 2667 } 2668 2669 return load_state_from_tss32(ctxt, &tss_seg); 2670 } 2671 2672 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, 2673 u16 tss_selector, int idt_index, int reason, 2674 bool has_error_code, u32 error_code) 2675 { 2676 const struct x86_emulate_ops *ops = ctxt->ops; 2677 struct desc_struct curr_tss_desc, next_tss_desc; 2678 int ret; 2679 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); 2680 ulong old_tss_base = 2681 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); 2682 u32 desc_limit; 2683 ulong desc_addr; 2684 2685 /* FIXME: old_tss_base == ~0 ? */ 2686 2687 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); 2688 if (ret != X86EMUL_CONTINUE) 2689 return ret; 2690 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); 2691 if (ret != X86EMUL_CONTINUE) 2692 return ret; 2693 2694 /* FIXME: check that next_tss_desc is tss */ 2695 2696 /* 2697 * Check privileges. The three cases are task switch caused by... 2698 * 2699 * 1. jmp/call/int to task gate: Check against DPL of the task gate 2700 * 2. Exception/IRQ/iret: No check is performed 2701 * 3. jmp/call to TSS: Check against DPL of the TSS 2702 */ 2703 if (reason == TASK_SWITCH_GATE) { 2704 if (idt_index != -1) { 2705 /* Software interrupts */ 2706 struct desc_struct task_gate_desc; 2707 int dpl; 2708 2709 ret = read_interrupt_descriptor(ctxt, idt_index, 2710 &task_gate_desc); 2711 if (ret != X86EMUL_CONTINUE) 2712 return ret; 2713 2714 dpl = task_gate_desc.dpl; 2715 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 2716 return emulate_gp(ctxt, (idt_index << 3) | 0x2); 2717 } 2718 } else if (reason != TASK_SWITCH_IRET) { 2719 int dpl = next_tss_desc.dpl; 2720 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 2721 return emulate_gp(ctxt, tss_selector); 2722 } 2723 2724 2725 desc_limit = desc_limit_scaled(&next_tss_desc); 2726 if (!next_tss_desc.p || 2727 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || 2728 desc_limit < 0x2b)) { 2729 emulate_ts(ctxt, tss_selector & 0xfffc); 2730 return X86EMUL_PROPAGATE_FAULT; 2731 } 2732 2733 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 2734 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ 2735 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); 2736 } 2737 2738 if (reason == TASK_SWITCH_IRET) 2739 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; 2740 2741 /* set back link to prev task only if NT bit is set in eflags 2742 note that old_tss_sel is not used after this point */ 2743 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) 2744 old_tss_sel = 0xffff; 2745 2746 if (next_tss_desc.type & 8) 2747 ret = task_switch_32(ctxt, tss_selector, old_tss_sel, 2748 old_tss_base, &next_tss_desc); 2749 else 2750 ret = task_switch_16(ctxt, tss_selector, old_tss_sel, 2751 old_tss_base, &next_tss_desc); 2752 if (ret != X86EMUL_CONTINUE) 2753 return ret; 2754 2755 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) 2756 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; 2757 2758 if (reason != TASK_SWITCH_IRET) { 2759 next_tss_desc.type |= (1 << 1); /* set busy flag */ 2760 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); 2761 } 2762 2763 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); 2764 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); 2765 2766 if (has_error_code) { 2767 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; 2768 ctxt->lock_prefix = 0; 2769 ctxt->src.val = (unsigned long) error_code; 2770 ret = em_push(ctxt); 2771 } 2772 2773 return ret; 2774 } 2775 2776 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 2777 u16 tss_selector, int idt_index, int reason, 2778 bool has_error_code, u32 error_code) 2779 { 2780 int rc; 2781 2782 invalidate_registers(ctxt); 2783 ctxt->_eip = ctxt->eip; 2784 ctxt->dst.type = OP_NONE; 2785 2786 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, 2787 has_error_code, error_code); 2788 2789 if (rc == X86EMUL_CONTINUE) { 2790 ctxt->eip = ctxt->_eip; 2791 writeback_registers(ctxt); 2792 } 2793 2794 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 2795 } 2796 2797 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, 2798 struct operand *op) 2799 { 2800 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count; 2801 2802 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes); 2803 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg)); 2804 } 2805 2806 static int em_das(struct x86_emulate_ctxt *ctxt) 2807 { 2808 u8 al, old_al; 2809 bool af, cf, old_cf; 2810 2811 cf = ctxt->eflags & X86_EFLAGS_CF; 2812 al = ctxt->dst.val; 2813 2814 old_al = al; 2815 old_cf = cf; 2816 cf = false; 2817 af = ctxt->eflags & X86_EFLAGS_AF; 2818 if ((al & 0x0f) > 9 || af) { 2819 al -= 6; 2820 cf = old_cf | (al >= 250); 2821 af = true; 2822 } else { 2823 af = false; 2824 } 2825 if (old_al > 0x99 || old_cf) { 2826 al -= 0x60; 2827 cf = true; 2828 } 2829 2830 ctxt->dst.val = al; 2831 /* Set PF, ZF, SF */ 2832 ctxt->src.type = OP_IMM; 2833 ctxt->src.val = 0; 2834 ctxt->src.bytes = 1; 2835 fastop(ctxt, em_or); 2836 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); 2837 if (cf) 2838 ctxt->eflags |= X86_EFLAGS_CF; 2839 if (af) 2840 ctxt->eflags |= X86_EFLAGS_AF; 2841 return X86EMUL_CONTINUE; 2842 } 2843 2844 static int em_aam(struct x86_emulate_ctxt *ctxt) 2845 { 2846 u8 al, ah; 2847 2848 if (ctxt->src.val == 0) 2849 return emulate_de(ctxt); 2850 2851 al = ctxt->dst.val & 0xff; 2852 ah = al / ctxt->src.val; 2853 al %= ctxt->src.val; 2854 2855 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); 2856 2857 /* Set PF, ZF, SF */ 2858 ctxt->src.type = OP_IMM; 2859 ctxt->src.val = 0; 2860 ctxt->src.bytes = 1; 2861 fastop(ctxt, em_or); 2862 2863 return X86EMUL_CONTINUE; 2864 } 2865 2866 static int em_aad(struct x86_emulate_ctxt *ctxt) 2867 { 2868 u8 al = ctxt->dst.val & 0xff; 2869 u8 ah = (ctxt->dst.val >> 8) & 0xff; 2870 2871 al = (al + (ah * ctxt->src.val)) & 0xff; 2872 2873 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; 2874 2875 /* Set PF, ZF, SF */ 2876 ctxt->src.type = OP_IMM; 2877 ctxt->src.val = 0; 2878 ctxt->src.bytes = 1; 2879 fastop(ctxt, em_or); 2880 2881 return X86EMUL_CONTINUE; 2882 } 2883 2884 static int em_call(struct x86_emulate_ctxt *ctxt) 2885 { 2886 long rel = ctxt->src.val; 2887 2888 ctxt->src.val = (unsigned long)ctxt->_eip; 2889 jmp_rel(ctxt, rel); 2890 return em_push(ctxt); 2891 } 2892 2893 static int em_call_far(struct x86_emulate_ctxt *ctxt) 2894 { 2895 u16 sel, old_cs; 2896 ulong old_eip; 2897 int rc; 2898 2899 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2900 old_eip = ctxt->_eip; 2901 2902 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2903 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) 2904 return X86EMUL_CONTINUE; 2905 2906 ctxt->_eip = 0; 2907 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); 2908 2909 ctxt->src.val = old_cs; 2910 rc = em_push(ctxt); 2911 if (rc != X86EMUL_CONTINUE) 2912 return rc; 2913 2914 ctxt->src.val = old_eip; 2915 return em_push(ctxt); 2916 } 2917 2918 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) 2919 { 2920 int rc; 2921 2922 ctxt->dst.type = OP_REG; 2923 ctxt->dst.addr.reg = &ctxt->_eip; 2924 ctxt->dst.bytes = ctxt->op_bytes; 2925 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 2926 if (rc != X86EMUL_CONTINUE) 2927 return rc; 2928 rsp_increment(ctxt, ctxt->src.val); 2929 return X86EMUL_CONTINUE; 2930 } 2931 2932 static int em_xchg(struct x86_emulate_ctxt *ctxt) 2933 { 2934 /* Write back the register source. */ 2935 ctxt->src.val = ctxt->dst.val; 2936 write_register_operand(&ctxt->src); 2937 2938 /* Write back the memory destination with implicit LOCK prefix. */ 2939 ctxt->dst.val = ctxt->src.orig_val; 2940 ctxt->lock_prefix = 1; 2941 return X86EMUL_CONTINUE; 2942 } 2943 2944 static int em_imul_3op(struct x86_emulate_ctxt *ctxt) 2945 { 2946 ctxt->dst.val = ctxt->src2.val; 2947 return fastop(ctxt, em_imul); 2948 } 2949 2950 static int em_cwd(struct x86_emulate_ctxt *ctxt) 2951 { 2952 ctxt->dst.type = OP_REG; 2953 ctxt->dst.bytes = ctxt->src.bytes; 2954 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 2955 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); 2956 2957 return X86EMUL_CONTINUE; 2958 } 2959 2960 static int em_rdtsc(struct x86_emulate_ctxt *ctxt) 2961 { 2962 u64 tsc = 0; 2963 2964 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); 2965 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; 2966 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; 2967 return X86EMUL_CONTINUE; 2968 } 2969 2970 static int em_rdpmc(struct x86_emulate_ctxt *ctxt) 2971 { 2972 u64 pmc; 2973 2974 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) 2975 return emulate_gp(ctxt, 0); 2976 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; 2977 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; 2978 return X86EMUL_CONTINUE; 2979 } 2980 2981 static int em_mov(struct x86_emulate_ctxt *ctxt) 2982 { 2983 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); 2984 return X86EMUL_CONTINUE; 2985 } 2986 2987 #define FFL(x) bit(X86_FEATURE_##x) 2988 2989 static int em_movbe(struct x86_emulate_ctxt *ctxt) 2990 { 2991 u32 ebx, ecx, edx, eax = 1; 2992 u16 tmp; 2993 2994 /* 2995 * Check MOVBE is set in the guest-visible CPUID leaf. 2996 */ 2997 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2998 if (!(ecx & FFL(MOVBE))) 2999 return emulate_ud(ctxt); 3000 3001 switch (ctxt->op_bytes) { 3002 case 2: 3003 /* 3004 * From MOVBE definition: "...When the operand size is 16 bits, 3005 * the upper word of the destination register remains unchanged 3006 * ..." 3007 * 3008 * Both casting ->valptr and ->val to u16 breaks strict aliasing 3009 * rules so we have to do the operation almost per hand. 3010 */ 3011 tmp = (u16)ctxt->src.val; 3012 ctxt->dst.val &= ~0xffffUL; 3013 ctxt->dst.val |= (unsigned long)swab16(tmp); 3014 break; 3015 case 4: 3016 ctxt->dst.val = swab32((u32)ctxt->src.val); 3017 break; 3018 case 8: 3019 ctxt->dst.val = swab64(ctxt->src.val); 3020 break; 3021 default: 3022 return X86EMUL_PROPAGATE_FAULT; 3023 } 3024 return X86EMUL_CONTINUE; 3025 } 3026 3027 static int em_cr_write(struct x86_emulate_ctxt *ctxt) 3028 { 3029 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) 3030 return emulate_gp(ctxt, 0); 3031 3032 /* Disable writeback. */ 3033 ctxt->dst.type = OP_NONE; 3034 return X86EMUL_CONTINUE; 3035 } 3036 3037 static int em_dr_write(struct x86_emulate_ctxt *ctxt) 3038 { 3039 unsigned long val; 3040 3041 if (ctxt->mode == X86EMUL_MODE_PROT64) 3042 val = ctxt->src.val & ~0ULL; 3043 else 3044 val = ctxt->src.val & ~0U; 3045 3046 /* #UD condition is already handled. */ 3047 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) 3048 return emulate_gp(ctxt, 0); 3049 3050 /* Disable writeback. */ 3051 ctxt->dst.type = OP_NONE; 3052 return X86EMUL_CONTINUE; 3053 } 3054 3055 static int em_wrmsr(struct x86_emulate_ctxt *ctxt) 3056 { 3057 u64 msr_data; 3058 3059 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) 3060 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); 3061 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) 3062 return emulate_gp(ctxt, 0); 3063 3064 return X86EMUL_CONTINUE; 3065 } 3066 3067 static int em_rdmsr(struct x86_emulate_ctxt *ctxt) 3068 { 3069 u64 msr_data; 3070 3071 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) 3072 return emulate_gp(ctxt, 0); 3073 3074 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; 3075 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; 3076 return X86EMUL_CONTINUE; 3077 } 3078 3079 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) 3080 { 3081 if (ctxt->modrm_reg > VCPU_SREG_GS) 3082 return emulate_ud(ctxt); 3083 3084 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); 3085 return X86EMUL_CONTINUE; 3086 } 3087 3088 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) 3089 { 3090 u16 sel = ctxt->src.val; 3091 3092 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) 3093 return emulate_ud(ctxt); 3094 3095 if (ctxt->modrm_reg == VCPU_SREG_SS) 3096 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 3097 3098 /* Disable writeback. */ 3099 ctxt->dst.type = OP_NONE; 3100 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); 3101 } 3102 3103 static int em_lldt(struct x86_emulate_ctxt *ctxt) 3104 { 3105 u16 sel = ctxt->src.val; 3106 3107 /* Disable writeback. */ 3108 ctxt->dst.type = OP_NONE; 3109 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); 3110 } 3111 3112 static int em_ltr(struct x86_emulate_ctxt *ctxt) 3113 { 3114 u16 sel = ctxt->src.val; 3115 3116 /* Disable writeback. */ 3117 ctxt->dst.type = OP_NONE; 3118 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); 3119 } 3120 3121 static int em_invlpg(struct x86_emulate_ctxt *ctxt) 3122 { 3123 int rc; 3124 ulong linear; 3125 3126 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); 3127 if (rc == X86EMUL_CONTINUE) 3128 ctxt->ops->invlpg(ctxt, linear); 3129 /* Disable writeback. */ 3130 ctxt->dst.type = OP_NONE; 3131 return X86EMUL_CONTINUE; 3132 } 3133 3134 static int em_clts(struct x86_emulate_ctxt *ctxt) 3135 { 3136 ulong cr0; 3137 3138 cr0 = ctxt->ops->get_cr(ctxt, 0); 3139 cr0 &= ~X86_CR0_TS; 3140 ctxt->ops->set_cr(ctxt, 0, cr0); 3141 return X86EMUL_CONTINUE; 3142 } 3143 3144 static int em_vmcall(struct x86_emulate_ctxt *ctxt) 3145 { 3146 int rc; 3147 3148 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1) 3149 return X86EMUL_UNHANDLEABLE; 3150 3151 rc = ctxt->ops->fix_hypercall(ctxt); 3152 if (rc != X86EMUL_CONTINUE) 3153 return rc; 3154 3155 /* Let the processor re-execute the fixed hypercall */ 3156 ctxt->_eip = ctxt->eip; 3157 /* Disable writeback. */ 3158 ctxt->dst.type = OP_NONE; 3159 return X86EMUL_CONTINUE; 3160 } 3161 3162 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, 3163 void (*get)(struct x86_emulate_ctxt *ctxt, 3164 struct desc_ptr *ptr)) 3165 { 3166 struct desc_ptr desc_ptr; 3167 3168 if (ctxt->mode == X86EMUL_MODE_PROT64) 3169 ctxt->op_bytes = 8; 3170 get(ctxt, &desc_ptr); 3171 if (ctxt->op_bytes == 2) { 3172 ctxt->op_bytes = 4; 3173 desc_ptr.address &= 0x00ffffff; 3174 } 3175 /* Disable writeback. */ 3176 ctxt->dst.type = OP_NONE; 3177 return segmented_write(ctxt, ctxt->dst.addr.mem, 3178 &desc_ptr, 2 + ctxt->op_bytes); 3179 } 3180 3181 static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3182 { 3183 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); 3184 } 3185 3186 static int em_sidt(struct x86_emulate_ctxt *ctxt) 3187 { 3188 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); 3189 } 3190 3191 static int em_lgdt(struct x86_emulate_ctxt *ctxt) 3192 { 3193 struct desc_ptr desc_ptr; 3194 int rc; 3195 3196 if (ctxt->mode == X86EMUL_MODE_PROT64) 3197 ctxt->op_bytes = 8; 3198 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3199 &desc_ptr.size, &desc_ptr.address, 3200 ctxt->op_bytes); 3201 if (rc != X86EMUL_CONTINUE) 3202 return rc; 3203 ctxt->ops->set_gdt(ctxt, &desc_ptr); 3204 /* Disable writeback. */ 3205 ctxt->dst.type = OP_NONE; 3206 return X86EMUL_CONTINUE; 3207 } 3208 3209 static int em_vmmcall(struct x86_emulate_ctxt *ctxt) 3210 { 3211 int rc; 3212 3213 rc = ctxt->ops->fix_hypercall(ctxt); 3214 3215 /* Disable writeback. */ 3216 ctxt->dst.type = OP_NONE; 3217 return rc; 3218 } 3219 3220 static int em_lidt(struct x86_emulate_ctxt *ctxt) 3221 { 3222 struct desc_ptr desc_ptr; 3223 int rc; 3224 3225 if (ctxt->mode == X86EMUL_MODE_PROT64) 3226 ctxt->op_bytes = 8; 3227 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3228 &desc_ptr.size, &desc_ptr.address, 3229 ctxt->op_bytes); 3230 if (rc != X86EMUL_CONTINUE) 3231 return rc; 3232 ctxt->ops->set_idt(ctxt, &desc_ptr); 3233 /* Disable writeback. */ 3234 ctxt->dst.type = OP_NONE; 3235 return X86EMUL_CONTINUE; 3236 } 3237 3238 static int em_smsw(struct x86_emulate_ctxt *ctxt) 3239 { 3240 if (ctxt->dst.type == OP_MEM) 3241 ctxt->dst.bytes = 2; 3242 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); 3243 return X86EMUL_CONTINUE; 3244 } 3245 3246 static int em_lmsw(struct x86_emulate_ctxt *ctxt) 3247 { 3248 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) 3249 | (ctxt->src.val & 0x0f)); 3250 ctxt->dst.type = OP_NONE; 3251 return X86EMUL_CONTINUE; 3252 } 3253 3254 static int em_loop(struct x86_emulate_ctxt *ctxt) 3255 { 3256 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); 3257 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3258 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3259 jmp_rel(ctxt, ctxt->src.val); 3260 3261 return X86EMUL_CONTINUE; 3262 } 3263 3264 static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3265 { 3266 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3267 jmp_rel(ctxt, ctxt->src.val); 3268 3269 return X86EMUL_CONTINUE; 3270 } 3271 3272 static int em_in(struct x86_emulate_ctxt *ctxt) 3273 { 3274 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, 3275 &ctxt->dst.val)) 3276 return X86EMUL_IO_NEEDED; 3277 3278 return X86EMUL_CONTINUE; 3279 } 3280 3281 static int em_out(struct x86_emulate_ctxt *ctxt) 3282 { 3283 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, 3284 &ctxt->src.val, 1); 3285 /* Disable writeback. */ 3286 ctxt->dst.type = OP_NONE; 3287 return X86EMUL_CONTINUE; 3288 } 3289 3290 static int em_cli(struct x86_emulate_ctxt *ctxt) 3291 { 3292 if (emulator_bad_iopl(ctxt)) 3293 return emulate_gp(ctxt, 0); 3294 3295 ctxt->eflags &= ~X86_EFLAGS_IF; 3296 return X86EMUL_CONTINUE; 3297 } 3298 3299 static int em_sti(struct x86_emulate_ctxt *ctxt) 3300 { 3301 if (emulator_bad_iopl(ctxt)) 3302 return emulate_gp(ctxt, 0); 3303 3304 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3305 ctxt->eflags |= X86_EFLAGS_IF; 3306 return X86EMUL_CONTINUE; 3307 } 3308 3309 static int em_cpuid(struct x86_emulate_ctxt *ctxt) 3310 { 3311 u32 eax, ebx, ecx, edx; 3312 3313 eax = reg_read(ctxt, VCPU_REGS_RAX); 3314 ecx = reg_read(ctxt, VCPU_REGS_RCX); 3315 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 3316 *reg_write(ctxt, VCPU_REGS_RAX) = eax; 3317 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; 3318 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; 3319 *reg_write(ctxt, VCPU_REGS_RDX) = edx; 3320 return X86EMUL_CONTINUE; 3321 } 3322 3323 static int em_sahf(struct x86_emulate_ctxt *ctxt) 3324 { 3325 u32 flags; 3326 3327 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; 3328 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; 3329 3330 ctxt->eflags &= ~0xffUL; 3331 ctxt->eflags |= flags | X86_EFLAGS_FIXED; 3332 return X86EMUL_CONTINUE; 3333 } 3334 3335 static int em_lahf(struct x86_emulate_ctxt *ctxt) 3336 { 3337 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; 3338 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; 3339 return X86EMUL_CONTINUE; 3340 } 3341 3342 static int em_bswap(struct x86_emulate_ctxt *ctxt) 3343 { 3344 switch (ctxt->op_bytes) { 3345 #ifdef CONFIG_X86_64 3346 case 8: 3347 asm("bswap %0" : "+r"(ctxt->dst.val)); 3348 break; 3349 #endif 3350 default: 3351 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); 3352 break; 3353 } 3354 return X86EMUL_CONTINUE; 3355 } 3356 3357 static bool valid_cr(int nr) 3358 { 3359 switch (nr) { 3360 case 0: 3361 case 2 ... 4: 3362 case 8: 3363 return true; 3364 default: 3365 return false; 3366 } 3367 } 3368 3369 static int check_cr_read(struct x86_emulate_ctxt *ctxt) 3370 { 3371 if (!valid_cr(ctxt->modrm_reg)) 3372 return emulate_ud(ctxt); 3373 3374 return X86EMUL_CONTINUE; 3375 } 3376 3377 static int check_cr_write(struct x86_emulate_ctxt *ctxt) 3378 { 3379 u64 new_val = ctxt->src.val64; 3380 int cr = ctxt->modrm_reg; 3381 u64 efer = 0; 3382 3383 static u64 cr_reserved_bits[] = { 3384 0xffffffff00000000ULL, 3385 0, 0, 0, /* CR3 checked later */ 3386 CR4_RESERVED_BITS, 3387 0, 0, 0, 3388 CR8_RESERVED_BITS, 3389 }; 3390 3391 if (!valid_cr(cr)) 3392 return emulate_ud(ctxt); 3393 3394 if (new_val & cr_reserved_bits[cr]) 3395 return emulate_gp(ctxt, 0); 3396 3397 switch (cr) { 3398 case 0: { 3399 u64 cr4; 3400 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || 3401 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) 3402 return emulate_gp(ctxt, 0); 3403 3404 cr4 = ctxt->ops->get_cr(ctxt, 4); 3405 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3406 3407 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && 3408 !(cr4 & X86_CR4_PAE)) 3409 return emulate_gp(ctxt, 0); 3410 3411 break; 3412 } 3413 case 3: { 3414 u64 rsvd = 0; 3415 3416 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3417 if (efer & EFER_LMA) 3418 rsvd = CR3_L_MODE_RESERVED_BITS; 3419 3420 if (new_val & rsvd) 3421 return emulate_gp(ctxt, 0); 3422 3423 break; 3424 } 3425 case 4: { 3426 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3427 3428 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) 3429 return emulate_gp(ctxt, 0); 3430 3431 break; 3432 } 3433 } 3434 3435 return X86EMUL_CONTINUE; 3436 } 3437 3438 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) 3439 { 3440 unsigned long dr7; 3441 3442 ctxt->ops->get_dr(ctxt, 7, &dr7); 3443 3444 /* Check if DR7.Global_Enable is set */ 3445 return dr7 & (1 << 13); 3446 } 3447 3448 static int check_dr_read(struct x86_emulate_ctxt *ctxt) 3449 { 3450 int dr = ctxt->modrm_reg; 3451 u64 cr4; 3452 3453 if (dr > 7) 3454 return emulate_ud(ctxt); 3455 3456 cr4 = ctxt->ops->get_cr(ctxt, 4); 3457 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) 3458 return emulate_ud(ctxt); 3459 3460 if (check_dr7_gd(ctxt)) 3461 return emulate_db(ctxt); 3462 3463 return X86EMUL_CONTINUE; 3464 } 3465 3466 static int check_dr_write(struct x86_emulate_ctxt *ctxt) 3467 { 3468 u64 new_val = ctxt->src.val64; 3469 int dr = ctxt->modrm_reg; 3470 3471 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) 3472 return emulate_gp(ctxt, 0); 3473 3474 return check_dr_read(ctxt); 3475 } 3476 3477 static int check_svme(struct x86_emulate_ctxt *ctxt) 3478 { 3479 u64 efer; 3480 3481 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3482 3483 if (!(efer & EFER_SVME)) 3484 return emulate_ud(ctxt); 3485 3486 return X86EMUL_CONTINUE; 3487 } 3488 3489 static int check_svme_pa(struct x86_emulate_ctxt *ctxt) 3490 { 3491 u64 rax = reg_read(ctxt, VCPU_REGS_RAX); 3492 3493 /* Valid physical address? */ 3494 if (rax & 0xffff000000000000ULL) 3495 return emulate_gp(ctxt, 0); 3496 3497 return check_svme(ctxt); 3498 } 3499 3500 static int check_rdtsc(struct x86_emulate_ctxt *ctxt) 3501 { 3502 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3503 3504 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) 3505 return emulate_ud(ctxt); 3506 3507 return X86EMUL_CONTINUE; 3508 } 3509 3510 static int check_rdpmc(struct x86_emulate_ctxt *ctxt) 3511 { 3512 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3513 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); 3514 3515 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || 3516 ctxt->ops->check_pmc(ctxt, rcx)) 3517 return emulate_gp(ctxt, 0); 3518 3519 return X86EMUL_CONTINUE; 3520 } 3521 3522 static int check_perm_in(struct x86_emulate_ctxt *ctxt) 3523 { 3524 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); 3525 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) 3526 return emulate_gp(ctxt, 0); 3527 3528 return X86EMUL_CONTINUE; 3529 } 3530 3531 static int check_perm_out(struct x86_emulate_ctxt *ctxt) 3532 { 3533 ctxt->src.bytes = min(ctxt->src.bytes, 4u); 3534 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) 3535 return emulate_gp(ctxt, 0); 3536 3537 return X86EMUL_CONTINUE; 3538 } 3539 3540 #define D(_y) { .flags = (_y) } 3541 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } 3542 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ 3543 .intercept = x86_intercept_##_i, .check_perm = (_p) } 3544 #define N D(NotImpl) 3545 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } 3546 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } 3547 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } 3548 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } 3549 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } 3550 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } 3551 #define II(_f, _e, _i) \ 3552 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } 3553 #define IIP(_f, _e, _i, _p) \ 3554 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ 3555 .intercept = x86_intercept_##_i, .check_perm = (_p) } 3556 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } 3557 3558 #define D2bv(_f) D((_f) | ByteOp), D(_f) 3559 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) 3560 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) 3561 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) 3562 #define I2bvIP(_f, _e, _i, _p) \ 3563 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) 3564 3565 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ 3566 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ 3567 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) 3568 3569 static const struct opcode group7_rm1[] = { 3570 DI(SrcNone | Priv, monitor), 3571 DI(SrcNone | Priv, mwait), 3572 N, N, N, N, N, N, 3573 }; 3574 3575 static const struct opcode group7_rm3[] = { 3576 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), 3577 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), 3578 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), 3579 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), 3580 DIP(SrcNone | Prot | Priv, stgi, check_svme), 3581 DIP(SrcNone | Prot | Priv, clgi, check_svme), 3582 DIP(SrcNone | Prot | Priv, skinit, check_svme), 3583 DIP(SrcNone | Prot | Priv, invlpga, check_svme), 3584 }; 3585 3586 static const struct opcode group7_rm7[] = { 3587 N, 3588 DIP(SrcNone, rdtscp, check_rdtsc), 3589 N, N, N, N, N, N, 3590 }; 3591 3592 static const struct opcode group1[] = { 3593 F(Lock, em_add), 3594 F(Lock | PageTable, em_or), 3595 F(Lock, em_adc), 3596 F(Lock, em_sbb), 3597 F(Lock | PageTable, em_and), 3598 F(Lock, em_sub), 3599 F(Lock, em_xor), 3600 F(NoWrite, em_cmp), 3601 }; 3602 3603 static const struct opcode group1A[] = { 3604 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, 3605 }; 3606 3607 static const struct opcode group2[] = { 3608 F(DstMem | ModRM, em_rol), 3609 F(DstMem | ModRM, em_ror), 3610 F(DstMem | ModRM, em_rcl), 3611 F(DstMem | ModRM, em_rcr), 3612 F(DstMem | ModRM, em_shl), 3613 F(DstMem | ModRM, em_shr), 3614 F(DstMem | ModRM, em_shl), 3615 F(DstMem | ModRM, em_sar), 3616 }; 3617 3618 static const struct opcode group3[] = { 3619 F(DstMem | SrcImm | NoWrite, em_test), 3620 F(DstMem | SrcImm | NoWrite, em_test), 3621 F(DstMem | SrcNone | Lock, em_not), 3622 F(DstMem | SrcNone | Lock, em_neg), 3623 F(DstXacc | Src2Mem, em_mul_ex), 3624 F(DstXacc | Src2Mem, em_imul_ex), 3625 F(DstXacc | Src2Mem, em_div_ex), 3626 F(DstXacc | Src2Mem, em_idiv_ex), 3627 }; 3628 3629 static const struct opcode group4[] = { 3630 F(ByteOp | DstMem | SrcNone | Lock, em_inc), 3631 F(ByteOp | DstMem | SrcNone | Lock, em_dec), 3632 N, N, N, N, N, N, 3633 }; 3634 3635 static const struct opcode group5[] = { 3636 F(DstMem | SrcNone | Lock, em_inc), 3637 F(DstMem | SrcNone | Lock, em_dec), 3638 I(SrcMem | Stack, em_grp45), 3639 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), 3640 I(SrcMem | Stack, em_grp45), 3641 I(SrcMemFAddr | ImplicitOps, em_grp45), 3642 I(SrcMem | Stack, em_grp45), D(Undefined), 3643 }; 3644 3645 static const struct opcode group6[] = { 3646 DI(Prot, sldt), 3647 DI(Prot, str), 3648 II(Prot | Priv | SrcMem16, em_lldt, lldt), 3649 II(Prot | Priv | SrcMem16, em_ltr, ltr), 3650 N, N, N, N, 3651 }; 3652 3653 static const struct group_dual group7 = { { 3654 II(Mov | DstMem, em_sgdt, sgdt), 3655 II(Mov | DstMem, em_sidt, sidt), 3656 II(SrcMem | Priv, em_lgdt, lgdt), 3657 II(SrcMem | Priv, em_lidt, lidt), 3658 II(SrcNone | DstMem | Mov, em_smsw, smsw), N, 3659 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), 3660 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), 3661 }, { 3662 I(SrcNone | Priv | EmulateOnUD, em_vmcall), 3663 EXT(0, group7_rm1), 3664 N, EXT(0, group7_rm3), 3665 II(SrcNone | DstMem | Mov, em_smsw, smsw), N, 3666 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), 3667 EXT(0, group7_rm7), 3668 } }; 3669 3670 static const struct opcode group8[] = { 3671 N, N, N, N, 3672 F(DstMem | SrcImmByte | NoWrite, em_bt), 3673 F(DstMem | SrcImmByte | Lock | PageTable, em_bts), 3674 F(DstMem | SrcImmByte | Lock, em_btr), 3675 F(DstMem | SrcImmByte | Lock | PageTable, em_btc), 3676 }; 3677 3678 static const struct group_dual group9 = { { 3679 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, 3680 }, { 3681 N, N, N, N, N, N, N, N, 3682 } }; 3683 3684 static const struct opcode group11[] = { 3685 I(DstMem | SrcImm | Mov | PageTable, em_mov), 3686 X7(D(Undefined)), 3687 }; 3688 3689 static const struct gprefix pfx_0f_6f_0f_7f = { 3690 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), 3691 }; 3692 3693 static const struct gprefix pfx_vmovntpx = { 3694 I(0, em_mov), N, N, N, 3695 }; 3696 3697 static const struct gprefix pfx_0f_28_0f_29 = { 3698 I(Aligned, em_mov), I(Aligned, em_mov), N, N, 3699 }; 3700 3701 static const struct escape escape_d9 = { { 3702 N, N, N, N, N, N, N, I(DstMem, em_fnstcw), 3703 }, { 3704 /* 0xC0 - 0xC7 */ 3705 N, N, N, N, N, N, N, N, 3706 /* 0xC8 - 0xCF */ 3707 N, N, N, N, N, N, N, N, 3708 /* 0xD0 - 0xC7 */ 3709 N, N, N, N, N, N, N, N, 3710 /* 0xD8 - 0xDF */ 3711 N, N, N, N, N, N, N, N, 3712 /* 0xE0 - 0xE7 */ 3713 N, N, N, N, N, N, N, N, 3714 /* 0xE8 - 0xEF */ 3715 N, N, N, N, N, N, N, N, 3716 /* 0xF0 - 0xF7 */ 3717 N, N, N, N, N, N, N, N, 3718 /* 0xF8 - 0xFF */ 3719 N, N, N, N, N, N, N, N, 3720 } }; 3721 3722 static const struct escape escape_db = { { 3723 N, N, N, N, N, N, N, N, 3724 }, { 3725 /* 0xC0 - 0xC7 */ 3726 N, N, N, N, N, N, N, N, 3727 /* 0xC8 - 0xCF */ 3728 N, N, N, N, N, N, N, N, 3729 /* 0xD0 - 0xC7 */ 3730 N, N, N, N, N, N, N, N, 3731 /* 0xD8 - 0xDF */ 3732 N, N, N, N, N, N, N, N, 3733 /* 0xE0 - 0xE7 */ 3734 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, 3735 /* 0xE8 - 0xEF */ 3736 N, N, N, N, N, N, N, N, 3737 /* 0xF0 - 0xF7 */ 3738 N, N, N, N, N, N, N, N, 3739 /* 0xF8 - 0xFF */ 3740 N, N, N, N, N, N, N, N, 3741 } }; 3742 3743 static const struct escape escape_dd = { { 3744 N, N, N, N, N, N, N, I(DstMem, em_fnstsw), 3745 }, { 3746 /* 0xC0 - 0xC7 */ 3747 N, N, N, N, N, N, N, N, 3748 /* 0xC8 - 0xCF */ 3749 N, N, N, N, N, N, N, N, 3750 /* 0xD0 - 0xC7 */ 3751 N, N, N, N, N, N, N, N, 3752 /* 0xD8 - 0xDF */ 3753 N, N, N, N, N, N, N, N, 3754 /* 0xE0 - 0xE7 */ 3755 N, N, N, N, N, N, N, N, 3756 /* 0xE8 - 0xEF */ 3757 N, N, N, N, N, N, N, N, 3758 /* 0xF0 - 0xF7 */ 3759 N, N, N, N, N, N, N, N, 3760 /* 0xF8 - 0xFF */ 3761 N, N, N, N, N, N, N, N, 3762 } }; 3763 3764 static const struct opcode opcode_table[256] = { 3765 /* 0x00 - 0x07 */ 3766 F6ALU(Lock, em_add), 3767 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), 3768 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), 3769 /* 0x08 - 0x0F */ 3770 F6ALU(Lock | PageTable, em_or), 3771 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), 3772 N, 3773 /* 0x10 - 0x17 */ 3774 F6ALU(Lock, em_adc), 3775 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), 3776 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), 3777 /* 0x18 - 0x1F */ 3778 F6ALU(Lock, em_sbb), 3779 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), 3780 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), 3781 /* 0x20 - 0x27 */ 3782 F6ALU(Lock | PageTable, em_and), N, N, 3783 /* 0x28 - 0x2F */ 3784 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), 3785 /* 0x30 - 0x37 */ 3786 F6ALU(Lock, em_xor), N, N, 3787 /* 0x38 - 0x3F */ 3788 F6ALU(NoWrite, em_cmp), N, N, 3789 /* 0x40 - 0x4F */ 3790 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), 3791 /* 0x50 - 0x57 */ 3792 X8(I(SrcReg | Stack, em_push)), 3793 /* 0x58 - 0x5F */ 3794 X8(I(DstReg | Stack, em_pop)), 3795 /* 0x60 - 0x67 */ 3796 I(ImplicitOps | Stack | No64, em_pusha), 3797 I(ImplicitOps | Stack | No64, em_popa), 3798 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , 3799 N, N, N, N, 3800 /* 0x68 - 0x6F */ 3801 I(SrcImm | Mov | Stack, em_push), 3802 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 3803 I(SrcImmByte | Mov | Stack, em_push), 3804 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 3805 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ 3806 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ 3807 /* 0x70 - 0x7F */ 3808 X16(D(SrcImmByte)), 3809 /* 0x80 - 0x87 */ 3810 G(ByteOp | DstMem | SrcImm, group1), 3811 G(DstMem | SrcImm, group1), 3812 G(ByteOp | DstMem | SrcImm | No64, group1), 3813 G(DstMem | SrcImmByte, group1), 3814 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), 3815 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), 3816 /* 0x88 - 0x8F */ 3817 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), 3818 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), 3819 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), 3820 D(ModRM | SrcMem | NoAccess | DstReg), 3821 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), 3822 G(0, group1A), 3823 /* 0x90 - 0x97 */ 3824 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), 3825 /* 0x98 - 0x9F */ 3826 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), 3827 I(SrcImmFAddr | No64, em_call_far), N, 3828 II(ImplicitOps | Stack, em_pushf, pushf), 3829 II(ImplicitOps | Stack, em_popf, popf), 3830 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), 3831 /* 0xA0 - 0xA7 */ 3832 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), 3833 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), 3834 I2bv(SrcSI | DstDI | Mov | String, em_mov), 3835 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp), 3836 /* 0xA8 - 0xAF */ 3837 F2bv(DstAcc | SrcImm | NoWrite, em_test), 3838 I2bv(SrcAcc | DstDI | Mov | String, em_mov), 3839 I2bv(SrcSI | DstAcc | Mov | String, em_mov), 3840 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), 3841 /* 0xB0 - 0xB7 */ 3842 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), 3843 /* 0xB8 - 0xBF */ 3844 X8(I(DstReg | SrcImm64 | Mov, em_mov)), 3845 /* 0xC0 - 0xC7 */ 3846 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), 3847 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), 3848 I(ImplicitOps | Stack, em_ret), 3849 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), 3850 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), 3851 G(ByteOp, group11), G(0, group11), 3852 /* 0xC8 - 0xCF */ 3853 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), 3854 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), 3855 I(ImplicitOps | Stack, em_ret_far), 3856 D(ImplicitOps), DI(SrcImmByte, intn), 3857 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), 3858 /* 0xD0 - 0xD7 */ 3859 G(Src2One | ByteOp, group2), G(Src2One, group2), 3860 G(Src2CL | ByteOp, group2), G(Src2CL, group2), 3861 I(DstAcc | SrcImmUByte | No64, em_aam), 3862 I(DstAcc | SrcImmUByte | No64, em_aad), 3863 F(DstAcc | ByteOp | No64, em_salc), 3864 I(DstAcc | SrcXLat | ByteOp, em_mov), 3865 /* 0xD8 - 0xDF */ 3866 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, 3867 /* 0xE0 - 0xE7 */ 3868 X3(I(SrcImmByte, em_loop)), 3869 I(SrcImmByte, em_jcxz), 3870 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), 3871 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), 3872 /* 0xE8 - 0xEF */ 3873 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps), 3874 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps), 3875 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), 3876 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), 3877 /* 0xF0 - 0xF7 */ 3878 N, DI(ImplicitOps, icebp), N, N, 3879 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 3880 G(ByteOp, group3), G(0, group3), 3881 /* 0xF8 - 0xFF */ 3882 D(ImplicitOps), D(ImplicitOps), 3883 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), 3884 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), 3885 }; 3886 3887 static const struct opcode twobyte_table[256] = { 3888 /* 0x00 - 0x0F */ 3889 G(0, group6), GD(0, &group7), N, N, 3890 N, I(ImplicitOps | EmulateOnUD, em_syscall), 3891 II(ImplicitOps | Priv, em_clts, clts), N, 3892 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 3893 N, D(ImplicitOps | ModRM), N, N, 3894 /* 0x10 - 0x1F */ 3895 N, N, N, N, N, N, N, N, 3896 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), 3897 /* 0x20 - 0x2F */ 3898 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), 3899 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), 3900 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, 3901 check_cr_write), 3902 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, 3903 check_dr_write), 3904 N, N, N, N, 3905 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), 3906 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), 3907 N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx), 3908 N, N, N, N, 3909 /* 0x30 - 0x3F */ 3910 II(ImplicitOps | Priv, em_wrmsr, wrmsr), 3911 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), 3912 II(ImplicitOps | Priv, em_rdmsr, rdmsr), 3913 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), 3914 I(ImplicitOps | EmulateOnUD, em_sysenter), 3915 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), 3916 N, N, 3917 N, N, N, N, N, N, N, N, 3918 /* 0x40 - 0x4F */ 3919 X16(D(DstReg | SrcMem | ModRM)), 3920 /* 0x50 - 0x5F */ 3921 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 3922 /* 0x60 - 0x6F */ 3923 N, N, N, N, 3924 N, N, N, N, 3925 N, N, N, N, 3926 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), 3927 /* 0x70 - 0x7F */ 3928 N, N, N, N, 3929 N, N, N, N, 3930 N, N, N, N, 3931 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), 3932 /* 0x80 - 0x8F */ 3933 X16(D(SrcImm)), 3934 /* 0x90 - 0x9F */ 3935 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), 3936 /* 0xA0 - 0xA7 */ 3937 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), 3938 II(ImplicitOps, em_cpuid, cpuid), 3939 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), 3940 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), 3941 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, 3942 /* 0xA8 - 0xAF */ 3943 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), 3944 DI(ImplicitOps, rsm), 3945 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), 3946 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), 3947 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), 3948 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul), 3949 /* 0xB0 - 0xB7 */ 3950 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), 3951 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), 3952 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), 3953 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), 3954 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), 3955 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), 3956 /* 0xB8 - 0xBF */ 3957 N, N, 3958 G(BitOp, group8), 3959 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), 3960 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), 3961 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), 3962 /* 0xC0 - 0xC7 */ 3963 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), 3964 N, D(DstMem | SrcReg | ModRM | Mov), 3965 N, N, N, GD(0, &group9), 3966 /* 0xC8 - 0xCF */ 3967 X8(I(DstReg, em_bswap)), 3968 /* 0xD0 - 0xDF */ 3969 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 3970 /* 0xE0 - 0xEF */ 3971 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 3972 /* 0xF0 - 0xFF */ 3973 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N 3974 }; 3975 3976 static const struct gprefix three_byte_0f_38_f0 = { 3977 I(DstReg | SrcMem | Mov, em_movbe), N, N, N 3978 }; 3979 3980 static const struct gprefix three_byte_0f_38_f1 = { 3981 I(DstMem | SrcReg | Mov, em_movbe), N, N, N 3982 }; 3983 3984 /* 3985 * Insns below are selected by the prefix which indexed by the third opcode 3986 * byte. 3987 */ 3988 static const struct opcode opcode_map_0f_38[256] = { 3989 /* 0x00 - 0x7f */ 3990 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), 3991 /* 0x80 - 0xef */ 3992 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), 3993 /* 0xf0 - 0xf1 */ 3994 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0), 3995 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1), 3996 /* 0xf2 - 0xff */ 3997 N, N, X4(N), X8(N) 3998 }; 3999 4000 #undef D 4001 #undef N 4002 #undef G 4003 #undef GD 4004 #undef I 4005 #undef GP 4006 #undef EXT 4007 4008 #undef D2bv 4009 #undef D2bvIP 4010 #undef I2bv 4011 #undef I2bvIP 4012 #undef I6ALU 4013 4014 static unsigned imm_size(struct x86_emulate_ctxt *ctxt) 4015 { 4016 unsigned size; 4017 4018 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4019 if (size == 8) 4020 size = 4; 4021 return size; 4022 } 4023 4024 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, 4025 unsigned size, bool sign_extension) 4026 { 4027 int rc = X86EMUL_CONTINUE; 4028 4029 op->type = OP_IMM; 4030 op->bytes = size; 4031 op->addr.mem.ea = ctxt->_eip; 4032 /* NB. Immediates are sign-extended as necessary. */ 4033 switch (op->bytes) { 4034 case 1: 4035 op->val = insn_fetch(s8, ctxt); 4036 break; 4037 case 2: 4038 op->val = insn_fetch(s16, ctxt); 4039 break; 4040 case 4: 4041 op->val = insn_fetch(s32, ctxt); 4042 break; 4043 case 8: 4044 op->val = insn_fetch(s64, ctxt); 4045 break; 4046 } 4047 if (!sign_extension) { 4048 switch (op->bytes) { 4049 case 1: 4050 op->val &= 0xff; 4051 break; 4052 case 2: 4053 op->val &= 0xffff; 4054 break; 4055 case 4: 4056 op->val &= 0xffffffff; 4057 break; 4058 } 4059 } 4060 done: 4061 return rc; 4062 } 4063 4064 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, 4065 unsigned d) 4066 { 4067 int rc = X86EMUL_CONTINUE; 4068 4069 switch (d) { 4070 case OpReg: 4071 decode_register_operand(ctxt, op); 4072 break; 4073 case OpImmUByte: 4074 rc = decode_imm(ctxt, op, 1, false); 4075 break; 4076 case OpMem: 4077 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4078 mem_common: 4079 *op = ctxt->memop; 4080 ctxt->memopp = op; 4081 if (ctxt->d & BitOp) 4082 fetch_bit_operand(ctxt); 4083 op->orig_val = op->val; 4084 break; 4085 case OpMem64: 4086 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; 4087 goto mem_common; 4088 case OpAcc: 4089 op->type = OP_REG; 4090 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4091 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 4092 fetch_register_operand(op); 4093 op->orig_val = op->val; 4094 break; 4095 case OpAccLo: 4096 op->type = OP_REG; 4097 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; 4098 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 4099 fetch_register_operand(op); 4100 op->orig_val = op->val; 4101 break; 4102 case OpAccHi: 4103 if (ctxt->d & ByteOp) { 4104 op->type = OP_NONE; 4105 break; 4106 } 4107 op->type = OP_REG; 4108 op->bytes = ctxt->op_bytes; 4109 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 4110 fetch_register_operand(op); 4111 op->orig_val = op->val; 4112 break; 4113 case OpDI: 4114 op->type = OP_MEM; 4115 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4116 op->addr.mem.ea = 4117 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI)); 4118 op->addr.mem.seg = VCPU_SREG_ES; 4119 op->val = 0; 4120 op->count = 1; 4121 break; 4122 case OpDX: 4123 op->type = OP_REG; 4124 op->bytes = 2; 4125 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 4126 fetch_register_operand(op); 4127 break; 4128 case OpCL: 4129 op->bytes = 1; 4130 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; 4131 break; 4132 case OpImmByte: 4133 rc = decode_imm(ctxt, op, 1, true); 4134 break; 4135 case OpOne: 4136 op->bytes = 1; 4137 op->val = 1; 4138 break; 4139 case OpImm: 4140 rc = decode_imm(ctxt, op, imm_size(ctxt), true); 4141 break; 4142 case OpImm64: 4143 rc = decode_imm(ctxt, op, ctxt->op_bytes, true); 4144 break; 4145 case OpMem8: 4146 ctxt->memop.bytes = 1; 4147 if (ctxt->memop.type == OP_REG) { 4148 ctxt->memop.addr.reg = decode_register(ctxt, 4149 ctxt->modrm_rm, true); 4150 fetch_register_operand(&ctxt->memop); 4151 } 4152 goto mem_common; 4153 case OpMem16: 4154 ctxt->memop.bytes = 2; 4155 goto mem_common; 4156 case OpMem32: 4157 ctxt->memop.bytes = 4; 4158 goto mem_common; 4159 case OpImmU16: 4160 rc = decode_imm(ctxt, op, 2, false); 4161 break; 4162 case OpImmU: 4163 rc = decode_imm(ctxt, op, imm_size(ctxt), false); 4164 break; 4165 case OpSI: 4166 op->type = OP_MEM; 4167 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4168 op->addr.mem.ea = 4169 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); 4170 op->addr.mem.seg = ctxt->seg_override; 4171 op->val = 0; 4172 op->count = 1; 4173 break; 4174 case OpXLat: 4175 op->type = OP_MEM; 4176 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4177 op->addr.mem.ea = 4178 register_address(ctxt, 4179 reg_read(ctxt, VCPU_REGS_RBX) + 4180 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); 4181 op->addr.mem.seg = ctxt->seg_override; 4182 op->val = 0; 4183 break; 4184 case OpImmFAddr: 4185 op->type = OP_IMM; 4186 op->addr.mem.ea = ctxt->_eip; 4187 op->bytes = ctxt->op_bytes + 2; 4188 insn_fetch_arr(op->valptr, op->bytes, ctxt); 4189 break; 4190 case OpMemFAddr: 4191 ctxt->memop.bytes = ctxt->op_bytes + 2; 4192 goto mem_common; 4193 case OpES: 4194 op->val = VCPU_SREG_ES; 4195 break; 4196 case OpCS: 4197 op->val = VCPU_SREG_CS; 4198 break; 4199 case OpSS: 4200 op->val = VCPU_SREG_SS; 4201 break; 4202 case OpDS: 4203 op->val = VCPU_SREG_DS; 4204 break; 4205 case OpFS: 4206 op->val = VCPU_SREG_FS; 4207 break; 4208 case OpGS: 4209 op->val = VCPU_SREG_GS; 4210 break; 4211 case OpImplicit: 4212 /* Special instructions do their own operand decoding. */ 4213 default: 4214 op->type = OP_NONE; /* Disable writeback. */ 4215 break; 4216 } 4217 4218 done: 4219 return rc; 4220 } 4221 4222 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) 4223 { 4224 int rc = X86EMUL_CONTINUE; 4225 int mode = ctxt->mode; 4226 int def_op_bytes, def_ad_bytes, goffset, simd_prefix; 4227 bool op_prefix = false; 4228 bool has_seg_override = false; 4229 struct opcode opcode; 4230 4231 ctxt->memop.type = OP_NONE; 4232 ctxt->memopp = NULL; 4233 ctxt->_eip = ctxt->eip; 4234 ctxt->fetch.ptr = ctxt->fetch.data; 4235 ctxt->fetch.end = ctxt->fetch.data + insn_len; 4236 ctxt->opcode_len = 1; 4237 if (insn_len > 0) 4238 memcpy(ctxt->fetch.data, insn, insn_len); 4239 else { 4240 rc = __do_insn_fetch_bytes(ctxt, 1); 4241 if (rc != X86EMUL_CONTINUE) 4242 return rc; 4243 } 4244 4245 switch (mode) { 4246 case X86EMUL_MODE_REAL: 4247 case X86EMUL_MODE_VM86: 4248 case X86EMUL_MODE_PROT16: 4249 def_op_bytes = def_ad_bytes = 2; 4250 break; 4251 case X86EMUL_MODE_PROT32: 4252 def_op_bytes = def_ad_bytes = 4; 4253 break; 4254 #ifdef CONFIG_X86_64 4255 case X86EMUL_MODE_PROT64: 4256 def_op_bytes = 4; 4257 def_ad_bytes = 8; 4258 break; 4259 #endif 4260 default: 4261 return EMULATION_FAILED; 4262 } 4263 4264 ctxt->op_bytes = def_op_bytes; 4265 ctxt->ad_bytes = def_ad_bytes; 4266 4267 /* Legacy prefixes. */ 4268 for (;;) { 4269 switch (ctxt->b = insn_fetch(u8, ctxt)) { 4270 case 0x66: /* operand-size override */ 4271 op_prefix = true; 4272 /* switch between 2/4 bytes */ 4273 ctxt->op_bytes = def_op_bytes ^ 6; 4274 break; 4275 case 0x67: /* address-size override */ 4276 if (mode == X86EMUL_MODE_PROT64) 4277 /* switch between 4/8 bytes */ 4278 ctxt->ad_bytes = def_ad_bytes ^ 12; 4279 else 4280 /* switch between 2/4 bytes */ 4281 ctxt->ad_bytes = def_ad_bytes ^ 6; 4282 break; 4283 case 0x26: /* ES override */ 4284 case 0x2e: /* CS override */ 4285 case 0x36: /* SS override */ 4286 case 0x3e: /* DS override */ 4287 has_seg_override = true; 4288 ctxt->seg_override = (ctxt->b >> 3) & 3; 4289 break; 4290 case 0x64: /* FS override */ 4291 case 0x65: /* GS override */ 4292 has_seg_override = true; 4293 ctxt->seg_override = ctxt->b & 7; 4294 break; 4295 case 0x40 ... 0x4f: /* REX */ 4296 if (mode != X86EMUL_MODE_PROT64) 4297 goto done_prefixes; 4298 ctxt->rex_prefix = ctxt->b; 4299 continue; 4300 case 0xf0: /* LOCK */ 4301 ctxt->lock_prefix = 1; 4302 break; 4303 case 0xf2: /* REPNE/REPNZ */ 4304 case 0xf3: /* REP/REPE/REPZ */ 4305 ctxt->rep_prefix = ctxt->b; 4306 break; 4307 default: 4308 goto done_prefixes; 4309 } 4310 4311 /* Any legacy prefix after a REX prefix nullifies its effect. */ 4312 4313 ctxt->rex_prefix = 0; 4314 } 4315 4316 done_prefixes: 4317 4318 /* REX prefix. */ 4319 if (ctxt->rex_prefix & 8) 4320 ctxt->op_bytes = 8; /* REX.W */ 4321 4322 /* Opcode byte(s). */ 4323 opcode = opcode_table[ctxt->b]; 4324 /* Two-byte opcode? */ 4325 if (ctxt->b == 0x0f) { 4326 ctxt->opcode_len = 2; 4327 ctxt->b = insn_fetch(u8, ctxt); 4328 opcode = twobyte_table[ctxt->b]; 4329 4330 /* 0F_38 opcode map */ 4331 if (ctxt->b == 0x38) { 4332 ctxt->opcode_len = 3; 4333 ctxt->b = insn_fetch(u8, ctxt); 4334 opcode = opcode_map_0f_38[ctxt->b]; 4335 } 4336 } 4337 ctxt->d = opcode.flags; 4338 4339 if (ctxt->d & ModRM) 4340 ctxt->modrm = insn_fetch(u8, ctxt); 4341 4342 /* vex-prefix instructions are not implemented */ 4343 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && 4344 (mode == X86EMUL_MODE_PROT64 || 4345 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { 4346 ctxt->d = NotImpl; 4347 } 4348 4349 while (ctxt->d & GroupMask) { 4350 switch (ctxt->d & GroupMask) { 4351 case Group: 4352 goffset = (ctxt->modrm >> 3) & 7; 4353 opcode = opcode.u.group[goffset]; 4354 break; 4355 case GroupDual: 4356 goffset = (ctxt->modrm >> 3) & 7; 4357 if ((ctxt->modrm >> 6) == 3) 4358 opcode = opcode.u.gdual->mod3[goffset]; 4359 else 4360 opcode = opcode.u.gdual->mod012[goffset]; 4361 break; 4362 case RMExt: 4363 goffset = ctxt->modrm & 7; 4364 opcode = opcode.u.group[goffset]; 4365 break; 4366 case Prefix: 4367 if (ctxt->rep_prefix && op_prefix) 4368 return EMULATION_FAILED; 4369 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; 4370 switch (simd_prefix) { 4371 case 0x00: opcode = opcode.u.gprefix->pfx_no; break; 4372 case 0x66: opcode = opcode.u.gprefix->pfx_66; break; 4373 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; 4374 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; 4375 } 4376 break; 4377 case Escape: 4378 if (ctxt->modrm > 0xbf) 4379 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; 4380 else 4381 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; 4382 break; 4383 default: 4384 return EMULATION_FAILED; 4385 } 4386 4387 ctxt->d &= ~(u64)GroupMask; 4388 ctxt->d |= opcode.flags; 4389 } 4390 4391 /* Unrecognised? */ 4392 if (ctxt->d == 0) 4393 return EMULATION_FAILED; 4394 4395 ctxt->execute = opcode.u.execute; 4396 4397 if (unlikely(ctxt->d & 4398 (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { 4399 /* 4400 * These are copied unconditionally here, and checked unconditionally 4401 * in x86_emulate_insn. 4402 */ 4403 ctxt->check_perm = opcode.check_perm; 4404 ctxt->intercept = opcode.intercept; 4405 4406 if (ctxt->d & NotImpl) 4407 return EMULATION_FAILED; 4408 4409 if (!(ctxt->d & EmulateOnUD) && ctxt->ud) 4410 return EMULATION_FAILED; 4411 4412 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) 4413 ctxt->op_bytes = 8; 4414 4415 if (ctxt->d & Op3264) { 4416 if (mode == X86EMUL_MODE_PROT64) 4417 ctxt->op_bytes = 8; 4418 else 4419 ctxt->op_bytes = 4; 4420 } 4421 4422 if (ctxt->d & Sse) 4423 ctxt->op_bytes = 16; 4424 else if (ctxt->d & Mmx) 4425 ctxt->op_bytes = 8; 4426 } 4427 4428 /* ModRM and SIB bytes. */ 4429 if (ctxt->d & ModRM) { 4430 rc = decode_modrm(ctxt, &ctxt->memop); 4431 if (!has_seg_override) { 4432 has_seg_override = true; 4433 ctxt->seg_override = ctxt->modrm_seg; 4434 } 4435 } else if (ctxt->d & MemAbs) 4436 rc = decode_abs(ctxt, &ctxt->memop); 4437 if (rc != X86EMUL_CONTINUE) 4438 goto done; 4439 4440 if (!has_seg_override) 4441 ctxt->seg_override = VCPU_SREG_DS; 4442 4443 ctxt->memop.addr.mem.seg = ctxt->seg_override; 4444 4445 /* 4446 * Decode and fetch the source operand: register, memory 4447 * or immediate. 4448 */ 4449 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); 4450 if (rc != X86EMUL_CONTINUE) 4451 goto done; 4452 4453 /* 4454 * Decode and fetch the second source operand: register, memory 4455 * or immediate. 4456 */ 4457 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); 4458 if (rc != X86EMUL_CONTINUE) 4459 goto done; 4460 4461 /* Decode and fetch the destination operand: register or memory. */ 4462 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); 4463 4464 done: 4465 if (ctxt->rip_relative) 4466 ctxt->memopp->addr.mem.ea += ctxt->_eip; 4467 4468 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; 4469 } 4470 4471 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) 4472 { 4473 return ctxt->d & PageTable; 4474 } 4475 4476 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) 4477 { 4478 /* The second termination condition only applies for REPE 4479 * and REPNE. Test if the repeat string operation prefix is 4480 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the 4481 * corresponding termination condition according to: 4482 * - if REPE/REPZ and ZF = 0 then done 4483 * - if REPNE/REPNZ and ZF = 1 then done 4484 */ 4485 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || 4486 (ctxt->b == 0xae) || (ctxt->b == 0xaf)) 4487 && (((ctxt->rep_prefix == REPE_PREFIX) && 4488 ((ctxt->eflags & EFLG_ZF) == 0)) 4489 || ((ctxt->rep_prefix == REPNE_PREFIX) && 4490 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) 4491 return true; 4492 4493 return false; 4494 } 4495 4496 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) 4497 { 4498 bool fault = false; 4499 4500 ctxt->ops->get_fpu(ctxt); 4501 asm volatile("1: fwait \n\t" 4502 "2: \n\t" 4503 ".pushsection .fixup,\"ax\" \n\t" 4504 "3: \n\t" 4505 "movb $1, %[fault] \n\t" 4506 "jmp 2b \n\t" 4507 ".popsection \n\t" 4508 _ASM_EXTABLE(1b, 3b) 4509 : [fault]"+qm"(fault)); 4510 ctxt->ops->put_fpu(ctxt); 4511 4512 if (unlikely(fault)) 4513 return emulate_exception(ctxt, MF_VECTOR, 0, false); 4514 4515 return X86EMUL_CONTINUE; 4516 } 4517 4518 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, 4519 struct operand *op) 4520 { 4521 if (op->type == OP_MM) 4522 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); 4523 } 4524 4525 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) 4526 { 4527 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; 4528 if (!(ctxt->d & ByteOp)) 4529 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; 4530 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" 4531 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), 4532 [fastop]"+S"(fop) 4533 : "c"(ctxt->src2.val)); 4534 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); 4535 if (!fop) /* exception is returned in fop variable */ 4536 return emulate_de(ctxt); 4537 return X86EMUL_CONTINUE; 4538 } 4539 4540 void init_decode_cache(struct x86_emulate_ctxt *ctxt) 4541 { 4542 memset(&ctxt->rip_relative, 0, 4543 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); 4544 4545 ctxt->io_read.pos = 0; 4546 ctxt->io_read.end = 0; 4547 ctxt->mem_read.end = 0; 4548 } 4549 4550 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) 4551 { 4552 const struct x86_emulate_ops *ops = ctxt->ops; 4553 int rc = X86EMUL_CONTINUE; 4554 int saved_dst_type = ctxt->dst.type; 4555 4556 ctxt->mem_read.pos = 0; 4557 4558 /* LOCK prefix is allowed only with some instructions */ 4559 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { 4560 rc = emulate_ud(ctxt); 4561 goto done; 4562 } 4563 4564 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { 4565 rc = emulate_ud(ctxt); 4566 goto done; 4567 } 4568 4569 if (unlikely(ctxt->d & 4570 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { 4571 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || 4572 (ctxt->d & Undefined)) { 4573 rc = emulate_ud(ctxt); 4574 goto done; 4575 } 4576 4577 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) 4578 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { 4579 rc = emulate_ud(ctxt); 4580 goto done; 4581 } 4582 4583 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { 4584 rc = emulate_nm(ctxt); 4585 goto done; 4586 } 4587 4588 if (ctxt->d & Mmx) { 4589 rc = flush_pending_x87_faults(ctxt); 4590 if (rc != X86EMUL_CONTINUE) 4591 goto done; 4592 /* 4593 * Now that we know the fpu is exception safe, we can fetch 4594 * operands from it. 4595 */ 4596 fetch_possible_mmx_operand(ctxt, &ctxt->src); 4597 fetch_possible_mmx_operand(ctxt, &ctxt->src2); 4598 if (!(ctxt->d & Mov)) 4599 fetch_possible_mmx_operand(ctxt, &ctxt->dst); 4600 } 4601 4602 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { 4603 rc = emulator_check_intercept(ctxt, ctxt->intercept, 4604 X86_ICPT_PRE_EXCEPT); 4605 if (rc != X86EMUL_CONTINUE) 4606 goto done; 4607 } 4608 4609 /* Privileged instruction can be executed only in CPL=0 */ 4610 if ((ctxt->d & Priv) && ops->cpl(ctxt)) { 4611 if (ctxt->d & PrivUD) 4612 rc = emulate_ud(ctxt); 4613 else 4614 rc = emulate_gp(ctxt, 0); 4615 goto done; 4616 } 4617 4618 /* Instruction can only be executed in protected mode */ 4619 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { 4620 rc = emulate_ud(ctxt); 4621 goto done; 4622 } 4623 4624 /* Do instruction specific permission checks */ 4625 if (ctxt->d & CheckPerm) { 4626 rc = ctxt->check_perm(ctxt); 4627 if (rc != X86EMUL_CONTINUE) 4628 goto done; 4629 } 4630 4631 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { 4632 rc = emulator_check_intercept(ctxt, ctxt->intercept, 4633 X86_ICPT_POST_EXCEPT); 4634 if (rc != X86EMUL_CONTINUE) 4635 goto done; 4636 } 4637 4638 if (ctxt->rep_prefix && (ctxt->d & String)) { 4639 /* All REP prefixes have the same first termination condition */ 4640 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { 4641 ctxt->eip = ctxt->_eip; 4642 ctxt->eflags &= ~EFLG_RF; 4643 goto done; 4644 } 4645 } 4646 } 4647 4648 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { 4649 rc = segmented_read(ctxt, ctxt->src.addr.mem, 4650 ctxt->src.valptr, ctxt->src.bytes); 4651 if (rc != X86EMUL_CONTINUE) 4652 goto done; 4653 ctxt->src.orig_val64 = ctxt->src.val64; 4654 } 4655 4656 if (ctxt->src2.type == OP_MEM) { 4657 rc = segmented_read(ctxt, ctxt->src2.addr.mem, 4658 &ctxt->src2.val, ctxt->src2.bytes); 4659 if (rc != X86EMUL_CONTINUE) 4660 goto done; 4661 } 4662 4663 if ((ctxt->d & DstMask) == ImplicitOps) 4664 goto special_insn; 4665 4666 4667 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { 4668 /* optimisation - avoid slow emulated read if Mov */ 4669 rc = segmented_read(ctxt, ctxt->dst.addr.mem, 4670 &ctxt->dst.val, ctxt->dst.bytes); 4671 if (rc != X86EMUL_CONTINUE) 4672 goto done; 4673 } 4674 ctxt->dst.orig_val = ctxt->dst.val; 4675 4676 special_insn: 4677 4678 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { 4679 rc = emulator_check_intercept(ctxt, ctxt->intercept, 4680 X86_ICPT_POST_MEMACCESS); 4681 if (rc != X86EMUL_CONTINUE) 4682 goto done; 4683 } 4684 4685 if (ctxt->rep_prefix && (ctxt->d & String)) 4686 ctxt->eflags |= EFLG_RF; 4687 else 4688 ctxt->eflags &= ~EFLG_RF; 4689 4690 if (ctxt->execute) { 4691 if (ctxt->d & Fastop) { 4692 void (*fop)(struct fastop *) = (void *)ctxt->execute; 4693 rc = fastop(ctxt, fop); 4694 if (rc != X86EMUL_CONTINUE) 4695 goto done; 4696 goto writeback; 4697 } 4698 rc = ctxt->execute(ctxt); 4699 if (rc != X86EMUL_CONTINUE) 4700 goto done; 4701 goto writeback; 4702 } 4703 4704 if (ctxt->opcode_len == 2) 4705 goto twobyte_insn; 4706 else if (ctxt->opcode_len == 3) 4707 goto threebyte_insn; 4708 4709 switch (ctxt->b) { 4710 case 0x63: /* movsxd */ 4711 if (ctxt->mode != X86EMUL_MODE_PROT64) 4712 goto cannot_emulate; 4713 ctxt->dst.val = (s32) ctxt->src.val; 4714 break; 4715 case 0x70 ... 0x7f: /* jcc (short) */ 4716 if (test_cc(ctxt->b, ctxt->eflags)) 4717 jmp_rel(ctxt, ctxt->src.val); 4718 break; 4719 case 0x8d: /* lea r16/r32, m */ 4720 ctxt->dst.val = ctxt->src.addr.mem.ea; 4721 break; 4722 case 0x90 ... 0x97: /* nop / xchg reg, rax */ 4723 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) 4724 ctxt->dst.type = OP_NONE; 4725 else 4726 rc = em_xchg(ctxt); 4727 break; 4728 case 0x98: /* cbw/cwde/cdqe */ 4729 switch (ctxt->op_bytes) { 4730 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; 4731 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; 4732 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; 4733 } 4734 break; 4735 case 0xcc: /* int3 */ 4736 rc = emulate_int(ctxt, 3); 4737 break; 4738 case 0xcd: /* int n */ 4739 rc = emulate_int(ctxt, ctxt->src.val); 4740 break; 4741 case 0xce: /* into */ 4742 if (ctxt->eflags & EFLG_OF) 4743 rc = emulate_int(ctxt, 4); 4744 break; 4745 case 0xe9: /* jmp rel */ 4746 case 0xeb: /* jmp rel short */ 4747 jmp_rel(ctxt, ctxt->src.val); 4748 ctxt->dst.type = OP_NONE; /* Disable writeback. */ 4749 break; 4750 case 0xf4: /* hlt */ 4751 ctxt->ops->halt(ctxt); 4752 break; 4753 case 0xf5: /* cmc */ 4754 /* complement carry flag from eflags reg */ 4755 ctxt->eflags ^= EFLG_CF; 4756 break; 4757 case 0xf8: /* clc */ 4758 ctxt->eflags &= ~EFLG_CF; 4759 break; 4760 case 0xf9: /* stc */ 4761 ctxt->eflags |= EFLG_CF; 4762 break; 4763 case 0xfc: /* cld */ 4764 ctxt->eflags &= ~EFLG_DF; 4765 break; 4766 case 0xfd: /* std */ 4767 ctxt->eflags |= EFLG_DF; 4768 break; 4769 default: 4770 goto cannot_emulate; 4771 } 4772 4773 if (rc != X86EMUL_CONTINUE) 4774 goto done; 4775 4776 writeback: 4777 if (ctxt->d & SrcWrite) { 4778 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); 4779 rc = writeback(ctxt, &ctxt->src); 4780 if (rc != X86EMUL_CONTINUE) 4781 goto done; 4782 } 4783 if (!(ctxt->d & NoWrite)) { 4784 rc = writeback(ctxt, &ctxt->dst); 4785 if (rc != X86EMUL_CONTINUE) 4786 goto done; 4787 } 4788 4789 /* 4790 * restore dst type in case the decoding will be reused 4791 * (happens for string instruction ) 4792 */ 4793 ctxt->dst.type = saved_dst_type; 4794 4795 if ((ctxt->d & SrcMask) == SrcSI) 4796 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); 4797 4798 if ((ctxt->d & DstMask) == DstDI) 4799 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); 4800 4801 if (ctxt->rep_prefix && (ctxt->d & String)) { 4802 unsigned int count; 4803 struct read_cache *r = &ctxt->io_read; 4804 if ((ctxt->d & SrcMask) == SrcSI) 4805 count = ctxt->src.count; 4806 else 4807 count = ctxt->dst.count; 4808 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), 4809 -count); 4810 4811 if (!string_insn_completed(ctxt)) { 4812 /* 4813 * Re-enter guest when pio read ahead buffer is empty 4814 * or, if it is not used, after each 1024 iteration. 4815 */ 4816 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && 4817 (r->end == 0 || r->end != r->pos)) { 4818 /* 4819 * Reset read cache. Usually happens before 4820 * decode, but since instruction is restarted 4821 * we have to do it here. 4822 */ 4823 ctxt->mem_read.end = 0; 4824 writeback_registers(ctxt); 4825 return EMULATION_RESTART; 4826 } 4827 goto done; /* skip rip writeback */ 4828 } 4829 ctxt->eflags &= ~EFLG_RF; 4830 } 4831 4832 ctxt->eip = ctxt->_eip; 4833 4834 done: 4835 if (rc == X86EMUL_PROPAGATE_FAULT) 4836 ctxt->have_exception = true; 4837 if (rc == X86EMUL_INTERCEPTED) 4838 return EMULATION_INTERCEPTED; 4839 4840 if (rc == X86EMUL_CONTINUE) 4841 writeback_registers(ctxt); 4842 4843 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 4844 4845 twobyte_insn: 4846 switch (ctxt->b) { 4847 case 0x09: /* wbinvd */ 4848 (ctxt->ops->wbinvd)(ctxt); 4849 break; 4850 case 0x08: /* invd */ 4851 case 0x0d: /* GrpP (prefetch) */ 4852 case 0x18: /* Grp16 (prefetch/nop) */ 4853 case 0x1f: /* nop */ 4854 break; 4855 case 0x20: /* mov cr, reg */ 4856 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); 4857 break; 4858 case 0x21: /* mov from dr to reg */ 4859 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); 4860 break; 4861 case 0x40 ... 0x4f: /* cmov */ 4862 if (test_cc(ctxt->b, ctxt->eflags)) 4863 ctxt->dst.val = ctxt->src.val; 4864 else if (ctxt->mode != X86EMUL_MODE_PROT64 || 4865 ctxt->op_bytes != 4) 4866 ctxt->dst.type = OP_NONE; /* no writeback */ 4867 break; 4868 case 0x80 ... 0x8f: /* jnz rel, etc*/ 4869 if (test_cc(ctxt->b, ctxt->eflags)) 4870 jmp_rel(ctxt, ctxt->src.val); 4871 break; 4872 case 0x90 ... 0x9f: /* setcc r/m8 */ 4873 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); 4874 break; 4875 case 0xae: /* clflush */ 4876 break; 4877 case 0xb6 ... 0xb7: /* movzx */ 4878 ctxt->dst.bytes = ctxt->op_bytes; 4879 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val 4880 : (u16) ctxt->src.val; 4881 break; 4882 case 0xbe ... 0xbf: /* movsx */ 4883 ctxt->dst.bytes = ctxt->op_bytes; 4884 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : 4885 (s16) ctxt->src.val; 4886 break; 4887 case 0xc3: /* movnti */ 4888 ctxt->dst.bytes = ctxt->op_bytes; 4889 ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val : 4890 (u32) ctxt->src.val; 4891 break; 4892 default: 4893 goto cannot_emulate; 4894 } 4895 4896 threebyte_insn: 4897 4898 if (rc != X86EMUL_CONTINUE) 4899 goto done; 4900 4901 goto writeback; 4902 4903 cannot_emulate: 4904 return EMULATION_FAILED; 4905 } 4906 4907 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) 4908 { 4909 invalidate_registers(ctxt); 4910 } 4911 4912 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) 4913 { 4914 writeback_registers(ctxt); 4915 } 4916