/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
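/*
 * A minimal sketch (illustrative only, not used by the emulator) of how an
 * operand-type field is recovered from the packed flags word, given the
 * DstShift/SrcShift/Src2Shift values defined below:
 *
 *	u64 dst  = (ctxt->d >> DstShift)  & OpMask;	(one of the Op* values)
 *	u64 src  = (ctxt->d >> SrcShift)  & OpMask;
 *	u64 src2 = (ctxt->d >> Src2Shift) & OpMask;
 */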
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define InstrDual   (6<<15)	/* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)	/* Different instruction for 32/64 bit */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
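/*
 * A minimal sketch of the entry-point arithmetic this layout enables (the
 * actual dispatch lives later in this file, in x86_emulate_insn): the
 * byte/word/long/quad variants of an operation sit at consecutive
 * FASTOP_SIZE-aligned addresses, so the caller can index by operand size
 * instead of going through a jump table:
 *
 *	void (*fop)(struct fastop *) = em_add;		  (table start)
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;	  (1/2/4/8 -> slot 0..3)
 *	fastop(ctxt, fop);
 */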
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
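/*
 * Usage sketch for the lazy GPR cache above (illustrative only):
 *
 *	ulong v = reg_read(ctxt, VCPU_REGS_RAX);    reads through on first use
 *	*reg_rmw(ctxt, VCPU_REGS_RAX) = v + 1;      marks RAX valid and dirty
 *	writeback_registers(ctxt);                  flushes only dirty registers
 *
 * invalidate_registers() drops both the valid and dirty sets, forcing the
 * next reg_read() to go back to ops->read_gpr().
 */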
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
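/*
 * For orientation, an illustrative sketch (not generated code) of what
 * FASTOP2(add) above emits: one FASTOP_SIZE-aligned stub per operand size,
 * roughly
 *
 *	em_add:                     addb %dl, %al;    ret
 *	em_add + 1*FASTOP_SIZE:     addw %dx, %ax;    ret
 *	em_add + 2*FASTOP_SIZE:     addl %edx, %eax;  ret
 *	em_add + 3*FASTOP_SIZE:     addq %rdx, %rax;  ret   (64-bit only)
 *
 * which is what makes the size-indexed dispatch described earlier work.
 */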
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp 2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;	/* 64b: zero-extend */
		break;
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
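/*
 * Worked example for the masking helpers above (illustrative): with a
 * 16-bit stack segment (ss.d == 0), stack_mask() returns 0xffff, so
 *
 *	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), 0xffff, -2);
 *
 * decrements only SP and leaves bits 16..63 of RSP untouched, exactly as a
 * 16-bit push would; stack_size() correspondingly reports 2 bytes.
 */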
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
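/*
 * For example (illustrative only): a 16-byte MOVDQA access decodes with
 * Aligned, so insn_alignment() below returns the operand size (16) and a
 * misaligned linear address takes #GP(0); the same access via MOVDQU
 * decodes with Unaligned and any byte alignment is accepted.
 */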
660 */ 661 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size) 662 { 663 u64 alignment = ctxt->d & AlignMask; 664 665 if (likely(size < 16)) 666 return 1; 667 668 switch (alignment) { 669 case Unaligned: 670 case Avx: 671 return 1; 672 case Aligned16: 673 return 16; 674 case Aligned: 675 default: 676 return size; 677 } 678 } 679 680 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, 681 struct segmented_address addr, 682 unsigned *max_size, unsigned size, 683 bool write, bool fetch, 684 enum x86emul_mode mode, ulong *linear) 685 { 686 struct desc_struct desc; 687 bool usable; 688 ulong la; 689 u32 lim; 690 u16 sel; 691 692 la = seg_base(ctxt, addr.seg) + addr.ea; 693 *max_size = 0; 694 switch (mode) { 695 case X86EMUL_MODE_PROT64: 696 *linear = la; 697 if (is_noncanonical_address(la)) 698 goto bad; 699 700 *max_size = min_t(u64, ~0u, (1ull << 48) - la); 701 if (size > *max_size) 702 goto bad; 703 break; 704 default: 705 *linear = la = (u32)la; 706 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, 707 addr.seg); 708 if (!usable) 709 goto bad; 710 /* code segment in protected mode or read-only data segment */ 711 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) 712 || !(desc.type & 2)) && write) 713 goto bad; 714 /* unreadable code segment */ 715 if (!fetch && (desc.type & 8) && !(desc.type & 2)) 716 goto bad; 717 lim = desc_limit_scaled(&desc); 718 if (!(desc.type & 8) && (desc.type & 4)) { 719 /* expand-down segment */ 720 if (addr.ea <= lim) 721 goto bad; 722 lim = desc.d ? 0xffffffff : 0xffff; 723 } 724 if (addr.ea > lim) 725 goto bad; 726 if (lim == 0xffffffff) 727 *max_size = ~0u; 728 else { 729 *max_size = (u64)lim + 1 - addr.ea; 730 if (size > *max_size) 731 goto bad; 732 } 733 break; 734 } 735 if (la & (insn_alignment(ctxt, size) - 1)) 736 return emulate_gp(ctxt, 0); 737 return X86EMUL_CONTINUE; 738 bad: 739 if (addr.seg == VCPU_SREG_SS) 740 return emulate_ss(ctxt, 0); 741 else 742 return emulate_gp(ctxt, 0); 743 } 744 745 static int linearize(struct x86_emulate_ctxt *ctxt, 746 struct segmented_address addr, 747 unsigned size, bool write, 748 ulong *linear) 749 { 750 unsigned max_size; 751 return __linearize(ctxt, addr, &max_size, size, write, false, 752 ctxt->mode, linear); 753 } 754 755 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, 756 enum x86emul_mode mode) 757 { 758 ulong linear; 759 int rc; 760 unsigned max_size; 761 struct segmented_address addr = { .seg = VCPU_SREG_CS, 762 .ea = dst }; 763 764 if (ctxt->op_bytes != sizeof(unsigned long)) 765 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); 766 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); 767 if (rc == X86EMUL_CONTINUE) 768 ctxt->_eip = addr.ea; 769 return rc; 770 } 771 772 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) 773 { 774 return assign_eip(ctxt, dst, ctxt->mode); 775 } 776 777 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, 778 const struct desc_struct *cs_desc) 779 { 780 enum x86emul_mode mode = ctxt->mode; 781 int rc; 782 783 #ifdef CONFIG_X86_64 784 if (ctxt->mode >= X86EMUL_MODE_PROT16) { 785 if (cs_desc->l) { 786 u64 efer = 0; 787 788 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 789 if (efer & EFER_LMA) 790 mode = X86EMUL_MODE_PROT64; 791 } else 792 mode = X86EMUL_MODE_PROT32; /* temporary value */ 793 } 794 #endif 795 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) 796 mode = cs_desc->d ? 
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* 15UL ^ cur_size equals 15 - cur_size while cur_size is in 0..15 */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and one page has
	 * already been loaded at the beginning of x86_decode_insn.  So,
	 * if we still do not have enough bytes, we must have hit the
	 * 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
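/*
 * Example for test_cc() above (illustrative): condition 0x4 is "equal/zero",
 * so the call lands on the setz stub at em_setcc + 4 * 4 - each FOP_SETCC
 * stub is 4-byte aligned, mirroring the FASTOP_SIZE indexing used for the
 * arithmetic fastops.
 */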
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
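/*
 * 16-bit ModR/M example for the tables above (illustrative): mod=01, rm=2
 * with an 8-bit displacement of 0x10 decodes to an effective address of
 * BP + SI + 0x10, and because BP is involved the default segment becomes
 * SS rather than DS.
 */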
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
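/*
 * Bit-operand example for fetch_bit_operand() above (illustrative): for a
 * 32-bit "bt mem, reg" with a source value of 100, the displacement becomes
 * (100 & ~31) >> 3 = 12 bytes and the in-word bit offset 100 & 31 = 4, so
 * large bit indices walk the address forward instead of overflowing.
 */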
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
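/*
 * Selector decoding example for the lookups above (illustrative): selector
 * 0x2b has index 5, TI=0 and RPL=3, so get_descriptor_ptr() resolves it to
 * gdt.address + 5 * 8; with TI=1 the LDT base would be used instead.
 */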
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}
	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
1778 */ 1779 if (seg == VCPU_SREG_SS && selector == 3 && 1780 ctxt->mode == X86EMUL_MODE_PROT64) 1781 return emulate_exception(ctxt, GP_VECTOR, 0, true); 1782 1783 return __load_segment_descriptor(ctxt, selector, seg, cpl, 1784 X86_TRANSFER_NONE, NULL); 1785 } 1786 1787 static void write_register_operand(struct operand *op) 1788 { 1789 return assign_register(op->addr.reg, op->val, op->bytes); 1790 } 1791 1792 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) 1793 { 1794 switch (op->type) { 1795 case OP_REG: 1796 write_register_operand(op); 1797 break; 1798 case OP_MEM: 1799 if (ctxt->lock_prefix) 1800 return segmented_cmpxchg(ctxt, 1801 op->addr.mem, 1802 &op->orig_val, 1803 &op->val, 1804 op->bytes); 1805 else 1806 return segmented_write(ctxt, 1807 op->addr.mem, 1808 &op->val, 1809 op->bytes); 1810 break; 1811 case OP_MEM_STR: 1812 return segmented_write(ctxt, 1813 op->addr.mem, 1814 op->data, 1815 op->bytes * op->count); 1816 break; 1817 case OP_XMM: 1818 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); 1819 break; 1820 case OP_MM: 1821 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); 1822 break; 1823 case OP_NONE: 1824 /* no writeback */ 1825 break; 1826 default: 1827 break; 1828 } 1829 return X86EMUL_CONTINUE; 1830 } 1831 1832 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) 1833 { 1834 struct segmented_address addr; 1835 1836 rsp_increment(ctxt, -bytes); 1837 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); 1838 addr.seg = VCPU_SREG_SS; 1839 1840 return segmented_write(ctxt, addr, data, bytes); 1841 } 1842 1843 static int em_push(struct x86_emulate_ctxt *ctxt) 1844 { 1845 /* Disable writeback. */ 1846 ctxt->dst.type = OP_NONE; 1847 return push(ctxt, &ctxt->src.val, ctxt->op_bytes); 1848 } 1849 1850 static int emulate_pop(struct x86_emulate_ctxt *ctxt, 1851 void *dest, int len) 1852 { 1853 int rc; 1854 struct segmented_address addr; 1855 1856 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); 1857 addr.seg = VCPU_SREG_SS; 1858 rc = segmented_read(ctxt, addr, dest, len); 1859 if (rc != X86EMUL_CONTINUE) 1860 return rc; 1861 1862 rsp_increment(ctxt, len); 1863 return rc; 1864 } 1865 1866 static int em_pop(struct x86_emulate_ctxt *ctxt) 1867 { 1868 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 1869 } 1870 1871 static int emulate_popf(struct x86_emulate_ctxt *ctxt, 1872 void *dest, int len) 1873 { 1874 int rc; 1875 unsigned long val, change_mask; 1876 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; 1877 int cpl = ctxt->ops->cpl(ctxt); 1878 1879 rc = emulate_pop(ctxt, &val, len); 1880 if (rc != X86EMUL_CONTINUE) 1881 return rc; 1882 1883 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 1884 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF | 1885 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT | 1886 X86_EFLAGS_AC | X86_EFLAGS_ID; 1887 1888 switch(ctxt->mode) { 1889 case X86EMUL_MODE_PROT64: 1890 case X86EMUL_MODE_PROT32: 1891 case X86EMUL_MODE_PROT16: 1892 if (cpl == 0) 1893 change_mask |= X86_EFLAGS_IOPL; 1894 if (cpl <= iopl) 1895 change_mask |= X86_EFLAGS_IF; 1896 break; 1897 case X86EMUL_MODE_VM86: 1898 if (iopl < 3) 1899 return emulate_gp(ctxt, 0); 1900 change_mask |= X86_EFLAGS_IF; 1901 break; 1902 default: /* real mode */ 1903 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF); 1904 break; 1905 } 1906 1907 *(unsigned long *)dest = 1908 (ctxt->eflags & ~change_mask) | (val & change_mask); 1909 1910 return rc; 1911 } 1912 1913 static int em_popf(struct x86_emulate_ctxt *ctxt) 1914 { 1915 
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}
1990 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); 1991 1992 rc = em_push(ctxt); 1993 if (rc != X86EMUL_CONTINUE) 1994 return rc; 1995 1996 ++reg; 1997 } 1998 1999 return rc; 2000 } 2001 2002 static int em_pushf(struct x86_emulate_ctxt *ctxt) 2003 { 2004 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; 2005 return em_push(ctxt); 2006 } 2007 2008 static int em_popa(struct x86_emulate_ctxt *ctxt) 2009 { 2010 int rc = X86EMUL_CONTINUE; 2011 int reg = VCPU_REGS_RDI; 2012 u32 val; 2013 2014 while (reg >= VCPU_REGS_RAX) { 2015 if (reg == VCPU_REGS_RSP) { 2016 rsp_increment(ctxt, ctxt->op_bytes); 2017 --reg; 2018 } 2019 2020 rc = emulate_pop(ctxt, &val, ctxt->op_bytes); 2021 if (rc != X86EMUL_CONTINUE) 2022 break; 2023 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); 2024 --reg; 2025 } 2026 return rc; 2027 } 2028 2029 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) 2030 { 2031 const struct x86_emulate_ops *ops = ctxt->ops; 2032 int rc; 2033 struct desc_ptr dt; 2034 gva_t cs_addr; 2035 gva_t eip_addr; 2036 u16 cs, eip; 2037 2038 /* TODO: Add limit checks */ 2039 ctxt->src.val = ctxt->eflags; 2040 rc = em_push(ctxt); 2041 if (rc != X86EMUL_CONTINUE) 2042 return rc; 2043 2044 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); 2045 2046 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); 2047 rc = em_push(ctxt); 2048 if (rc != X86EMUL_CONTINUE) 2049 return rc; 2050 2051 ctxt->src.val = ctxt->_eip; 2052 rc = em_push(ctxt); 2053 if (rc != X86EMUL_CONTINUE) 2054 return rc; 2055 2056 ops->get_idt(ctxt, &dt); 2057 2058 eip_addr = dt.address + (irq << 2); 2059 cs_addr = dt.address + (irq << 2) + 2; 2060 2061 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); 2062 if (rc != X86EMUL_CONTINUE) 2063 return rc; 2064 2065 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); 2066 if (rc != X86EMUL_CONTINUE) 2067 return rc; 2068 2069 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); 2070 if (rc != X86EMUL_CONTINUE) 2071 return rc; 2072 2073 ctxt->_eip = eip; 2074 2075 return rc; 2076 } 2077 2078 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) 2079 { 2080 int rc; 2081 2082 invalidate_registers(ctxt); 2083 rc = __emulate_int_real(ctxt, irq); 2084 if (rc == X86EMUL_CONTINUE) 2085 writeback_registers(ctxt); 2086 return rc; 2087 } 2088 2089 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) 2090 { 2091 switch(ctxt->mode) { 2092 case X86EMUL_MODE_REAL: 2093 return __emulate_int_real(ctxt, irq); 2094 case X86EMUL_MODE_VM86: 2095 case X86EMUL_MODE_PROT16: 2096 case X86EMUL_MODE_PROT32: 2097 case X86EMUL_MODE_PROT64: 2098 default: 2099 /* Protected mode interrupts unimplemented yet */ 2100 return X86EMUL_UNHANDLEABLE; 2101 } 2102 } 2103 2104 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) 2105 { 2106 int rc = X86EMUL_CONTINUE; 2107 unsigned long temp_eip = 0; 2108 unsigned long temp_eflags = 0; 2109 unsigned long cs = 0; 2110 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 2111 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF | 2112 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | 2113 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | 2114 X86_EFLAGS_AC | X86_EFLAGS_ID | 2115 X86_EFLAGS_FIXED; 2116 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | 2117 X86_EFLAGS_VIP; 2118 2119 /* TODO: Add stack limit check */ 2120 2121 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); 2122 2123 if (rc != X86EMUL_CONTINUE) 2124 return rc; 2125 2126 if (temp_eip & 
~0xffff) 2127 return emulate_gp(ctxt, 0); 2128 2129 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2130 2131 if (rc != X86EMUL_CONTINUE) 2132 return rc; 2133 2134 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); 2135 2136 if (rc != X86EMUL_CONTINUE) 2137 return rc; 2138 2139 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); 2140 2141 if (rc != X86EMUL_CONTINUE) 2142 return rc; 2143 2144 ctxt->_eip = temp_eip; 2145 2146 if (ctxt->op_bytes == 4) 2147 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); 2148 else if (ctxt->op_bytes == 2) { 2149 ctxt->eflags &= ~0xffff; 2150 ctxt->eflags |= temp_eflags; 2151 } 2152 2153 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ 2154 ctxt->eflags |= X86_EFLAGS_FIXED; 2155 ctxt->ops->set_nmi_mask(ctxt, false); 2156 2157 return rc; 2158 } 2159 2160 static int em_iret(struct x86_emulate_ctxt *ctxt) 2161 { 2162 switch(ctxt->mode) { 2163 case X86EMUL_MODE_REAL: 2164 return emulate_iret_real(ctxt); 2165 case X86EMUL_MODE_VM86: 2166 case X86EMUL_MODE_PROT16: 2167 case X86EMUL_MODE_PROT32: 2168 case X86EMUL_MODE_PROT64: 2169 default: 2170 /* iret from protected mode unimplemented yet */ 2171 return X86EMUL_UNHANDLEABLE; 2172 } 2173 } 2174 2175 static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 2176 { 2177 int rc; 2178 unsigned short sel; 2179 struct desc_struct new_desc; 2180 u8 cpl = ctxt->ops->cpl(ctxt); 2181 2182 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2183 2184 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, 2185 X86_TRANSFER_CALL_JMP, 2186 &new_desc); 2187 if (rc != X86EMUL_CONTINUE) 2188 return rc; 2189 2190 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 2191 /* Error handling is not implemented. */ 2192 if (rc != X86EMUL_CONTINUE) 2193 return X86EMUL_UNHANDLEABLE; 2194 2195 return rc; 2196 } 2197 2198 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) 2199 { 2200 return assign_eip_near(ctxt, ctxt->src.val); 2201 } 2202 2203 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) 2204 { 2205 int rc; 2206 long int old_eip; 2207 2208 old_eip = ctxt->_eip; 2209 rc = assign_eip_near(ctxt, ctxt->src.val); 2210 if (rc != X86EMUL_CONTINUE) 2211 return rc; 2212 ctxt->src.val = old_eip; 2213 rc = em_push(ctxt); 2214 return rc; 2215 } 2216 2217 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) 2218 { 2219 u64 old = ctxt->dst.orig_val64; 2220 2221 if (ctxt->dst.bytes == 16) 2222 return X86EMUL_UNHANDLEABLE; 2223 2224 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || 2225 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { 2226 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); 2227 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); 2228 ctxt->eflags &= ~X86_EFLAGS_ZF; 2229 } else { 2230 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | 2231 (u32) reg_read(ctxt, VCPU_REGS_RBX); 2232 2233 ctxt->eflags |= X86_EFLAGS_ZF; 2234 } 2235 return X86EMUL_CONTINUE; 2236 } 2237 2238 static int em_ret(struct x86_emulate_ctxt *ctxt) 2239 { 2240 int rc; 2241 unsigned long eip; 2242 2243 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2244 if (rc != X86EMUL_CONTINUE) 2245 return rc; 2246 2247 return assign_eip_near(ctxt, eip); 2248 } 2249 2250 static int em_ret_far(struct x86_emulate_ctxt *ctxt) 2251 { 2252 int rc; 2253 unsigned long eip, cs; 2254 int cpl = ctxt->ops->cpl(ctxt); 2255 struct desc_struct new_desc; 2256 2257 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2258 if (rc != X86EMUL_CONTINUE) 2259 return rc; 2260 rc = emulate_pop(ctxt, &cs, 
ctxt->op_bytes); 2261 if (rc != X86EMUL_CONTINUE) 2262 return rc; 2263 /* Outer-privilege level return is not implemented */ 2264 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) 2265 return X86EMUL_UNHANDLEABLE; 2266 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, 2267 X86_TRANSFER_RET, 2268 &new_desc); 2269 if (rc != X86EMUL_CONTINUE) 2270 return rc; 2271 rc = assign_eip_far(ctxt, eip, &new_desc); 2272 /* Error handling is not implemented. */ 2273 if (rc != X86EMUL_CONTINUE) 2274 return X86EMUL_UNHANDLEABLE; 2275 2276 return rc; 2277 } 2278 2279 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) 2280 { 2281 int rc; 2282 2283 rc = em_ret_far(ctxt); 2284 if (rc != X86EMUL_CONTINUE) 2285 return rc; 2286 rsp_increment(ctxt, ctxt->src.val); 2287 return X86EMUL_CONTINUE; 2288 } 2289 2290 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2291 { 2292 /* Save real source value, then compare EAX against destination. */ 2293 ctxt->dst.orig_val = ctxt->dst.val; 2294 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); 2295 ctxt->src.orig_val = ctxt->src.val; 2296 ctxt->src.val = ctxt->dst.orig_val; 2297 fastop(ctxt, em_cmp); 2298 2299 if (ctxt->eflags & X86_EFLAGS_ZF) { 2300 /* Success: write back to memory; no update of EAX */ 2301 ctxt->src.type = OP_NONE; 2302 ctxt->dst.val = ctxt->src.orig_val; 2303 } else { 2304 /* Failure: write the value we saw to EAX. */ 2305 ctxt->src.type = OP_REG; 2306 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 2307 ctxt->src.val = ctxt->dst.orig_val; 2308 /* Create write-cycle to dest by writing the same value */ 2309 ctxt->dst.val = ctxt->dst.orig_val; 2310 } 2311 return X86EMUL_CONTINUE; 2312 } 2313 2314 static int em_lseg(struct x86_emulate_ctxt *ctxt) 2315 { 2316 int seg = ctxt->src2.val; 2317 unsigned short sel; 2318 int rc; 2319 2320 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2321 2322 rc = load_segment_descriptor(ctxt, sel, seg); 2323 if (rc != X86EMUL_CONTINUE) 2324 return rc; 2325 2326 ctxt->dst.val = ctxt->src.val; 2327 return rc; 2328 } 2329 2330 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) 2331 { 2332 u32 eax, ebx, ecx, edx; 2333 2334 eax = 0x80000001; 2335 ecx = 0; 2336 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2337 return edx & bit(X86_FEATURE_LM); 2338 } 2339 2340 #define GET_SMSTATE(type, smbase, offset) \ 2341 ({ \ 2342 type __val; \ 2343 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \ 2344 sizeof(__val)); \ 2345 if (r != X86EMUL_CONTINUE) \ 2346 return X86EMUL_UNHANDLEABLE; \ 2347 __val; \ 2348 }) 2349 2350 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) 2351 { 2352 desc->g = (flags >> 23) & 1; 2353 desc->d = (flags >> 22) & 1; 2354 desc->l = (flags >> 21) & 1; 2355 desc->avl = (flags >> 20) & 1; 2356 desc->p = (flags >> 15) & 1; 2357 desc->dpl = (flags >> 13) & 3; 2358 desc->s = (flags >> 12) & 1; 2359 desc->type = (flags >> 8) & 15; 2360 } 2361 2362 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) 2363 { 2364 struct desc_struct desc; 2365 int offset; 2366 u16 selector; 2367 2368 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4); 2369 2370 if (n < 3) 2371 offset = 0x7f84 + n * 12; 2372 else 2373 offset = 0x7f2c + (n - 3) * 12; 2374 2375 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8)); 2376 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4)); 2377 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset)); 2378 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); 2379 return X86EMUL_CONTINUE; 2380 
} 2381 2382 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) 2383 { 2384 struct desc_struct desc; 2385 int offset; 2386 u16 selector; 2387 u32 base3; 2388 2389 offset = 0x7e00 + n * 16; 2390 2391 selector = GET_SMSTATE(u16, smbase, offset); 2392 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8); 2393 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4)); 2394 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8)); 2395 base3 = GET_SMSTATE(u32, smbase, offset + 12); 2396 2397 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); 2398 return X86EMUL_CONTINUE; 2399 } 2400 2401 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, 2402 u64 cr0, u64 cr4) 2403 { 2404 int bad; 2405 2406 /* 2407 * First enable PAE, long mode needs it before CR0.PG = 1 is set. 2408 * Then enable protected mode. However, PCID cannot be enabled 2409 * if EFER.LMA=0, so set it separately. 2410 */ 2411 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); 2412 if (bad) 2413 return X86EMUL_UNHANDLEABLE; 2414 2415 bad = ctxt->ops->set_cr(ctxt, 0, cr0); 2416 if (bad) 2417 return X86EMUL_UNHANDLEABLE; 2418 2419 if (cr4 & X86_CR4_PCIDE) { 2420 bad = ctxt->ops->set_cr(ctxt, 4, cr4); 2421 if (bad) 2422 return X86EMUL_UNHANDLEABLE; 2423 } 2424 2425 return X86EMUL_CONTINUE; 2426 } 2427 2428 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) 2429 { 2430 struct desc_struct desc; 2431 struct desc_ptr dt; 2432 u16 selector; 2433 u32 val, cr0, cr4; 2434 int i; 2435 2436 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); 2437 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); 2438 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; 2439 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); 2440 2441 for (i = 0; i < 8; i++) 2442 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4); 2443 2444 val = GET_SMSTATE(u32, smbase, 0x7fcc); 2445 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); 2446 val = GET_SMSTATE(u32, smbase, 0x7fc8); 2447 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); 2448 2449 selector = GET_SMSTATE(u32, smbase, 0x7fc4); 2450 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64)); 2451 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60)); 2452 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c)); 2453 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); 2454 2455 selector = GET_SMSTATE(u32, smbase, 0x7fc0); 2456 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80)); 2457 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c)); 2458 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78)); 2459 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); 2460 2461 dt.address = GET_SMSTATE(u32, smbase, 0x7f74); 2462 dt.size = GET_SMSTATE(u32, smbase, 0x7f70); 2463 ctxt->ops->set_gdt(ctxt, &dt); 2464 2465 dt.address = GET_SMSTATE(u32, smbase, 0x7f58); 2466 dt.size = GET_SMSTATE(u32, smbase, 0x7f54); 2467 ctxt->ops->set_idt(ctxt, &dt); 2468 2469 for (i = 0; i < 6; i++) { 2470 int r = rsm_load_seg_32(ctxt, smbase, i); 2471 if (r != X86EMUL_CONTINUE) 2472 return r; 2473 } 2474 2475 cr4 = GET_SMSTATE(u32, smbase, 0x7f14); 2476 2477 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); 2478 2479 return rsm_enter_protected_mode(ctxt, cr0, cr4); 2480 } 2481 2482 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) 2483 { 2484 struct desc_struct desc; 2485 struct desc_ptr dt; 2486 u64 val, cr0, cr4; 2487 u32 base3; 2488 u16 
selector; 2489 int i, r; 2490 2491 for (i = 0; i < 16; i++) 2492 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); 2493 2494 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78); 2495 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED; 2496 2497 val = GET_SMSTATE(u32, smbase, 0x7f68); 2498 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); 2499 val = GET_SMSTATE(u32, smbase, 0x7f60); 2500 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); 2501 2502 cr0 = GET_SMSTATE(u64, smbase, 0x7f58); 2503 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); 2504 cr4 = GET_SMSTATE(u64, smbase, 0x7f48); 2505 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); 2506 val = GET_SMSTATE(u64, smbase, 0x7ed0); 2507 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); 2508 2509 selector = GET_SMSTATE(u32, smbase, 0x7e90); 2510 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8); 2511 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94)); 2512 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98)); 2513 base3 = GET_SMSTATE(u32, smbase, 0x7e9c); 2514 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); 2515 2516 dt.size = GET_SMSTATE(u32, smbase, 0x7e84); 2517 dt.address = GET_SMSTATE(u64, smbase, 0x7e88); 2518 ctxt->ops->set_idt(ctxt, &dt); 2519 2520 selector = GET_SMSTATE(u32, smbase, 0x7e70); 2521 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8); 2522 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74)); 2523 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78)); 2524 base3 = GET_SMSTATE(u32, smbase, 0x7e7c); 2525 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); 2526 2527 dt.size = GET_SMSTATE(u32, smbase, 0x7e64); 2528 dt.address = GET_SMSTATE(u64, smbase, 0x7e68); 2529 ctxt->ops->set_gdt(ctxt, &dt); 2530 2531 r = rsm_enter_protected_mode(ctxt, cr0, cr4); 2532 if (r != X86EMUL_CONTINUE) 2533 return r; 2534 2535 for (i = 0; i < 6; i++) { 2536 r = rsm_load_seg_64(ctxt, smbase, i); 2537 if (r != X86EMUL_CONTINUE) 2538 return r; 2539 } 2540 2541 return X86EMUL_CONTINUE; 2542 } 2543 2544 static int em_rsm(struct x86_emulate_ctxt *ctxt) 2545 { 2546 unsigned long cr0, cr4, efer; 2547 u64 smbase; 2548 int ret; 2549 2550 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) 2551 return emulate_ud(ctxt); 2552 2553 /* 2554 * Get back to real mode, to prepare a safe state in which to load 2555 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU 2556 * supports long mode. 2557 */ 2558 cr4 = ctxt->ops->get_cr(ctxt, 4); 2559 if (emulator_has_longmode(ctxt)) { 2560 struct desc_struct cs_desc; 2561 2562 /* Zero CR4.PCIDE before CR0.PG. */ 2563 if (cr4 & X86_CR4_PCIDE) { 2564 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); 2565 cr4 &= ~X86_CR4_PCIDE; 2566 } 2567 2568 /* A 32-bit code segment is required to clear EFER.LMA. */ 2569 memset(&cs_desc, 0, sizeof(cs_desc)); 2570 cs_desc.type = 0xb; 2571 cs_desc.s = cs_desc.g = cs_desc.p = 1; 2572 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS); 2573 } 2574 2575 /* For the 64-bit case, this will clear EFER.LMA. */ 2576 cr0 = ctxt->ops->get_cr(ctxt, 0); 2577 if (cr0 & X86_CR0_PE) 2578 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); 2579 2580 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */ 2581 if (cr4 & X86_CR4_PAE) 2582 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); 2583 2584 /* And finally go back to 32-bit mode. 
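	 * Writing EFER = 0 below also clears LME/SCE; in the long-mode
	 * case, rsm_load_state_64() restores the saved EFER (minus
	 * EFER.LMA) from the SMM state-save area afterwards.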
	 */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);

	smbase = ctxt->ops->get_smbase(ctxt);
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so the check
	 * only becomes vendor specific (via cpuid) when other modes are
	 * active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long
	 * mode. Also, a 64-bit guest running a 32-bit compat
	 * application will #UD! While this behaviour can be fixed (by
	 * emulating it to match the AMD response), AMD CPUs can't be
	 * made to behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules...
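	 * i.e. report SYSCALL as not available, so that em_syscall()
	 * raises #UD outside of 64-bit mode.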
*/ 2686 return false; 2687 } 2688 2689 static int em_syscall(struct x86_emulate_ctxt *ctxt) 2690 { 2691 const struct x86_emulate_ops *ops = ctxt->ops; 2692 struct desc_struct cs, ss; 2693 u64 msr_data; 2694 u16 cs_sel, ss_sel; 2695 u64 efer = 0; 2696 2697 /* syscall is not available in real mode */ 2698 if (ctxt->mode == X86EMUL_MODE_REAL || 2699 ctxt->mode == X86EMUL_MODE_VM86) 2700 return emulate_ud(ctxt); 2701 2702 if (!(em_syscall_is_enabled(ctxt))) 2703 return emulate_ud(ctxt); 2704 2705 ops->get_msr(ctxt, MSR_EFER, &efer); 2706 setup_syscalls_segments(ctxt, &cs, &ss); 2707 2708 if (!(efer & EFER_SCE)) 2709 return emulate_ud(ctxt); 2710 2711 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2712 msr_data >>= 32; 2713 cs_sel = (u16)(msr_data & 0xfffc); 2714 ss_sel = (u16)(msr_data + 8); 2715 2716 if (efer & EFER_LMA) { 2717 cs.d = 0; 2718 cs.l = 1; 2719 } 2720 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2721 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2722 2723 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; 2724 if (efer & EFER_LMA) { 2725 #ifdef CONFIG_X86_64 2726 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; 2727 2728 ops->get_msr(ctxt, 2729 ctxt->mode == X86EMUL_MODE_PROT64 ? 2730 MSR_LSTAR : MSR_CSTAR, &msr_data); 2731 ctxt->_eip = msr_data; 2732 2733 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); 2734 ctxt->eflags &= ~msr_data; 2735 ctxt->eflags |= X86_EFLAGS_FIXED; 2736 #endif 2737 } else { 2738 /* legacy mode */ 2739 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2740 ctxt->_eip = (u32)msr_data; 2741 2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2743 } 2744 2745 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 2746 return X86EMUL_CONTINUE; 2747 } 2748 2749 static int em_sysenter(struct x86_emulate_ctxt *ctxt) 2750 { 2751 const struct x86_emulate_ops *ops = ctxt->ops; 2752 struct desc_struct cs, ss; 2753 u64 msr_data; 2754 u16 cs_sel, ss_sel; 2755 u64 efer = 0; 2756 2757 ops->get_msr(ctxt, MSR_EFER, &efer); 2758 /* inject #GP if in real mode */ 2759 if (ctxt->mode == X86EMUL_MODE_REAL) 2760 return emulate_gp(ctxt, 0); 2761 2762 /* 2763 * Not recognized on AMD in compat mode (but is recognized in legacy 2764 * mode). 2765 */ 2766 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) 2767 && !vendor_intel(ctxt)) 2768 return emulate_ud(ctxt); 2769 2770 /* sysenter/sysexit have not been tested in 64bit mode. */ 2771 if (ctxt->mode == X86EMUL_MODE_PROT64) 2772 return X86EMUL_UNHANDLEABLE; 2773 2774 setup_syscalls_segments(ctxt, &cs, &ss); 2775 2776 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2777 if ((msr_data & 0xfffc) == 0x0) 2778 return emulate_gp(ctxt, 0); 2779 2780 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2781 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; 2782 ss_sel = cs_sel + 8; 2783 if (efer & EFER_LMA) { 2784 cs.d = 0; 2785 cs.l = 1; 2786 } 2787 2788 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2789 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2790 2791 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2792 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; 2793 2794 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2795 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? 
msr_data : 2796 (u32)msr_data; 2797 2798 return X86EMUL_CONTINUE; 2799 } 2800 2801 static int em_sysexit(struct x86_emulate_ctxt *ctxt) 2802 { 2803 const struct x86_emulate_ops *ops = ctxt->ops; 2804 struct desc_struct cs, ss; 2805 u64 msr_data, rcx, rdx; 2806 int usermode; 2807 u16 cs_sel = 0, ss_sel = 0; 2808 2809 /* inject #GP if in real mode or Virtual 8086 mode */ 2810 if (ctxt->mode == X86EMUL_MODE_REAL || 2811 ctxt->mode == X86EMUL_MODE_VM86) 2812 return emulate_gp(ctxt, 0); 2813 2814 setup_syscalls_segments(ctxt, &cs, &ss); 2815 2816 if ((ctxt->rex_prefix & 0x8) != 0x0) 2817 usermode = X86EMUL_MODE_PROT64; 2818 else 2819 usermode = X86EMUL_MODE_PROT32; 2820 2821 rcx = reg_read(ctxt, VCPU_REGS_RCX); 2822 rdx = reg_read(ctxt, VCPU_REGS_RDX); 2823 2824 cs.dpl = 3; 2825 ss.dpl = 3; 2826 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2827 switch (usermode) { 2828 case X86EMUL_MODE_PROT32: 2829 cs_sel = (u16)(msr_data + 16); 2830 if ((msr_data & 0xfffc) == 0x0) 2831 return emulate_gp(ctxt, 0); 2832 ss_sel = (u16)(msr_data + 24); 2833 rcx = (u32)rcx; 2834 rdx = (u32)rdx; 2835 break; 2836 case X86EMUL_MODE_PROT64: 2837 cs_sel = (u16)(msr_data + 32); 2838 if (msr_data == 0x0) 2839 return emulate_gp(ctxt, 0); 2840 ss_sel = cs_sel + 8; 2841 cs.d = 0; 2842 cs.l = 1; 2843 if (is_noncanonical_address(rcx) || 2844 is_noncanonical_address(rdx)) 2845 return emulate_gp(ctxt, 0); 2846 break; 2847 } 2848 cs_sel |= SEGMENT_RPL_MASK; 2849 ss_sel |= SEGMENT_RPL_MASK; 2850 2851 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2852 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2853 2854 ctxt->_eip = rdx; 2855 *reg_write(ctxt, VCPU_REGS_RSP) = rcx; 2856 2857 return X86EMUL_CONTINUE; 2858 } 2859 2860 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) 2861 { 2862 int iopl; 2863 if (ctxt->mode == X86EMUL_MODE_REAL) 2864 return false; 2865 if (ctxt->mode == X86EMUL_MODE_VM86) 2866 return true; 2867 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; 2868 return ctxt->ops->cpl(ctxt) > iopl; 2869 } 2870 2871 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, 2872 u16 port, u16 len) 2873 { 2874 const struct x86_emulate_ops *ops = ctxt->ops; 2875 struct desc_struct tr_seg; 2876 u32 base3; 2877 int r; 2878 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; 2879 unsigned mask = (1 << len) - 1; 2880 unsigned long base; 2881 2882 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); 2883 if (!tr_seg.p) 2884 return false; 2885 if (desc_limit_scaled(&tr_seg) < 103) 2886 return false; 2887 base = get_desc_base(&tr_seg); 2888 #ifdef CONFIG_X86_64 2889 base |= ((u64)base3) << 32; 2890 #endif 2891 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); 2892 if (r != X86EMUL_CONTINUE) 2893 return false; 2894 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2895 return false; 2896 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); 2897 if (r != X86EMUL_CONTINUE) 2898 return false; 2899 if ((perm >> bit_idx) & mask) 2900 return false; 2901 return true; 2902 } 2903 2904 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, 2905 u16 port, u16 len) 2906 { 2907 if (ctxt->perm_ok) 2908 return true; 2909 2910 if (emulator_bad_iopl(ctxt)) 2911 if (!emulator_io_port_access_allowed(ctxt, port, len)) 2912 return false; 2913 2914 ctxt->perm_ok = true; 2915 2916 return true; 2917 } 2918 2919 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) 2920 { 2921 /* 2922 * Intel CPUs mask the counter and pointers in 
quite a strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors. This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
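		/*
		 * Record our old TSS selector in the new TSS's backlink
		 * field so that a subsequent IRET can find its way back;
		 * only the prev_task_link word of the new TSS is written.
		 */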
tss_seg.prev_task_link = old_tss_sel; 3196 3197 ret = ops->write_std(ctxt, new_tss_base, 3198 &tss_seg.prev_task_link, 3199 sizeof tss_seg.prev_task_link, 3200 &ctxt->exception); 3201 if (ret != X86EMUL_CONTINUE) 3202 return ret; 3203 } 3204 3205 return load_state_from_tss32(ctxt, &tss_seg); 3206 } 3207 3208 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, 3209 u16 tss_selector, int idt_index, int reason, 3210 bool has_error_code, u32 error_code) 3211 { 3212 const struct x86_emulate_ops *ops = ctxt->ops; 3213 struct desc_struct curr_tss_desc, next_tss_desc; 3214 int ret; 3215 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); 3216 ulong old_tss_base = 3217 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); 3218 u32 desc_limit; 3219 ulong desc_addr, dr7; 3220 3221 /* FIXME: old_tss_base == ~0 ? */ 3222 3223 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); 3224 if (ret != X86EMUL_CONTINUE) 3225 return ret; 3226 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); 3227 if (ret != X86EMUL_CONTINUE) 3228 return ret; 3229 3230 /* FIXME: check that next_tss_desc is tss */ 3231 3232 /* 3233 * Check privileges. The three cases are task switch caused by... 3234 * 3235 * 1. jmp/call/int to task gate: Check against DPL of the task gate 3236 * 2. Exception/IRQ/iret: No check is performed 3237 * 3. jmp/call to TSS/task-gate: No check is performed since the 3238 * hardware checks it before exiting. 3239 */ 3240 if (reason == TASK_SWITCH_GATE) { 3241 if (idt_index != -1) { 3242 /* Software interrupts */ 3243 struct desc_struct task_gate_desc; 3244 int dpl; 3245 3246 ret = read_interrupt_descriptor(ctxt, idt_index, 3247 &task_gate_desc); 3248 if (ret != X86EMUL_CONTINUE) 3249 return ret; 3250 3251 dpl = task_gate_desc.dpl; 3252 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 3253 return emulate_gp(ctxt, (idt_index << 3) | 0x2); 3254 } 3255 } 3256 3257 desc_limit = desc_limit_scaled(&next_tss_desc); 3258 if (!next_tss_desc.p || 3259 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || 3260 desc_limit < 0x2b)) { 3261 return emulate_ts(ctxt, tss_selector & 0xfffc); 3262 } 3263 3264 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 3265 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ 3266 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); 3267 } 3268 3269 if (reason == TASK_SWITCH_IRET) 3270 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; 3271 3272 /* set back link to prev task only if NT bit is set in eflags 3273 note that old_tss_sel is not used after this point */ 3274 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) 3275 old_tss_sel = 0xffff; 3276 3277 if (next_tss_desc.type & 8) 3278 ret = task_switch_32(ctxt, tss_selector, old_tss_sel, 3279 old_tss_base, &next_tss_desc); 3280 else 3281 ret = task_switch_16(ctxt, tss_selector, old_tss_sel, 3282 old_tss_base, &next_tss_desc); 3283 if (ret != X86EMUL_CONTINUE) 3284 return ret; 3285 3286 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) 3287 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; 3288 3289 if (reason != TASK_SWITCH_IRET) { 3290 next_tss_desc.type |= (1 << 1); /* set busy flag */ 3291 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); 3292 } 3293 3294 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); 3295 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); 3296 3297 if (has_error_code) { 3298 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 
4 : 2; 3299 ctxt->lock_prefix = 0; 3300 ctxt->src.val = (unsigned long) error_code; 3301 ret = em_push(ctxt); 3302 } 3303 3304 ops->get_dr(ctxt, 7, &dr7); 3305 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); 3306 3307 return ret; 3308 } 3309 3310 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 3311 u16 tss_selector, int idt_index, int reason, 3312 bool has_error_code, u32 error_code) 3313 { 3314 int rc; 3315 3316 invalidate_registers(ctxt); 3317 ctxt->_eip = ctxt->eip; 3318 ctxt->dst.type = OP_NONE; 3319 3320 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, 3321 has_error_code, error_code); 3322 3323 if (rc == X86EMUL_CONTINUE) { 3324 ctxt->eip = ctxt->_eip; 3325 writeback_registers(ctxt); 3326 } 3327 3328 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 3329 } 3330 3331 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, 3332 struct operand *op) 3333 { 3334 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count; 3335 3336 register_address_increment(ctxt, reg, df * op->bytes); 3337 op->addr.mem.ea = register_address(ctxt, reg); 3338 } 3339 3340 static int em_das(struct x86_emulate_ctxt *ctxt) 3341 { 3342 u8 al, old_al; 3343 bool af, cf, old_cf; 3344 3345 cf = ctxt->eflags & X86_EFLAGS_CF; 3346 al = ctxt->dst.val; 3347 3348 old_al = al; 3349 old_cf = cf; 3350 cf = false; 3351 af = ctxt->eflags & X86_EFLAGS_AF; 3352 if ((al & 0x0f) > 9 || af) { 3353 al -= 6; 3354 cf = old_cf | (al >= 250); 3355 af = true; 3356 } else { 3357 af = false; 3358 } 3359 if (old_al > 0x99 || old_cf) { 3360 al -= 0x60; 3361 cf = true; 3362 } 3363 3364 ctxt->dst.val = al; 3365 /* Set PF, ZF, SF */ 3366 ctxt->src.type = OP_IMM; 3367 ctxt->src.val = 0; 3368 ctxt->src.bytes = 1; 3369 fastop(ctxt, em_or); 3370 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); 3371 if (cf) 3372 ctxt->eflags |= X86_EFLAGS_CF; 3373 if (af) 3374 ctxt->eflags |= X86_EFLAGS_AF; 3375 return X86EMUL_CONTINUE; 3376 } 3377 3378 static int em_aam(struct x86_emulate_ctxt *ctxt) 3379 { 3380 u8 al, ah; 3381 3382 if (ctxt->src.val == 0) 3383 return emulate_de(ctxt); 3384 3385 al = ctxt->dst.val & 0xff; 3386 ah = al / ctxt->src.val; 3387 al %= ctxt->src.val; 3388 3389 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); 3390 3391 /* Set PF, ZF, SF */ 3392 ctxt->src.type = OP_IMM; 3393 ctxt->src.val = 0; 3394 ctxt->src.bytes = 1; 3395 fastop(ctxt, em_or); 3396 3397 return X86EMUL_CONTINUE; 3398 } 3399 3400 static int em_aad(struct x86_emulate_ctxt *ctxt) 3401 { 3402 u8 al = ctxt->dst.val & 0xff; 3403 u8 ah = (ctxt->dst.val >> 8) & 0xff; 3404 3405 al = (al + (ah * ctxt->src.val)) & 0xff; 3406 3407 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; 3408 3409 /* Set PF, ZF, SF */ 3410 ctxt->src.type = OP_IMM; 3411 ctxt->src.val = 0; 3412 ctxt->src.bytes = 1; 3413 fastop(ctxt, em_or); 3414 3415 return X86EMUL_CONTINUE; 3416 } 3417 3418 static int em_call(struct x86_emulate_ctxt *ctxt) 3419 { 3420 int rc; 3421 long rel = ctxt->src.val; 3422 3423 ctxt->src.val = (unsigned long)ctxt->_eip; 3424 rc = jmp_rel(ctxt, rel); 3425 if (rc != X86EMUL_CONTINUE) 3426 return rc; 3427 return em_push(ctxt); 3428 } 3429 3430 static int em_call_far(struct x86_emulate_ctxt *ctxt) 3431 { 3432 u16 sel, old_cs; 3433 ulong old_eip; 3434 int rc; 3435 struct desc_struct old_desc, new_desc; 3436 const struct x86_emulate_ops *ops = ctxt->ops; 3437 int cpl = ctxt->ops->cpl(ctxt); 3438 enum x86emul_mode prev_mode = ctxt->mode; 3439 3440 old_eip = ctxt->_eip; 3441 
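	/*
	 * Remember the current CS:EIP: it is pushed as the return frame
	 * once the new code segment has been loaded, and it must be
	 * restored if any later step of the far call faults.
	 */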
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but at the very least we
	   should restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 break strict aliasing
		 * rules, so we have to do the operation almost by hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback.
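	 * The invlpg operand is only used to compute the linear address
	 * to invalidate; nothing is written back to it.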
*/ 3689 ctxt->dst.type = OP_NONE; 3690 return X86EMUL_CONTINUE; 3691 } 3692 3693 static int em_clts(struct x86_emulate_ctxt *ctxt) 3694 { 3695 ulong cr0; 3696 3697 cr0 = ctxt->ops->get_cr(ctxt, 0); 3698 cr0 &= ~X86_CR0_TS; 3699 ctxt->ops->set_cr(ctxt, 0, cr0); 3700 return X86EMUL_CONTINUE; 3701 } 3702 3703 static int em_hypercall(struct x86_emulate_ctxt *ctxt) 3704 { 3705 int rc = ctxt->ops->fix_hypercall(ctxt); 3706 3707 if (rc != X86EMUL_CONTINUE) 3708 return rc; 3709 3710 /* Let the processor re-execute the fixed hypercall */ 3711 ctxt->_eip = ctxt->eip; 3712 /* Disable writeback. */ 3713 ctxt->dst.type = OP_NONE; 3714 return X86EMUL_CONTINUE; 3715 } 3716 3717 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, 3718 void (*get)(struct x86_emulate_ctxt *ctxt, 3719 struct desc_ptr *ptr)) 3720 { 3721 struct desc_ptr desc_ptr; 3722 3723 if (ctxt->mode == X86EMUL_MODE_PROT64) 3724 ctxt->op_bytes = 8; 3725 get(ctxt, &desc_ptr); 3726 if (ctxt->op_bytes == 2) { 3727 ctxt->op_bytes = 4; 3728 desc_ptr.address &= 0x00ffffff; 3729 } 3730 /* Disable writeback. */ 3731 ctxt->dst.type = OP_NONE; 3732 return segmented_write_std(ctxt, ctxt->dst.addr.mem, 3733 &desc_ptr, 2 + ctxt->op_bytes); 3734 } 3735 3736 static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3737 { 3738 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); 3739 } 3740 3741 static int em_sidt(struct x86_emulate_ctxt *ctxt) 3742 { 3743 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); 3744 } 3745 3746 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) 3747 { 3748 struct desc_ptr desc_ptr; 3749 int rc; 3750 3751 if (ctxt->mode == X86EMUL_MODE_PROT64) 3752 ctxt->op_bytes = 8; 3753 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3754 &desc_ptr.size, &desc_ptr.address, 3755 ctxt->op_bytes); 3756 if (rc != X86EMUL_CONTINUE) 3757 return rc; 3758 if (ctxt->mode == X86EMUL_MODE_PROT64 && 3759 is_noncanonical_address(desc_ptr.address)) 3760 return emulate_gp(ctxt, 0); 3761 if (lgdt) 3762 ctxt->ops->set_gdt(ctxt, &desc_ptr); 3763 else 3764 ctxt->ops->set_idt(ctxt, &desc_ptr); 3765 /* Disable writeback. 
*/ 3766 ctxt->dst.type = OP_NONE; 3767 return X86EMUL_CONTINUE; 3768 } 3769 3770 static int em_lgdt(struct x86_emulate_ctxt *ctxt) 3771 { 3772 return em_lgdt_lidt(ctxt, true); 3773 } 3774 3775 static int em_lidt(struct x86_emulate_ctxt *ctxt) 3776 { 3777 return em_lgdt_lidt(ctxt, false); 3778 } 3779 3780 static int em_smsw(struct x86_emulate_ctxt *ctxt) 3781 { 3782 if (ctxt->dst.type == OP_MEM) 3783 ctxt->dst.bytes = 2; 3784 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); 3785 return X86EMUL_CONTINUE; 3786 } 3787 3788 static int em_lmsw(struct x86_emulate_ctxt *ctxt) 3789 { 3790 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) 3791 | (ctxt->src.val & 0x0f)); 3792 ctxt->dst.type = OP_NONE; 3793 return X86EMUL_CONTINUE; 3794 } 3795 3796 static int em_loop(struct x86_emulate_ctxt *ctxt) 3797 { 3798 int rc = X86EMUL_CONTINUE; 3799 3800 register_address_increment(ctxt, VCPU_REGS_RCX, -1); 3801 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3802 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3803 rc = jmp_rel(ctxt, ctxt->src.val); 3804 3805 return rc; 3806 } 3807 3808 static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3809 { 3810 int rc = X86EMUL_CONTINUE; 3811 3812 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3813 rc = jmp_rel(ctxt, ctxt->src.val); 3814 3815 return rc; 3816 } 3817 3818 static int em_in(struct x86_emulate_ctxt *ctxt) 3819 { 3820 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, 3821 &ctxt->dst.val)) 3822 return X86EMUL_IO_NEEDED; 3823 3824 return X86EMUL_CONTINUE; 3825 } 3826 3827 static int em_out(struct x86_emulate_ctxt *ctxt) 3828 { 3829 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, 3830 &ctxt->src.val, 1); 3831 /* Disable writeback. */ 3832 ctxt->dst.type = OP_NONE; 3833 return X86EMUL_CONTINUE; 3834 } 3835 3836 static int em_cli(struct x86_emulate_ctxt *ctxt) 3837 { 3838 if (emulator_bad_iopl(ctxt)) 3839 return emulate_gp(ctxt, 0); 3840 3841 ctxt->eflags &= ~X86_EFLAGS_IF; 3842 return X86EMUL_CONTINUE; 3843 } 3844 3845 static int em_sti(struct x86_emulate_ctxt *ctxt) 3846 { 3847 if (emulator_bad_iopl(ctxt)) 3848 return emulate_gp(ctxt, 0); 3849 3850 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3851 ctxt->eflags |= X86_EFLAGS_IF; 3852 return X86EMUL_CONTINUE; 3853 } 3854 3855 static int em_cpuid(struct x86_emulate_ctxt *ctxt) 3856 { 3857 u32 eax, ebx, ecx, edx; 3858 u64 msr = 0; 3859 3860 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr); 3861 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3862 ctxt->ops->cpl(ctxt)) { 3863 return emulate_gp(ctxt, 0); 3864 } 3865 3866 eax = reg_read(ctxt, VCPU_REGS_RAX); 3867 ecx = reg_read(ctxt, VCPU_REGS_RCX); 3868 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 3869 *reg_write(ctxt, VCPU_REGS_RAX) = eax; 3870 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; 3871 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; 3872 *reg_write(ctxt, VCPU_REGS_RDX) = edx; 3873 return X86EMUL_CONTINUE; 3874 } 3875 3876 static int em_sahf(struct x86_emulate_ctxt *ctxt) 3877 { 3878 u32 flags; 3879 3880 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 3881 X86_EFLAGS_SF; 3882 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; 3883 3884 ctxt->eflags &= ~0xffUL; 3885 ctxt->eflags |= flags | X86_EFLAGS_FIXED; 3886 return X86EMUL_CONTINUE; 3887 } 3888 3889 static int em_lahf(struct x86_emulate_ctxt *ctxt) 3890 { 3891 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; 3892 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; 3893 return X86EMUL_CONTINUE; 3894 } 3895 
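/*
 * BSWAP reverses the byte order of its destination register; e.g. a
 * 32-bit bswap of 0x12345678 yields 0x78563412.  Only the 32-bit and
 * (on 64-bit kernels) 64-bit forms are emulated below, since the
 * architecture leaves BSWAP with a 16-bit operand undefined.
 */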
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 1, ebx, ecx = 0, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(edx & FFL(FXSR)))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode without REX.W prefix
 *     - like (2), but XMM 8-15 are also saved and restored
 *  4) 64-bit mode with REX.W prefix (fxsave64/fxrstor64)
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
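/*
 * Concretely, with the usual struct fxregs_state layout (32-byte legacy
 * header plus 128 bytes of x87 state), xmm_space starts at byte offset
 * 160, so fxstate_size() comes to 160 bytes when no XMM state is saved
 * (CR4.OSFXSR clear), 288 bytes for XMM 0-7, and 416 bytes for XMM 0-15
 * in 64-bit mode (illustrative figures derived from that layout).
 */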
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->ops->get_fpu(ctxt);

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	ctxt->ops->put_fpu(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->ops->get_fpu(ctxt);

	size = fxstate_size(ctxt);
	if (size < __fxstate_size(16)) {
		rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		goto out;

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	ctxt->ops->put_fpu(ctxt);

	return rc;
}

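/*
 * CR1 and CR5-CR7 are not architecturally defined; a mov to or from any
 * of them raises #UD, which is what the checks below report back to the
 * guest.
 */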
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.General Detect (GD, bit 13) is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

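/*
 * The decode tables below are built from the following constructor
 * macros.  As an illustrative expansion (not itself part of the
 * source): I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in)
 * produces two adjacent table entries - the ByteOp form and the
 * word/long form - each carrying .u.execute = em_in,
 * .intercept = x86_intercept_in and .check_perm = check_perm_in,
 * with Intercept|CheckPerm set in .flags.
 */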
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

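/*
 * Group tables are indexed by ModRM bits 5:3 (the /digit of the SDM's
 * opcode notation); GroupDual tables additionally split on whether
 * ModRM.mod selects a register operand (mod == 3) or a memory operand.
 */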
static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps, em_call_far),
	I(SrcMem | NearBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
	I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot | DstMem, sldt),
	DI(Prot | DstMem, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

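/*
 * As a worked example (illustrative only): F6ALU(Lock, em_add) below
 * fills the six opcodes 0x00-0x05 with the classic ALU forms of ADD -
 * r/m,r and r,r/m in byte and word/long widths, plus AL,imm8 and
 * eAX,imm - with Lock dropped from the forms that cannot take it.
 */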
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

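/*
 * Entries marked N decode to NotImpl and make x86_decode_insn() return
 * EMULATION_FAILED, so anything missing from these tables is reported
 * as unemulatable rather than silently misdecoded.  A leading 0x0f
 * escape byte switches decoding from opcode_table to the table below.
 */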
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						    check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						    check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

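/*
 * 0F 38 F0/F1 without a mandatory prefix is MOVBE; the 66-prefixed
 * (16-bit operand) and F2-prefixed (CRC32) encodings are left as N in
 * the gprefix tables below.  MOVBE requires a memory operand, so each
 * instr_dual pairs the memory form with N for ModRM.mod == 3.
 */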
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by their mandatory prefix; the table itself
 * is indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

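/*
 * Note that imm_size() caps immediates at four bytes even when op_bytes
 * is 8: aside from the B8+r movabs form (SrcImm64), 64-bit instruction
 * forms encode at most an imm32, which is then sign-extended to 64 bits
 * by decode_imm() below.
 */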
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

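/*
 * Rough decode pipeline (a summary of the function below): fetch and
 * apply legacy/REX prefixes, look the opcode up in the one-, two- or
 * three-byte table, resolve Group/Prefix/Escape/Dual indirections via
 * ModRM, then decode the source, second source and destination
 * operands from the accumulated ctxt->d flags.
 */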
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
				ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

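/*
 * For example (illustrative): "repe cmpsb" keeps iterating while RCX is
 * non-zero and ZF stays set; the first mismatch clears ZF and the
 * second termination condition below ends the string operation even
 * though RCX has not reached zero.
 */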
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE
	 * and REPNE.  Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, test the corresponding
	 * termination condition according to:
	 *     - if REPE/REPZ and ZF = 0 then done
	 *     - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

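/*
 * Fastop handlers use a non-standard calling convention: operands
 * travel in RAX/RDX/RCX, the flags are materialised with push/popf
 * around the call, and the handler for the right operand width is
 * found by offsetting the function pointer by
 * __ffs(dst.bytes) * FASTOP_SIZE.  A handler that faults (e.g. the
 * division helpers) returns with the pointer register cleared, which
 * fastop() turns into #DE.
 */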
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	register void *__sp asm(_ASM_SP);
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), "+r"(__sp)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

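/*
 * Execution proper: after the privilege, mode and intercept checks,
 * memory source operands are read, the handler runs (via ->execute or
 * fastop()), results are written back, and REP string state (RCX, RSI,
 * RDI, RF) is advanced before %rip is finally updated.
 */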
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

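/*
 * Reusing a physical address computed during the page-table walk is
 * only safe when the instruction touches exactly one memory location:
 * REP string operations and TwoMemOp instructions access multiple or
 * advancing addresses, so they must take the full translation path
 * each time.
 */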
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}