// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone		0ull
#define OpImplicit	1ull	/* No generic decode */
#define OpReg		2ull	/* Register */
#define OpMem		3ull	/* Memory */
#define OpAcc		4ull	/* Accumulator: AL/AX/EAX/RAX */
#define OpDI		5ull	/* ES:DI/EDI/RDI */
#define OpMem64		6ull	/* Memory, 64-bit */
#define OpImmUByte	7ull	/* Zero-extended 8-bit immediate */
#define OpDX		8ull	/* DX register */
#define OpCL		9ull	/* CL register (for shifts) */
#define OpImmByte	10ull	/* 8-bit sign extended immediate */
#define OpOne		11ull	/* Implied 1 */
#define OpImm		12ull	/* Sign extended up to 32-bit immediate */
#define OpMem16		13ull	/* Memory operand (16-bit). */
#define OpMem32		14ull	/* Memory operand (32-bit). */
#define OpImmU		15ull	/* Immediate operand, zero extended */
#define OpSI		16ull	/* SI/ESI/RSI */
#define OpImmFAddr	17ull	/* Immediate far address */
#define OpMemFAddr	18ull	/* Far address in memory */
#define OpImmU16	19ull	/* Immediate operand, 16 bits, zero extended */
#define OpES		20ull	/* ES */
#define OpCS		21ull	/* CS */
#define OpSS		22ull	/* SS */
#define OpDS		23ull	/* DS */
#define OpFS		24ull	/* FS */
#define OpGS		25ull	/* GS */
#define OpMem8		26ull	/* 8-bit zero extended memory operand */
#define OpImm64		27ull	/* Sign extended 16/32/64-bit immediate */
#define OpXLat		28ull	/* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo		29ull	/* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi		30ull	/* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits		5	/* Width of operand field */
#define OpMask		((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp		(1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift	1
#define ImplicitOps	(OpImplicit << DstShift)
#define DstReg		(OpReg << DstShift)
#define DstMem		(OpMem << DstShift)
#define DstAcc		(OpAcc << DstShift)
#define DstDI		(OpDI << DstShift)
#define DstMem64	(OpMem64 << DstShift)
#define DstMem16	(OpMem16 << DstShift)
#define DstImmUByte	(OpImmUByte << DstShift)
#define DstDX		(OpDX << DstShift)
#define DstAccLo	(OpAccLo << DstShift)
#define DstMask		(OpMask << DstShift)
/* Source operand type. */
#define SrcShift	6
#define SrcNone		(OpNone << SrcShift)
#define SrcReg		(OpReg << SrcShift)
#define SrcMem		(OpMem << SrcShift)
#define SrcMem16	(OpMem16 << SrcShift)
#define SrcMem32	(OpMem32 << SrcShift)
#define SrcImm		(OpImm << SrcShift)
#define SrcImmByte	(OpImmByte << SrcShift)
#define SrcOne		(OpOne << SrcShift)
#define SrcImmUByte	(OpImmUByte << SrcShift)
#define SrcImmU		(OpImmU << SrcShift)
#define SrcSI		(OpSI << SrcShift)
#define SrcXLat		(OpXLat << SrcShift)
#define SrcImmFAddr	(OpImmFAddr << SrcShift)
#define SrcMemFAddr	(OpMemFAddr << SrcShift)
#define SrcAcc		(OpAcc << SrcShift)
#define SrcImmU16	(OpImmU16 << SrcShift)
#define SrcImm64	(OpImm64 << SrcShift)
#define SrcDX		(OpDX << SrcShift)
#define SrcMem8		(OpMem8 << SrcShift)
#define SrcAccHi	(OpAccHi << SrcShift)
#define SrcMask		(OpMask << SrcShift)
#define BitOp		(1<<11)
#define MemAbs		(1<<12)	/* Memory operand is absolute displacement */
#define String		(1<<13)	/* String instruction (rep capable) */
#define Stack		(1<<14)	/* Stack instruction (push/pop) */
#define GroupMask	(7<<15)	/* Opcode uses one of the group mechanisms */
#define Group		(1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual	(2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix		(3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt		(4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape		(5<<15)	/* Escape to coprocessor instruction */
#define InstrDual	(6<<15)	/* Alternate instruction decoding of mod == 3 */
#define ModeDual	(7<<15)	/* Different instruction for 32/64 bit */
#define Sse		(1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM		(1<<19)
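/*
 * Illustration (not used by the code): the decode flags pack 5-bit
 * operand descriptors at DstShift (bit 1) and SrcShift (bit 6).  For a
 * hypothetical table entry declared DstMem | SrcReg | ModRM, a decoder
 * would recover:
 *
 *   (flags & DstMask) >> DstShift == OpMem  -> destination is in memory
 *   (flags & SrcMask) >> SrcShift == OpReg  -> source is a register
 *   flags & ModRM                           -> a ModRM byte follows
 *
 * Likewise (flags & GroupMask) == Group means bits 5:3 of the ModRM
 * byte select one of eight sub-opcodes from opcode::u.group.
 */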
/* Destination is only written; never read. */
#define Mov		(1<<20)
/* Misc flags */
#define Prot		(1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD	(1<<22)	/* Emulate if unsupported by the host */
#define NoAccess	(1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264		(1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined	(1<<25)	/* No Such Instruction */
#define Lock		(1<<26)	/* lock prefix is allowed for the instruction */
#define Priv		(1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64		(1<<28)
#define PageTable	(1 << 29)	/* instruction used to write page table */
#define NotImpl		(1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift	(31)
#define Src2None	(OpNone << Src2Shift)
#define Src2Mem		(OpMem << Src2Shift)
#define Src2CL		(OpCL << Src2Shift)
#define Src2ImmByte	(OpImmByte << Src2Shift)
#define Src2One		(OpOne << Src2Shift)
#define Src2Imm		(OpImm << Src2Shift)
#define Src2ES		(OpES << Src2Shift)
#define Src2CS		(OpCS << Src2Shift)
#define Src2SS		(OpSS << Src2Shift)
#define Src2DS		(OpDS << Src2Shift)
#define Src2FS		(OpFS << Src2Shift)
#define Src2GS		(OpGS << Src2Shift)
#define Src2Mask	(OpMask << Src2Shift)
#define Mmx		((u64)1 << 40)	/* MMX Vector instruction */
#define AlignMask	((u64)7 << 41)
#define Aligned		((u64)1 << 41)	/* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned	((u64)2 << 41)	/* Explicitly unaligned (e.g. MOVDQU) */
#define Avx		((u64)3 << 41)	/* Advanced Vector Extensions */
#define Aligned16	((u64)4 << 41)	/* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop		((u64)1 << 44)	/* Use opcode::u.fastop */
#define NoWrite		((u64)1 << 45)	/* No writeback */
#define SrcWrite	((u64)1 << 46)	/* Write back src operand */
#define NoMod		((u64)1 << 47)	/* Mod field is ignored */
#define Intercept	((u64)1 << 48)	/* Has valid intercept field */
#define CheckPerm	((u64)1 << 49)	/* Has valid check_perm field */
#define PrivUD		((u64)1 << 51)	/* #UD instead of #GP on CPL > 0 */
#define NearBranch	((u64)1 << 52)	/* Near branches */
#define No16		((u64)1 << 53)	/* No 16 bit operand */
#define IncSP		((u64)1 << 54)	/* SP is incremented before ModRM calc */
#define TwoMemOp	((u64)1 << 55)	/* Instruction has two memory operands */

#define DstXacc		(DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
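/*
 * Note: the X-macros above simply repeat their argument, so X16(N)
 * expands to sixteen copies of N and can fill a whole row of an opcode
 * table in one line.  On 64-bit builds NR_FASTOP works out to
 * ilog2(8) + 1 == 4: one fastop variant each for 1-, 2-, 4- and 8-byte
 * operands.
 */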
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
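/*
 * Note on the helpers above: ctxt->_regs is a lazy write-back cache of
 * the guest GPRs.  reg_read() faults a value in on first use,
 * reg_write() hands out a pointer and marks the register dirty without
 * reading it, and reg_rmw() does both for read-modify-write users.
 * Only registers in regs_dirty are pushed back by
 * writeback_registers(), so untouched GPRs never leave the vcpu.
 */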
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op,  dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)
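/*
 * Sketch of roughly what FASTOP2(add) emits, for orientation only (this
 * is an illustration, not code the build uses):
 *
 *   em_add:                          # aligned to FASTOP_SIZE
 *           addb %dl, %al;  ret
 *           addw %dx, %ax;  ret      # at em_add + 1*FASTOP_SIZE
 *           addl %edx, %eax; ret
 *           addq %rdx, %rax; ret     # 64-bit builds only
 *
 * Because every stub occupies exactly FASTOP_SIZE bytes, the caller can
 * compute the address of the right size variant instead of going
 * through a jump table.
 */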
asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     "   jmp  2b\n" \
		     ".popsection\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
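/*
 * Worked example for the two helpers above: with a 16-bit SS
 * (ss.d == 0), stack_mask() yields 0xffff, so stack_size() returns
 * (__fls(0xffff) + 1) >> 3 == (15 + 1) >> 3 == 2 bytes; a 32-bit SS
 * gives 4, and 64-bit mode gives 8.
 */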
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
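/*
 * Note on the exception helpers above: the vectors that architecturally
 * push an error code (#GP, #SS, #TS) take one here as well, while #DB,
 * #UD, #DE and #NM pass error_code_valid == false, matching how the CPU
 * itself delivers them.
 */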
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}
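/*
 * Worked example for the expand-down case in __linearize() above: for
 * a 16-bit expand-down data segment with limit 0x0fff, valid effective
 * addresses are 0x1000..0xffff, so the code first rejects
 * addr.ea <= lim and then re-checks against the 0xffff (or 0xffffffff
 * for a big segment) upper bound.
 */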
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and one page has
	 * been loaded at the beginning of x86_decode_insn.  So, if there
	 * still are not enough bytes, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
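/*
 * Note on the "15UL ^ cur_size" expression above: cur_size can never
 * exceed 15 (the architectural instruction-length limit), and for any
 * value in 0..15 the XOR is equivalent to 15 - cur_size.  E.g. with 6
 * bytes already cached, 15 ^ 6 == 9 bytes may still be fetched.
 */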
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
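/*
 * Worked example for test_cc(): the setcc stubs built by FOP_SETCC
 * above are 4 bytes apart and laid out in x86 condition-code order, so
 * condition 4 (the "equal/zero" cc) resolves to em_setcc + 16, i.e. the
 * setz stub; the asm then materializes the guest flags with popf and
 * lets setz decide the result.
 */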
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}
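/*
 * Note: the switch statements above and below are needed because the
 * XMM/MM register name has to be a literal in the asm template; it
 * cannot be picked at run time, so each register gets its own
 * movdqa/movq.
 */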
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
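/*
 * Refresher for decode_modrm() below: a ModRM byte is
 * mod (bits 7:6) | reg (bits 5:3) | rm (bits 2:0), and the REX prefix
 * contributes bit 3 of reg (REX.R), of the SIB index (REX.X) and of
 * rm/base (REX.B), which is what the shift-and-mask expressions at the
 * top of the function extract.
 */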
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
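/*
 * Worked example for fetch_bit_operand() above: for a 16-bit
 * "bt [mem], reg" with a source bit index of 21, mask is ~15, so
 * sv = 21 & ~15 = 16 and the effective address advances by 16 >> 3 = 2
 * bytes; the remaining in-word index is 21 & 15 = 5, i.e. bit 5 of the
 * following word, which is again bit 21 overall.
 */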
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}
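/*
 * Note on the error code above: "index << 3 | 0x2" follows the
 * architectural selector error-code format, where bit 1 set indicates
 * that the faulting reference was to an IDT entry.
 */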
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}
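/*
 * Worked example for get_descriptor_ptr() above: selector 0x0010 has
 * index 2 and TI (bit 2) clear, so the descriptor lives at
 * gdt.address + 2 * 8; with TI set, the LDT base would be used instead.
 */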
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or the segment
		 * selector's RPL != CPL, or the descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}
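/*
 * Key to the seg_desc.type tests above, for S=1 descriptors: bit 3
 * (0x8) distinguishes code from data, bit 1 (0x2) is W (data) or R
 * (code), bit 2 (0x4) is expand-down (data) or conforming (code), and
 * bit 0 (0x1) is the accessed bit that gets set before loading.
 */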
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
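/*
 * Note on writeback() above: when the guest used a LOCK prefix, the
 * memory store is done via cmpxchg against the operand's original
 * value, so the write only lands if memory is unchanged and the
 * read-modify-write stays atomic with respect to other vCPUs.
 */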
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
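/*
 * Summary of the POPF privilege rules implemented above: in protected
 * mode, CPL 0 may change IOPL and CPL <= IOPL may change IF; in VM86
 * mode IOPL must be 3 or POPF raises #GP; in real mode both IOPL and
 * IF are always writable.
 */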
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 1993 if (ctxt->op_bytes > 2) 1994 rsp_increment(ctxt, ctxt->op_bytes - 2); 1995 1996 rc = load_segment_descriptor(ctxt, (u16)selector, seg); 1997 return rc; 1998 } 1999 2000 static int em_pusha(struct x86_emulate_ctxt *ctxt) 2001 { 2002 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); 2003 int rc = X86EMUL_CONTINUE; 2004 int reg = VCPU_REGS_RAX; 2005 2006 while (reg <= VCPU_REGS_RDI) { 2007 ctxt->src.val = (reg == VCPU_REGS_RSP) ? 2008 old_esp : reg_read(ctxt, reg); 2009 2010 rc = em_push(ctxt); 2011 if (rc != X86EMUL_CONTINUE) 2012 return rc; 2013 2014 ++reg; 2015 } 2016 2017 return rc; 2018 } 2019 2020 static int em_pushf(struct x86_emulate_ctxt *ctxt) 2021 { 2022 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; 2023 return em_push(ctxt); 2024 } 2025 2026 static int em_popa(struct x86_emulate_ctxt *ctxt) 2027 { 2028 int rc = X86EMUL_CONTINUE; 2029 int reg = VCPU_REGS_RDI; 2030 u32 val; 2031 2032 while (reg >= VCPU_REGS_RAX) { 2033 if (reg == VCPU_REGS_RSP) { 2034 rsp_increment(ctxt, ctxt->op_bytes); 2035 --reg; 2036 } 2037 2038 rc = emulate_pop(ctxt, &val, ctxt->op_bytes); 2039 if (rc != X86EMUL_CONTINUE) 2040 break; 2041 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); 2042 --reg; 2043 } 2044 return rc; 2045 } 2046 2047 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) 2048 { 2049 const struct x86_emulate_ops *ops = ctxt->ops; 2050 int rc; 2051 struct desc_ptr dt; 2052 gva_t cs_addr; 2053 gva_t eip_addr; 2054 u16 cs, eip; 2055 2056 /* TODO: Add limit checks */ 2057 ctxt->src.val = ctxt->eflags; 2058 rc = em_push(ctxt); 2059 if (rc != X86EMUL_CONTINUE) 2060 return rc; 2061 2062 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); 2063 2064 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); 2065 rc = em_push(ctxt); 2066 if (rc != X86EMUL_CONTINUE) 2067 return rc; 2068 2069 ctxt->src.val = ctxt->_eip; 2070 rc = em_push(ctxt); 2071 if (rc != X86EMUL_CONTINUE) 2072 return rc; 2073 2074 ops->get_idt(ctxt, &dt); 2075 2076 eip_addr = dt.address + (irq << 2); 2077 cs_addr = dt.address + (irq << 2) + 2; 2078 2079 rc = linear_read_system(ctxt, cs_addr, &cs, 2); 2080 if (rc != X86EMUL_CONTINUE) 2081 return rc; 2082 2083 rc = linear_read_system(ctxt, eip_addr, &eip, 2); 2084 if (rc != X86EMUL_CONTINUE) 2085 return rc; 2086 2087 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); 2088 if (rc != X86EMUL_CONTINUE) 2089 return rc; 2090 2091 ctxt->_eip = eip; 2092 2093 return rc; 2094 } 2095 2096 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) 2097 { 2098 int rc; 2099 2100 invalidate_registers(ctxt); 2101 rc = __emulate_int_real(ctxt, irq); 2102 if (rc == X86EMUL_CONTINUE) 2103 writeback_registers(ctxt); 2104 return rc; 2105 } 2106 2107 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) 2108 { 2109 switch(ctxt->mode) { 2110 case X86EMUL_MODE_REAL: 2111 return __emulate_int_real(ctxt, irq); 2112 case X86EMUL_MODE_VM86: 2113 case X86EMUL_MODE_PROT16: 2114 case X86EMUL_MODE_PROT32: 2115 case X86EMUL_MODE_PROT64: 2116 default: 2117 /* Protected mode interrupts are not yet implemented */ 2118 return X86EMUL_UNHANDLEABLE; 2119 } 2120 } 2121 2122 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) 2123 { 2124 int rc = X86EMUL_CONTINUE; 2125 unsigned long temp_eip = 0; 2126 unsigned long temp_eflags = 0; 2127 unsigned long cs = 0; 2128 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 2129 X86_EFLAGS_ZF | X86_EFLAGS_SF |
X86_EFLAGS_TF | 2130 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | 2131 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | 2132 X86_EFLAGS_AC | X86_EFLAGS_ID | 2133 X86_EFLAGS_FIXED; 2134 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | 2135 X86_EFLAGS_VIP; 2136 2137 /* TODO: Add stack limit check */ 2138 2139 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); 2140 2141 if (rc != X86EMUL_CONTINUE) 2142 return rc; 2143 2144 if (temp_eip & ~0xffff) 2145 return emulate_gp(ctxt, 0); 2146 2147 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2148 2149 if (rc != X86EMUL_CONTINUE) 2150 return rc; 2151 2152 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); 2153 2154 if (rc != X86EMUL_CONTINUE) 2155 return rc; 2156 2157 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); 2158 2159 if (rc != X86EMUL_CONTINUE) 2160 return rc; 2161 2162 ctxt->_eip = temp_eip; 2163 2164 if (ctxt->op_bytes == 4) 2165 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); 2166 else if (ctxt->op_bytes == 2) { 2167 ctxt->eflags &= ~0xffff; 2168 ctxt->eflags |= temp_eflags; 2169 } 2170 2171 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ 2172 ctxt->eflags |= X86_EFLAGS_FIXED; 2173 ctxt->ops->set_nmi_mask(ctxt, false); 2174 2175 return rc; 2176 } 2177 2178 static int em_iret(struct x86_emulate_ctxt *ctxt) 2179 { 2180 switch(ctxt->mode) { 2181 case X86EMUL_MODE_REAL: 2182 return emulate_iret_real(ctxt); 2183 case X86EMUL_MODE_VM86: 2184 case X86EMUL_MODE_PROT16: 2185 case X86EMUL_MODE_PROT32: 2186 case X86EMUL_MODE_PROT64: 2187 default: 2188 /* iret from protected mode is not yet implemented */ 2189 return X86EMUL_UNHANDLEABLE; 2190 } 2191 } 2192 2193 static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 2194 { 2195 int rc; 2196 unsigned short sel; 2197 struct desc_struct new_desc; 2198 u8 cpl = ctxt->ops->cpl(ctxt); 2199 2200 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2201 2202 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, 2203 X86_TRANSFER_CALL_JMP, 2204 &new_desc); 2205 if (rc != X86EMUL_CONTINUE) 2206 return rc; 2207 2208 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 2209 /* Error handling is not implemented.
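 * If assign_eip_far() faults here, the new CS has already been loaded,
 * so guest state is inconsistent; the check below therefore reports
 * X86EMUL_UNHANDLEABLE instead of trying to unwind (contrast
 * em_call_far(), which restores the old CS and mode on failure).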
*/ 2210 if (rc != X86EMUL_CONTINUE) 2211 return X86EMUL_UNHANDLEABLE; 2212 2213 return rc; 2214 } 2215 2216 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) 2217 { 2218 return assign_eip_near(ctxt, ctxt->src.val); 2219 } 2220 2221 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) 2222 { 2223 int rc; 2224 long int old_eip; 2225 2226 old_eip = ctxt->_eip; 2227 rc = assign_eip_near(ctxt, ctxt->src.val); 2228 if (rc != X86EMUL_CONTINUE) 2229 return rc; 2230 ctxt->src.val = old_eip; 2231 rc = em_push(ctxt); 2232 return rc; 2233 } 2234 2235 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) 2236 { 2237 u64 old = ctxt->dst.orig_val64; 2238 2239 if (ctxt->dst.bytes == 16) 2240 return X86EMUL_UNHANDLEABLE; 2241 2242 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || 2243 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { 2244 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); 2245 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); 2246 ctxt->eflags &= ~X86_EFLAGS_ZF; 2247 } else { 2248 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | 2249 (u32) reg_read(ctxt, VCPU_REGS_RBX); 2250 2251 ctxt->eflags |= X86_EFLAGS_ZF; 2252 } 2253 return X86EMUL_CONTINUE; 2254 } 2255 2256 static int em_ret(struct x86_emulate_ctxt *ctxt) 2257 { 2258 int rc; 2259 unsigned long eip; 2260 2261 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2262 if (rc != X86EMUL_CONTINUE) 2263 return rc; 2264 2265 return assign_eip_near(ctxt, eip); 2266 } 2267 2268 static int em_ret_far(struct x86_emulate_ctxt *ctxt) 2269 { 2270 int rc; 2271 unsigned long eip, cs; 2272 int cpl = ctxt->ops->cpl(ctxt); 2273 struct desc_struct new_desc; 2274 2275 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2276 if (rc != X86EMUL_CONTINUE) 2277 return rc; 2278 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2279 if (rc != X86EMUL_CONTINUE) 2280 return rc; 2281 /* Outer-privilege level return is not implemented */ 2282 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) 2283 return X86EMUL_UNHANDLEABLE; 2284 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, 2285 X86_TRANSFER_RET, 2286 &new_desc); 2287 if (rc != X86EMUL_CONTINUE) 2288 return rc; 2289 rc = assign_eip_far(ctxt, eip, &new_desc); 2290 /* Error handling is not implemented. */ 2291 if (rc != X86EMUL_CONTINUE) 2292 return X86EMUL_UNHANDLEABLE; 2293 2294 return rc; 2295 } 2296 2297 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) 2298 { 2299 int rc; 2300 2301 rc = em_ret_far(ctxt); 2302 if (rc != X86EMUL_CONTINUE) 2303 return rc; 2304 rsp_increment(ctxt, ctxt->src.val); 2305 return X86EMUL_CONTINUE; 2306 } 2307 2308 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2309 { 2310 /* Save real source value, then compare EAX against destination. */ 2311 ctxt->dst.orig_val = ctxt->dst.val; 2312 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); 2313 ctxt->src.orig_val = ctxt->src.val; 2314 ctxt->src.val = ctxt->dst.orig_val; 2315 fastop(ctxt, em_cmp); 2316 2317 if (ctxt->eflags & X86_EFLAGS_ZF) { 2318 /* Success: write back to memory; no update of EAX */ 2319 ctxt->src.type = OP_NONE; 2320 ctxt->dst.val = ctxt->src.orig_val; 2321 } else { 2322 /* Failure: write the value we saw to EAX. 
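 *
 * For reference, the semantics being emulated here are:
 *   if (acc == *dest) { ZF = 1; *dest = src; }
 *   else              { ZF = 0; acc = *dest; }
 * where acc is AL/AX/EAX/RAX according to the operand size.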
*/ 2323 ctxt->src.type = OP_REG; 2324 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 2325 ctxt->src.val = ctxt->dst.orig_val; 2326 /* Create write-cycle to dest by writing the same value */ 2327 ctxt->dst.val = ctxt->dst.orig_val; 2328 } 2329 return X86EMUL_CONTINUE; 2330 } 2331 2332 static int em_lseg(struct x86_emulate_ctxt *ctxt) 2333 { 2334 int seg = ctxt->src2.val; 2335 unsigned short sel; 2336 int rc; 2337 2338 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2339 2340 rc = load_segment_descriptor(ctxt, sel, seg); 2341 if (rc != X86EMUL_CONTINUE) 2342 return rc; 2343 2344 ctxt->dst.val = ctxt->src.val; 2345 return rc; 2346 } 2347 2348 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) 2349 { 2350 #ifdef CONFIG_X86_64 2351 u32 eax, ebx, ecx, edx; 2352 2353 eax = 0x80000001; 2354 ecx = 0; 2355 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); 2356 return edx & bit(X86_FEATURE_LM); 2357 #else 2358 return false; 2359 #endif 2360 } 2361 2362 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) 2363 { 2364 desc->g = (flags >> 23) & 1; 2365 desc->d = (flags >> 22) & 1; 2366 desc->l = (flags >> 21) & 1; 2367 desc->avl = (flags >> 20) & 1; 2368 desc->p = (flags >> 15) & 1; 2369 desc->dpl = (flags >> 13) & 3; 2370 desc->s = (flags >> 12) & 1; 2371 desc->type = (flags >> 8) & 15; 2372 } 2373 2374 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, 2375 int n) 2376 { 2377 struct desc_struct desc; 2378 int offset; 2379 u16 selector; 2380 2381 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4); 2382 2383 if (n < 3) 2384 offset = 0x7f84 + n * 12; 2385 else 2386 offset = 0x7f2c + (n - 3) * 12; 2387 2388 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); 2389 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); 2390 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset)); 2391 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); 2392 return X86EMUL_CONTINUE; 2393 } 2394 2395 #ifdef CONFIG_X86_64 2396 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, 2397 int n) 2398 { 2399 struct desc_struct desc; 2400 int offset; 2401 u16 selector; 2402 u32 base3; 2403 2404 offset = 0x7e00 + n * 16; 2405 2406 selector = GET_SMSTATE(u16, smstate, offset); 2407 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8); 2408 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); 2409 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); 2410 base3 = GET_SMSTATE(u32, smstate, offset + 12); 2411 2412 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); 2413 return X86EMUL_CONTINUE; 2414 } 2415 #endif 2416 2417 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, 2418 u64 cr0, u64 cr3, u64 cr4) 2419 { 2420 int bad; 2421 u64 pcid; 2422 2423 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ 2424 pcid = 0; 2425 if (cr4 & X86_CR4_PCIDE) { 2426 pcid = cr3 & 0xfff; 2427 cr3 &= ~0xfff; 2428 } 2429 2430 bad = ctxt->ops->set_cr(ctxt, 3, cr3); 2431 if (bad) 2432 return X86EMUL_UNHANDLEABLE; 2433 2434 /* 2435 * First enable PAE, long mode needs it before CR0.PG = 1 is set. 2436 * Then enable protected mode. However, PCID cannot be enabled 2437 * if EFER.LMA=0, so set it separately. 
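 *
 * Written out, the sequence performed below is:
 *   CR3 = cr3 & ~0xfff;   PCID field clear while PCIDE is still 0
 *   CR4 = cr4 & ~PCIDE;   PAE first
 *   CR0 = cr0;            then PG/PE
 *   CR4 = cr4;            PCIDE, once long mode is active again
 *   CR3 = cr3 | pcid;     and finally the saved PCID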
2438 */ 2439 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); 2440 if (bad) 2441 return X86EMUL_UNHANDLEABLE; 2442 2443 bad = ctxt->ops->set_cr(ctxt, 0, cr0); 2444 if (bad) 2445 return X86EMUL_UNHANDLEABLE; 2446 2447 if (cr4 & X86_CR4_PCIDE) { 2448 bad = ctxt->ops->set_cr(ctxt, 4, cr4); 2449 if (bad) 2450 return X86EMUL_UNHANDLEABLE; 2451 if (pcid) { 2452 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); 2453 if (bad) 2454 return X86EMUL_UNHANDLEABLE; 2455 } 2456 2457 } 2458 2459 return X86EMUL_CONTINUE; 2460 } 2461 2462 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, 2463 const char *smstate) 2464 { 2465 struct desc_struct desc; 2466 struct desc_ptr dt; 2467 u16 selector; 2468 u32 val, cr0, cr3, cr4; 2469 int i; 2470 2471 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc); 2472 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8); 2473 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED; 2474 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0); 2475 2476 for (i = 0; i < 8; i++) 2477 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4); 2478 2479 val = GET_SMSTATE(u32, smstate, 0x7fcc); 2480 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); 2481 val = GET_SMSTATE(u32, smstate, 0x7fc8); 2482 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); 2483 2484 selector = GET_SMSTATE(u32, smstate, 0x7fc4); 2485 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64)); 2486 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60)); 2487 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c)); 2488 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); 2489 2490 selector = GET_SMSTATE(u32, smstate, 0x7fc0); 2491 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80)); 2492 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c)); 2493 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78)); 2494 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); 2495 2496 dt.address = GET_SMSTATE(u32, smstate, 0x7f74); 2497 dt.size = GET_SMSTATE(u32, smstate, 0x7f70); 2498 ctxt->ops->set_gdt(ctxt, &dt); 2499 2500 dt.address = GET_SMSTATE(u32, smstate, 0x7f58); 2501 dt.size = GET_SMSTATE(u32, smstate, 0x7f54); 2502 ctxt->ops->set_idt(ctxt, &dt); 2503 2504 for (i = 0; i < 6; i++) { 2505 int r = rsm_load_seg_32(ctxt, smstate, i); 2506 if (r != X86EMUL_CONTINUE) 2507 return r; 2508 } 2509 2510 cr4 = GET_SMSTATE(u32, smstate, 0x7f14); 2511 2512 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8)); 2513 2514 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); 2515 } 2516 2517 #ifdef CONFIG_X86_64 2518 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, 2519 const char *smstate) 2520 { 2521 struct desc_struct desc; 2522 struct desc_ptr dt; 2523 u64 val, cr0, cr3, cr4; 2524 u32 base3; 2525 u16 selector; 2526 int i, r; 2527 2528 for (i = 0; i < 16; i++) 2529 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8); 2530 2531 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); 2532 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; 2533 2534 val = GET_SMSTATE(u32, smstate, 0x7f68); 2535 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); 2536 val = GET_SMSTATE(u32, smstate, 0x7f60); 2537 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); 2538 2539 cr0 = GET_SMSTATE(u64, smstate, 0x7f58); 2540 cr3 = GET_SMSTATE(u64, smstate, 0x7f50); 2541 cr4 = GET_SMSTATE(u64, smstate, 0x7f48); 2542 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00)); 2543 val = GET_SMSTATE(u64, smstate, 0x7ed0); 
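/*
 * EFER is loaded with LMA masked off: paging is off at this point, and
 * LMA is re-established once rsm_enter_protected_mode() turns CR0.PG
 * back on with EFER.LME set.
 */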
2544 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); 2545 2546 selector = GET_SMSTATE(u32, smstate, 0x7e90); 2547 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8); 2548 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94)); 2549 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98)); 2550 base3 = GET_SMSTATE(u32, smstate, 0x7e9c); 2551 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); 2552 2553 dt.size = GET_SMSTATE(u32, smstate, 0x7e84); 2554 dt.address = GET_SMSTATE(u64, smstate, 0x7e88); 2555 ctxt->ops->set_idt(ctxt, &dt); 2556 2557 selector = GET_SMSTATE(u32, smstate, 0x7e70); 2558 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8); 2559 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74)); 2560 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78)); 2561 base3 = GET_SMSTATE(u32, smstate, 0x7e7c); 2562 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); 2563 2564 dt.size = GET_SMSTATE(u32, smstate, 0x7e64); 2565 dt.address = GET_SMSTATE(u64, smstate, 0x7e68); 2566 ctxt->ops->set_gdt(ctxt, &dt); 2567 2568 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); 2569 if (r != X86EMUL_CONTINUE) 2570 return r; 2571 2572 for (i = 0; i < 6; i++) { 2573 r = rsm_load_seg_64(ctxt, smstate, i); 2574 if (r != X86EMUL_CONTINUE) 2575 return r; 2576 } 2577 2578 return X86EMUL_CONTINUE; 2579 } 2580 #endif 2581 2582 static int em_rsm(struct x86_emulate_ctxt *ctxt) 2583 { 2584 unsigned long cr0, cr4, efer; 2585 char buf[512]; 2586 u64 smbase; 2587 int ret; 2588 2589 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) 2590 return emulate_ud(ctxt); 2591 2592 smbase = ctxt->ops->get_smbase(ctxt); 2593 2594 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); 2595 if (ret != X86EMUL_CONTINUE) 2596 return X86EMUL_UNHANDLEABLE; 2597 2598 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) 2599 ctxt->ops->set_nmi_mask(ctxt, false); 2600 2601 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & 2602 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); 2603 2604 /* 2605 * Get back to real mode, to prepare a safe state in which to load 2606 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU 2607 * supports long mode. 2608 */ 2609 if (emulator_has_longmode(ctxt)) { 2610 struct desc_struct cs_desc; 2611 2612 /* Zero CR4.PCIDE before CR0.PG. */ 2613 cr4 = ctxt->ops->get_cr(ctxt, 4); 2614 if (cr4 & X86_CR4_PCIDE) 2615 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); 2616 2617 /* A 32-bit code segment is required to clear EFER.LMA. */ 2618 memset(&cs_desc, 0, sizeof(cs_desc)); 2619 cs_desc.type = 0xb; 2620 cs_desc.s = cs_desc.g = cs_desc.p = 1; 2621 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS); 2622 } 2623 2624 /* For the 64-bit case, this will clear EFER.LMA. */ 2625 cr0 = ctxt->ops->get_cr(ctxt, 0); 2626 if (cr0 & X86_CR0_PE) 2627 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); 2628 2629 if (emulator_has_longmode(ctxt)) { 2630 /* Clear CR4.PAE before clearing EFER.LME. */ 2631 cr4 = ctxt->ops->get_cr(ctxt, 4); 2632 if (cr4 & X86_CR4_PAE) 2633 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); 2634 2635 /* And finally go back to 32-bit mode. */ 2636 efer = 0; 2637 ctxt->ops->set_msr(ctxt, MSR_EFER, efer); 2638 } 2639 2640 /* 2641 * Give pre_leave_smm() a chance to make ISA-specific changes to the 2642 * vCPU state (e.g. enter guest mode) before loading state from the SMM 2643 * state-save area. 
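 * The "buf" passed to the hook is the 512-byte state-save image read
 * from SMRAM above (smbase + 0xfe00), so the hook can inspect the
 * saved image before rsm_load_state_32/64() consume it.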
2644 */ 2645 if (ctxt->ops->pre_leave_smm(ctxt, buf)) 2646 return X86EMUL_UNHANDLEABLE; 2647 2648 #ifdef CONFIG_X86_64 2649 if (emulator_has_longmode(ctxt)) 2650 ret = rsm_load_state_64(ctxt, buf); 2651 else 2652 #endif 2653 ret = rsm_load_state_32(ctxt, buf); 2654 2655 if (ret != X86EMUL_CONTINUE) { 2656 /* FIXME: should triple fault */ 2657 return X86EMUL_UNHANDLEABLE; 2658 } 2659 2660 ctxt->ops->post_leave_smm(ctxt); 2661 2662 return X86EMUL_CONTINUE; 2663 } 2664 2665 static void 2666 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, 2667 struct desc_struct *cs, struct desc_struct *ss) 2668 { 2669 cs->l = 0; /* will be adjusted later */ 2670 set_desc_base(cs, 0); /* flat segment */ 2671 cs->g = 1; /* 4kb granularity */ 2672 set_desc_limit(cs, 0xfffff); /* 4GB limit */ 2673 cs->type = 0x0b; /* Read, Execute, Accessed */ 2674 cs->s = 1; 2675 cs->dpl = 0; /* will be adjusted later */ 2676 cs->p = 1; 2677 cs->d = 1; 2678 cs->avl = 0; 2679 2680 set_desc_base(ss, 0); /* flat segment */ 2681 set_desc_limit(ss, 0xfffff); /* 4GB limit */ 2682 ss->g = 1; /* 4kb granularity */ 2683 ss->s = 1; 2684 ss->type = 0x03; /* Read/Write, Accessed */ 2685 ss->d = 1; /* 32bit stack segment */ 2686 ss->dpl = 0; 2687 ss->p = 1; 2688 ss->l = 0; 2689 ss->avl = 0; 2690 } 2691 2692 static bool vendor_intel(struct x86_emulate_ctxt *ctxt) 2693 { 2694 u32 eax, ebx, ecx, edx; 2695 2696 eax = ecx = 0; 2697 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); 2698 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 2699 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 2700 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; 2701 } 2702 2703 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) 2704 { 2705 const struct x86_emulate_ops *ops = ctxt->ops; 2706 u32 eax, ebx, ecx, edx; 2707 2708 /* 2709 * SYSCALL is always enabled in long mode, so the check only needs to 2710 * become vendor-specific (via CPUID) when some other mode is active... 2711 */ 2712 if (ctxt->mode == X86EMUL_MODE_PROT64) 2713 return true; 2714 2715 eax = 0x00000000; 2716 ecx = 0x00000000; 2717 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); 2718 /* 2719 * Intel ("GenuineIntel"): Intel CPUs only support SYSCALL in 64-bit 2720 * long mode, so a 32-bit compat application running under a 64-bit 2721 * guest raises #UD. This behaviour could be emulated to match the 2722 * AMD response, but AMD CPUs cannot be made to behave like Intel 2723 * ones, so treat SYSCALL as disabled when the guest is not in long 2724 * mode on an Intel CPU. 2725 */ 2726 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && 2727 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && 2728 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) 2729 return false; 2730 2731 /* AMD ("AuthenticAMD") */ 2732 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && 2733 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && 2734 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) 2735 return true; 2736 2737 /* AMD ("AMDisbetter!") */ 2738 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && 2739 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && 2740 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) 2741 return true; 2742 2743 /* Hygon ("HygonGenuine") */ 2744 if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx && 2745 ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx && 2746 edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx) 2747 return true; 2748 2749 /* 2750 * default: (not Intel, not AMD, not Hygon), apply Intel's 2751 * stricter rules...
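 * i.e. report SYSCALL as unusable outside long mode, giving an unknown
 * vendor the same #UD behaviour as the GenuineIntel case above.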
2752 */ 2753 return false; 2754 } 2755 2756 static int em_syscall(struct x86_emulate_ctxt *ctxt) 2757 { 2758 const struct x86_emulate_ops *ops = ctxt->ops; 2759 struct desc_struct cs, ss; 2760 u64 msr_data; 2761 u16 cs_sel, ss_sel; 2762 u64 efer = 0; 2763 2764 /* syscall is not available in real mode */ 2765 if (ctxt->mode == X86EMUL_MODE_REAL || 2766 ctxt->mode == X86EMUL_MODE_VM86) 2767 return emulate_ud(ctxt); 2768 2769 if (!(em_syscall_is_enabled(ctxt))) 2770 return emulate_ud(ctxt); 2771 2772 ops->get_msr(ctxt, MSR_EFER, &efer); 2773 setup_syscalls_segments(ctxt, &cs, &ss); 2774 2775 if (!(efer & EFER_SCE)) 2776 return emulate_ud(ctxt); 2777 2778 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2779 msr_data >>= 32; 2780 cs_sel = (u16)(msr_data & 0xfffc); 2781 ss_sel = (u16)(msr_data + 8); 2782 2783 if (efer & EFER_LMA) { 2784 cs.d = 0; 2785 cs.l = 1; 2786 } 2787 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2788 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2789 2790 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; 2791 if (efer & EFER_LMA) { 2792 #ifdef CONFIG_X86_64 2793 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; 2794 2795 ops->get_msr(ctxt, 2796 ctxt->mode == X86EMUL_MODE_PROT64 ? 2797 MSR_LSTAR : MSR_CSTAR, &msr_data); 2798 ctxt->_eip = msr_data; 2799 2800 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); 2801 ctxt->eflags &= ~msr_data; 2802 ctxt->eflags |= X86_EFLAGS_FIXED; 2803 #endif 2804 } else { 2805 /* legacy mode */ 2806 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2807 ctxt->_eip = (u32)msr_data; 2808 2809 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2810 } 2811 2812 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 2813 return X86EMUL_CONTINUE; 2814 } 2815 2816 static int em_sysenter(struct x86_emulate_ctxt *ctxt) 2817 { 2818 const struct x86_emulate_ops *ops = ctxt->ops; 2819 struct desc_struct cs, ss; 2820 u64 msr_data; 2821 u16 cs_sel, ss_sel; 2822 u64 efer = 0; 2823 2824 ops->get_msr(ctxt, MSR_EFER, &efer); 2825 /* inject #GP if in real mode */ 2826 if (ctxt->mode == X86EMUL_MODE_REAL) 2827 return emulate_gp(ctxt, 0); 2828 2829 /* 2830 * Not recognized on AMD in compat mode (but is recognized in legacy 2831 * mode). 2832 */ 2833 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) 2834 && !vendor_intel(ctxt)) 2835 return emulate_ud(ctxt); 2836 2837 /* sysenter/sysexit have not been tested in 64bit mode. */ 2838 if (ctxt->mode == X86EMUL_MODE_PROT64) 2839 return X86EMUL_UNHANDLEABLE; 2840 2841 setup_syscalls_segments(ctxt, &cs, &ss); 2842 2843 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2844 if ((msr_data & 0xfffc) == 0x0) 2845 return emulate_gp(ctxt, 0); 2846 2847 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2848 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; 2849 ss_sel = cs_sel + 8; 2850 if (efer & EFER_LMA) { 2851 cs.d = 0; 2852 cs.l = 1; 2853 } 2854 2855 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2856 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2857 2858 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2859 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; 2860 2861 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2862 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? 
msr_data : 2863 (u32)msr_data; 2864 2865 return X86EMUL_CONTINUE; 2866 } 2867 2868 static int em_sysexit(struct x86_emulate_ctxt *ctxt) 2869 { 2870 const struct x86_emulate_ops *ops = ctxt->ops; 2871 struct desc_struct cs, ss; 2872 u64 msr_data, rcx, rdx; 2873 int usermode; 2874 u16 cs_sel = 0, ss_sel = 0; 2875 2876 /* inject #GP if in real mode or Virtual 8086 mode */ 2877 if (ctxt->mode == X86EMUL_MODE_REAL || 2878 ctxt->mode == X86EMUL_MODE_VM86) 2879 return emulate_gp(ctxt, 0); 2880 2881 setup_syscalls_segments(ctxt, &cs, &ss); 2882 2883 if ((ctxt->rex_prefix & 0x8) != 0x0) 2884 usermode = X86EMUL_MODE_PROT64; 2885 else 2886 usermode = X86EMUL_MODE_PROT32; 2887 2888 rcx = reg_read(ctxt, VCPU_REGS_RCX); 2889 rdx = reg_read(ctxt, VCPU_REGS_RDX); 2890 2891 cs.dpl = 3; 2892 ss.dpl = 3; 2893 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2894 switch (usermode) { 2895 case X86EMUL_MODE_PROT32: 2896 cs_sel = (u16)(msr_data + 16); 2897 if ((msr_data & 0xfffc) == 0x0) 2898 return emulate_gp(ctxt, 0); 2899 ss_sel = (u16)(msr_data + 24); 2900 rcx = (u32)rcx; 2901 rdx = (u32)rdx; 2902 break; 2903 case X86EMUL_MODE_PROT64: 2904 cs_sel = (u16)(msr_data + 32); 2905 if (msr_data == 0x0) 2906 return emulate_gp(ctxt, 0); 2907 ss_sel = cs_sel + 8; 2908 cs.d = 0; 2909 cs.l = 1; 2910 if (emul_is_noncanonical_address(rcx, ctxt) || 2911 emul_is_noncanonical_address(rdx, ctxt)) 2912 return emulate_gp(ctxt, 0); 2913 break; 2914 } 2915 cs_sel |= SEGMENT_RPL_MASK; 2916 ss_sel |= SEGMENT_RPL_MASK; 2917 2918 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2919 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2920 2921 ctxt->_eip = rdx; 2922 *reg_write(ctxt, VCPU_REGS_RSP) = rcx; 2923 2924 return X86EMUL_CONTINUE; 2925 } 2926 2927 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) 2928 { 2929 int iopl; 2930 if (ctxt->mode == X86EMUL_MODE_REAL) 2931 return false; 2932 if (ctxt->mode == X86EMUL_MODE_VM86) 2933 return true; 2934 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; 2935 return ctxt->ops->cpl(ctxt) > iopl; 2936 } 2937 2938 #define VMWARE_PORT_VMPORT (0x5658) 2939 #define VMWARE_PORT_VMRPC (0x5659) 2940 2941 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, 2942 u16 port, u16 len) 2943 { 2944 const struct x86_emulate_ops *ops = ctxt->ops; 2945 struct desc_struct tr_seg; 2946 u32 base3; 2947 int r; 2948 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; 2949 unsigned mask = (1 << len) - 1; 2950 unsigned long base; 2951 2952 /* 2953 * VMware allows access to these ports even if denied 2954 * by TSS I/O permission bitmap. Mimic behavior. 
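 *
 * For ports that do go through the bitmap check below, a worked
 * example: port 0x3f8 with len 1 reads the u16 at base +
 * io_bitmap_ptr + 0x3f8/8; bit_idx = 0x3f8 & 0x7 = 0 and
 * mask = (1 << 1) - 1 = 1, so access is allowed only if the lowest
 * bit of that word is clear.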
2955 */ 2956 if (enable_vmware_backdoor && 2957 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC))) 2958 return true; 2959 2960 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); 2961 if (!tr_seg.p) 2962 return false; 2963 if (desc_limit_scaled(&tr_seg) < 103) 2964 return false; 2965 base = get_desc_base(&tr_seg); 2966 #ifdef CONFIG_X86_64 2967 base |= ((u64)base3) << 32; 2968 #endif 2969 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); 2970 if (r != X86EMUL_CONTINUE) 2971 return false; 2972 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2973 return false; 2974 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); 2975 if (r != X86EMUL_CONTINUE) 2976 return false; 2977 if ((perm >> bit_idx) & mask) 2978 return false; 2979 return true; 2980 } 2981 2982 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, 2983 u16 port, u16 len) 2984 { 2985 if (ctxt->perm_ok) 2986 return true; 2987 2988 if (emulator_bad_iopl(ctxt)) 2989 if (!emulator_io_port_access_allowed(ctxt, port, len)) 2990 return false; 2991 2992 ctxt->perm_ok = true; 2993 2994 return true; 2995 } 2996 2997 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) 2998 { 2999 /* 3000 * Intel CPUs mask the counter and pointers in a rather strange 3001 * manner when ECX is zero, due to REP-string optimizations. 3002 */ 3003 #ifdef CONFIG_X86_64 3004 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt)) 3005 return; 3006 3007 *reg_write(ctxt, VCPU_REGS_RCX) = 0; 3008 3009 switch (ctxt->b) { 3010 case 0xa4: /* movsb */ 3011 case 0xa5: /* movsd/w */ 3012 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; 3013 /* fall through */ 3014 case 0xaa: /* stosb */ 3015 case 0xab: /* stosd/w */ 3016 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; 3017 } 3018 #endif 3019 } 3020 3021 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, 3022 struct tss_segment_16 *tss) 3023 { 3024 tss->ip = ctxt->_eip; 3025 tss->flag = ctxt->eflags; 3026 tss->ax = reg_read(ctxt, VCPU_REGS_RAX); 3027 tss->cx = reg_read(ctxt, VCPU_REGS_RCX); 3028 tss->dx = reg_read(ctxt, VCPU_REGS_RDX); 3029 tss->bx = reg_read(ctxt, VCPU_REGS_RBX); 3030 tss->sp = reg_read(ctxt, VCPU_REGS_RSP); 3031 tss->bp = reg_read(ctxt, VCPU_REGS_RBP); 3032 tss->si = reg_read(ctxt, VCPU_REGS_RSI); 3033 tss->di = reg_read(ctxt, VCPU_REGS_RDI); 3034 3035 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 3036 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 3037 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 3038 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 3039 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); 3040 } 3041 3042 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, 3043 struct tss_segment_16 *tss) 3044 { 3045 int ret; 3046 u8 cpl; 3047 3048 ctxt->_eip = tss->ip; 3049 ctxt->eflags = tss->flag | 2; 3050 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; 3051 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; 3052 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; 3053 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; 3054 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; 3055 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; 3056 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; 3057 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; 3058 3059 /* 3060 * SDM says that segment selectors are loaded before segment 3061 * descriptors 3062 */ 3063 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); 3064 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 3065 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 3066 set_segment_selector(ctxt,
tss->ss, VCPU_SREG_SS); 3067 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 3068 3069 cpl = tss->cs & 3; 3070 3071 /* 3072 * Now load the segment descriptors. If a fault happens at this stage, 3073 * it is handled in the context of the new task. 3074 */ 3075 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, 3076 X86_TRANSFER_TASK_SWITCH, NULL); 3077 if (ret != X86EMUL_CONTINUE) 3078 return ret; 3079 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, 3080 X86_TRANSFER_TASK_SWITCH, NULL); 3081 if (ret != X86EMUL_CONTINUE) 3082 return ret; 3083 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, 3084 X86_TRANSFER_TASK_SWITCH, NULL); 3085 if (ret != X86EMUL_CONTINUE) 3086 return ret; 3087 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, 3088 X86_TRANSFER_TASK_SWITCH, NULL); 3089 if (ret != X86EMUL_CONTINUE) 3090 return ret; 3091 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, 3092 X86_TRANSFER_TASK_SWITCH, NULL); 3093 if (ret != X86EMUL_CONTINUE) 3094 return ret; 3095 3096 return X86EMUL_CONTINUE; 3097 } 3098 3099 static int task_switch_16(struct x86_emulate_ctxt *ctxt, 3100 u16 tss_selector, u16 old_tss_sel, 3101 ulong old_tss_base, struct desc_struct *new_desc) 3102 { 3103 struct tss_segment_16 tss_seg; 3104 int ret; 3105 u32 new_tss_base = get_desc_base(new_desc); 3106 3107 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3108 if (ret != X86EMUL_CONTINUE) 3109 return ret; 3110 3111 save_state_to_tss16(ctxt, &tss_seg); 3112 3113 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3114 if (ret != X86EMUL_CONTINUE) 3115 return ret; 3116 3117 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3118 if (ret != X86EMUL_CONTINUE) 3119 return ret; 3120 3121 if (old_tss_sel != 0xffff) { 3122 tss_seg.prev_task_link = old_tss_sel; 3123 3124 ret = linear_write_system(ctxt, new_tss_base, 3125 &tss_seg.prev_task_link, 3126 sizeof(tss_seg.prev_task_link)); 3127 if (ret != X86EMUL_CONTINUE) 3128 return ret; 3129 } 3130 3131 return load_state_from_tss16(ctxt, &tss_seg); 3132 } 3133 3134 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, 3135 struct tss_segment_32 *tss) 3136 { 3137 /* CR3 and the LDT selector are intentionally not saved */ 3138 tss->eip = ctxt->_eip; 3139 tss->eflags = ctxt->eflags; 3140 tss->eax = reg_read(ctxt, VCPU_REGS_RAX); 3141 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); 3142 tss->edx = reg_read(ctxt, VCPU_REGS_RDX); 3143 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); 3144 tss->esp = reg_read(ctxt, VCPU_REGS_RSP); 3145 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); 3146 tss->esi = reg_read(ctxt, VCPU_REGS_RSI); 3147 tss->edi = reg_read(ctxt, VCPU_REGS_RDI); 3148 3149 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 3150 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 3151 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 3152 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 3153 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); 3154 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); 3155 } 3156 3157 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, 3158 struct tss_segment_32 *tss) 3159 { 3160 int ret; 3161 u8 cpl; 3162 3163 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) 3164 return emulate_gp(ctxt, 0); 3165 ctxt->_eip = tss->eip; 3166 ctxt->eflags = tss->eflags | 2; 3167 3168 /* General purpose registers */ 3169 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; 3170 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; 3171 *reg_write(ctxt,
VCPU_REGS_RDX) = tss->edx; 3172 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; 3173 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; 3174 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; 3175 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; 3176 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; 3177 3178 /* 3179 * SDM says that segment selectors are loaded before segment 3180 * descriptors. This is important because CPL checks will 3181 * use CS.RPL. 3182 */ 3183 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); 3184 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 3185 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 3186 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 3187 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 3188 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); 3189 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); 3190 3191 /* 3192 * If we're switching between Protected Mode and VM86, we need to make 3193 * sure to update the mode before loading the segment descriptors so 3194 * that the selectors are interpreted correctly. 3195 */ 3196 if (ctxt->eflags & X86_EFLAGS_VM) { 3197 ctxt->mode = X86EMUL_MODE_VM86; 3198 cpl = 3; 3199 } else { 3200 ctxt->mode = X86EMUL_MODE_PROT32; 3201 cpl = tss->cs & 3; 3202 } 3203 3204 /* 3205 * Now load the segment descriptors. If a fault happens at this stage, 3206 * it is handled in the context of the new task. 3207 */ 3208 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, 3209 cpl, X86_TRANSFER_TASK_SWITCH, NULL); 3210 if (ret != X86EMUL_CONTINUE) 3211 return ret; 3212 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, 3213 X86_TRANSFER_TASK_SWITCH, NULL); 3214 if (ret != X86EMUL_CONTINUE) 3215 return ret; 3216 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, 3217 X86_TRANSFER_TASK_SWITCH, NULL); 3218 if (ret != X86EMUL_CONTINUE) 3219 return ret; 3220 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, 3221 X86_TRANSFER_TASK_SWITCH, NULL); 3222 if (ret != X86EMUL_CONTINUE) 3223 return ret; 3224 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, 3225 X86_TRANSFER_TASK_SWITCH, NULL); 3226 if (ret != X86EMUL_CONTINUE) 3227 return ret; 3228 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, 3229 X86_TRANSFER_TASK_SWITCH, NULL); 3230 if (ret != X86EMUL_CONTINUE) 3231 return ret; 3232 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, 3233 X86_TRANSFER_TASK_SWITCH, NULL); 3234 3235 return ret; 3236 } 3237 3238 static int task_switch_32(struct x86_emulate_ctxt *ctxt, 3239 u16 tss_selector, u16 old_tss_sel, 3240 ulong old_tss_base, struct desc_struct *new_desc) 3241 { 3242 struct tss_segment_32 tss_seg; 3243 int ret; 3244 u32 new_tss_base = get_desc_base(new_desc); 3245 u32 eip_offset = offsetof(struct tss_segment_32, eip); 3246 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 3247 3248 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3249 if (ret != X86EMUL_CONTINUE) 3250 return ret; 3251 3252 save_state_to_tss32(ctxt, &tss_seg); 3253 3254 /* Only GP registers and segment selectors are saved */ 3255 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, 3256 ldt_sel_offset - eip_offset); 3257 if (ret != X86EMUL_CONTINUE) 3258 return ret; 3259 3260 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3261 if (ret != X86EMUL_CONTINUE) 3262 return ret; 3263 3264 if (old_tss_sel != 0xffff) { 3265 tss_seg.prev_task_link = old_tss_sel; 3266 3267 ret =
linear_write_system(ctxt, new_tss_base, 3268 &tss_seg.prev_task_link, 3269 sizeof(tss_seg.prev_task_link)); 3270 if (ret != X86EMUL_CONTINUE) 3271 return ret; 3272 } 3273 3274 return load_state_from_tss32(ctxt, &tss_seg); 3275 } 3276 3277 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, 3278 u16 tss_selector, int idt_index, int reason, 3279 bool has_error_code, u32 error_code) 3280 { 3281 const struct x86_emulate_ops *ops = ctxt->ops; 3282 struct desc_struct curr_tss_desc, next_tss_desc; 3283 int ret; 3284 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); 3285 ulong old_tss_base = 3286 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); 3287 u32 desc_limit; 3288 ulong desc_addr, dr7; 3289 3290 /* FIXME: old_tss_base == ~0 ? */ 3291 3292 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); 3293 if (ret != X86EMUL_CONTINUE) 3294 return ret; 3295 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); 3296 if (ret != X86EMUL_CONTINUE) 3297 return ret; 3298 3299 /* FIXME: check that next_tss_desc is tss */ 3300 3301 /* 3302 * Check privileges. The three cases are task switch caused by... 3303 * 3304 * 1. jmp/call/int to task gate: Check against DPL of the task gate 3305 * 2. Exception/IRQ/iret: No check is performed 3306 * 3. jmp/call to TSS/task-gate: No check is performed since the 3307 * hardware checks it before exiting. 3308 */ 3309 if (reason == TASK_SWITCH_GATE) { 3310 if (idt_index != -1) { 3311 /* Software interrupts */ 3312 struct desc_struct task_gate_desc; 3313 int dpl; 3314 3315 ret = read_interrupt_descriptor(ctxt, idt_index, 3316 &task_gate_desc); 3317 if (ret != X86EMUL_CONTINUE) 3318 return ret; 3319 3320 dpl = task_gate_desc.dpl; 3321 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 3322 return emulate_gp(ctxt, (idt_index << 3) | 0x2); 3323 } 3324 } 3325 3326 desc_limit = desc_limit_scaled(&next_tss_desc); 3327 if (!next_tss_desc.p || 3328 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || 3329 desc_limit < 0x2b)) { 3330 return emulate_ts(ctxt, tss_selector & 0xfffc); 3331 } 3332 3333 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 3334 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ 3335 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); 3336 } 3337 3338 if (reason == TASK_SWITCH_IRET) 3339 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; 3340 3341 /* set back link to prev task only if NT bit is set in eflags 3342 note that old_tss_sel is not used after this point */ 3343 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) 3344 old_tss_sel = 0xffff; 3345 3346 if (next_tss_desc.type & 8) 3347 ret = task_switch_32(ctxt, tss_selector, old_tss_sel, 3348 old_tss_base, &next_tss_desc); 3349 else 3350 ret = task_switch_16(ctxt, tss_selector, old_tss_sel, 3351 old_tss_base, &next_tss_desc); 3352 if (ret != X86EMUL_CONTINUE) 3353 return ret; 3354 3355 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) 3356 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; 3357 3358 if (reason != TASK_SWITCH_IRET) { 3359 next_tss_desc.type |= (1 << 1); /* set busy flag */ 3360 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); 3361 } 3362 3363 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); 3364 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); 3365 3366 if (has_error_code) { 3367 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 
4 : 2; 3368 ctxt->lock_prefix = 0; 3369 ctxt->src.val = (unsigned long) error_code; 3370 ret = em_push(ctxt); 3371 } 3372 3373 ops->get_dr(ctxt, 7, &dr7); 3374 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); 3375 3376 return ret; 3377 } 3378 3379 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 3380 u16 tss_selector, int idt_index, int reason, 3381 bool has_error_code, u32 error_code) 3382 { 3383 int rc; 3384 3385 invalidate_registers(ctxt); 3386 ctxt->_eip = ctxt->eip; 3387 ctxt->dst.type = OP_NONE; 3388 3389 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, 3390 has_error_code, error_code); 3391 3392 if (rc == X86EMUL_CONTINUE) { 3393 ctxt->eip = ctxt->_eip; 3394 writeback_registers(ctxt); 3395 } 3396 3397 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 3398 } 3399 3400 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, 3401 struct operand *op) 3402 { 3403 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count; 3404 3405 register_address_increment(ctxt, reg, df * op->bytes); 3406 op->addr.mem.ea = register_address(ctxt, reg); 3407 } 3408 3409 static int em_das(struct x86_emulate_ctxt *ctxt) 3410 { 3411 u8 al, old_al; 3412 bool af, cf, old_cf; 3413 3414 cf = ctxt->eflags & X86_EFLAGS_CF; 3415 al = ctxt->dst.val; 3416 3417 old_al = al; 3418 old_cf = cf; 3419 cf = false; 3420 af = ctxt->eflags & X86_EFLAGS_AF; 3421 if ((al & 0x0f) > 9 || af) { 3422 al -= 6; 3423 cf = old_cf | (al >= 250); 3424 af = true; 3425 } else { 3426 af = false; 3427 } 3428 if (old_al > 0x99 || old_cf) { 3429 al -= 0x60; 3430 cf = true; 3431 } 3432 3433 ctxt->dst.val = al; 3434 /* Set PF, ZF, SF */ 3435 ctxt->src.type = OP_IMM; 3436 ctxt->src.val = 0; 3437 ctxt->src.bytes = 1; 3438 fastop(ctxt, em_or); 3439 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); 3440 if (cf) 3441 ctxt->eflags |= X86_EFLAGS_CF; 3442 if (af) 3443 ctxt->eflags |= X86_EFLAGS_AF; 3444 return X86EMUL_CONTINUE; 3445 } 3446 3447 static int em_aam(struct x86_emulate_ctxt *ctxt) 3448 { 3449 u8 al, ah; 3450 3451 if (ctxt->src.val == 0) 3452 return emulate_de(ctxt); 3453 3454 al = ctxt->dst.val & 0xff; 3455 ah = al / ctxt->src.val; 3456 al %= ctxt->src.val; 3457 3458 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); 3459 3460 /* Set PF, ZF, SF */ 3461 ctxt->src.type = OP_IMM; 3462 ctxt->src.val = 0; 3463 ctxt->src.bytes = 1; 3464 fastop(ctxt, em_or); 3465 3466 return X86EMUL_CONTINUE; 3467 } 3468 3469 static int em_aad(struct x86_emulate_ctxt *ctxt) 3470 { 3471 u8 al = ctxt->dst.val & 0xff; 3472 u8 ah = (ctxt->dst.val >> 8) & 0xff; 3473 3474 al = (al + (ah * ctxt->src.val)) & 0xff; 3475 3476 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; 3477 3478 /* Set PF, ZF, SF */ 3479 ctxt->src.type = OP_IMM; 3480 ctxt->src.val = 0; 3481 ctxt->src.bytes = 1; 3482 fastop(ctxt, em_or); 3483 3484 return X86EMUL_CONTINUE; 3485 } 3486 3487 static int em_call(struct x86_emulate_ctxt *ctxt) 3488 { 3489 int rc; 3490 long rel = ctxt->src.val; 3491 3492 ctxt->src.val = (unsigned long)ctxt->_eip; 3493 rc = jmp_rel(ctxt, rel); 3494 if (rc != X86EMUL_CONTINUE) 3495 return rc; 3496 return em_push(ctxt); 3497 } 3498 3499 static int em_call_far(struct x86_emulate_ctxt *ctxt) 3500 { 3501 u16 sel, old_cs; 3502 ulong old_eip; 3503 int rc; 3504 struct desc_struct old_desc, new_desc; 3505 const struct x86_emulate_ops *ops = ctxt->ops; 3506 int cpl = ctxt->ops->cpl(ctxt); 3507 enum x86emul_mode prev_mode = ctxt->mode; 3508 3509 old_eip = ctxt->_eip; 3510 
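/*
 * Far call sketch: load the new CS and EIP first, then push the old
 * CS:EIP. Any fault after the CS load unwinds through the fail: label
 * below, which restores the old CS descriptor and emulation mode.
 */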
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); 3511 3512 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 3513 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, 3514 X86_TRANSFER_CALL_JMP, &new_desc); 3515 if (rc != X86EMUL_CONTINUE) 3516 return rc; 3517 3518 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 3519 if (rc != X86EMUL_CONTINUE) 3520 goto fail; 3521 3522 ctxt->src.val = old_cs; 3523 rc = em_push(ctxt); 3524 if (rc != X86EMUL_CONTINUE) 3525 goto fail; 3526 3527 ctxt->src.val = old_eip; 3528 rc = em_push(ctxt); 3529 /* If we failed, we tainted the memory, but at the very least we should 3530 restore cs */ 3531 if (rc != X86EMUL_CONTINUE) { 3532 pr_warn_once("faulting far call emulation tainted memory\n"); 3533 goto fail; 3534 } 3535 return rc; 3536 fail: 3537 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); 3538 ctxt->mode = prev_mode; 3539 return rc; 3540 3541 } 3542 3543 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) 3544 { 3545 int rc; 3546 unsigned long eip; 3547 3548 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 3549 if (rc != X86EMUL_CONTINUE) 3550 return rc; 3551 rc = assign_eip_near(ctxt, eip); 3552 if (rc != X86EMUL_CONTINUE) 3553 return rc; 3554 rsp_increment(ctxt, ctxt->src.val); 3555 return X86EMUL_CONTINUE; 3556 } 3557 3558 static int em_xchg(struct x86_emulate_ctxt *ctxt) 3559 { 3560 /* Write back the register source. */ 3561 ctxt->src.val = ctxt->dst.val; 3562 write_register_operand(&ctxt->src); 3563 3564 /* Write back the memory destination with implicit LOCK prefix. */ 3565 ctxt->dst.val = ctxt->src.orig_val; 3566 ctxt->lock_prefix = 1; 3567 return X86EMUL_CONTINUE; 3568 } 3569 3570 static int em_imul_3op(struct x86_emulate_ctxt *ctxt) 3571 { 3572 ctxt->dst.val = ctxt->src2.val; 3573 return fastop(ctxt, em_imul); 3574 } 3575 3576 static int em_cwd(struct x86_emulate_ctxt *ctxt) 3577 { 3578 ctxt->dst.type = OP_REG; 3579 ctxt->dst.bytes = ctxt->src.bytes; 3580 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 3581 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); 3582 3583 return X86EMUL_CONTINUE; 3584 } 3585 3586 static int em_rdpid(struct x86_emulate_ctxt *ctxt) 3587 { 3588 u64 tsc_aux = 0; 3589 3590 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) 3591 return emulate_gp(ctxt, 0); 3592 ctxt->dst.val = tsc_aux; 3593 return X86EMUL_CONTINUE; 3594 } 3595 3596 static int em_rdtsc(struct x86_emulate_ctxt *ctxt) 3597 { 3598 u64 tsc = 0; 3599 3600 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); 3601 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; 3602 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; 3603 return X86EMUL_CONTINUE; 3604 } 3605 3606 static int em_rdpmc(struct x86_emulate_ctxt *ctxt) 3607 { 3608 u64 pmc; 3609 3610 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) 3611 return emulate_gp(ctxt, 0); 3612 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; 3613 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; 3614 return X86EMUL_CONTINUE; 3615 } 3616 3617 static int em_mov(struct x86_emulate_ctxt *ctxt) 3618 { 3619 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); 3620 return X86EMUL_CONTINUE; 3621 } 3622 3623 #define FFL(x) bit(X86_FEATURE_##x) 3624 3625 static int em_movbe(struct x86_emulate_ctxt *ctxt) 3626 { 3627 u32 ebx, ecx, edx, eax = 1; 3628 u16 tmp; 3629 3630 /* 3631 * Check MOVBE is set in the guest-visible CPUID leaf.
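 * (CPUID.01H:ECX bit 22.) As an example of the 16-bit case handled
 * below, src.val = 0x1234 stores swab16(0x1234) = 0x3412 in the low
 * word of the destination and leaves the upper bytes unchanged.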
3632 */ 3633 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); 3634 if (!(ecx & FFL(MOVBE))) 3635 return emulate_ud(ctxt); 3636 3637 switch (ctxt->op_bytes) { 3638 case 2: 3639 /* 3640 * From MOVBE definition: "...When the operand size is 16 bits, 3641 * the upper word of the destination register remains unchanged 3642 * ..." 3643 * 3644 * Casting either ->valptr or ->val to u16 would break strict-aliasing 3645 * rules, so we have to do the operation almost by hand. 3646 */ 3647 tmp = (u16)ctxt->src.val; 3648 ctxt->dst.val &= ~0xffffUL; 3649 ctxt->dst.val |= (unsigned long)swab16(tmp); 3650 break; 3651 case 4: 3652 ctxt->dst.val = swab32((u32)ctxt->src.val); 3653 break; 3654 case 8: 3655 ctxt->dst.val = swab64(ctxt->src.val); 3656 break; 3657 default: 3658 BUG(); 3659 } 3660 return X86EMUL_CONTINUE; 3661 } 3662 3663 static int em_cr_write(struct x86_emulate_ctxt *ctxt) 3664 { 3665 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) 3666 return emulate_gp(ctxt, 0); 3667 3668 /* Disable writeback. */ 3669 ctxt->dst.type = OP_NONE; 3670 return X86EMUL_CONTINUE; 3671 } 3672 3673 static int em_dr_write(struct x86_emulate_ctxt *ctxt) 3674 { 3675 unsigned long val; 3676 3677 if (ctxt->mode == X86EMUL_MODE_PROT64) 3678 val = ctxt->src.val & ~0ULL; 3679 else 3680 val = ctxt->src.val & ~0U; 3681 3682 /* #UD condition is already handled. */ 3683 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) 3684 return emulate_gp(ctxt, 0); 3685 3686 /* Disable writeback. */ 3687 ctxt->dst.type = OP_NONE; 3688 return X86EMUL_CONTINUE; 3689 } 3690 3691 static int em_wrmsr(struct x86_emulate_ctxt *ctxt) 3692 { 3693 u64 msr_data; 3694 3695 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) 3696 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); 3697 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) 3698 return emulate_gp(ctxt, 0); 3699 3700 return X86EMUL_CONTINUE; 3701 } 3702 3703 static int em_rdmsr(struct x86_emulate_ctxt *ctxt) 3704 { 3705 u64 msr_data; 3706 3707 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) 3708 return emulate_gp(ctxt, 0); 3709 3710 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; 3711 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; 3712 return X86EMUL_CONTINUE; 3713 } 3714 3715 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment) 3716 { 3717 if (segment > VCPU_SREG_GS && 3718 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && 3719 ctxt->ops->cpl(ctxt) > 0) 3720 return emulate_gp(ctxt, 0); 3721 3722 ctxt->dst.val = get_segment_selector(ctxt, segment); 3723 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) 3724 ctxt->dst.bytes = 2; 3725 return X86EMUL_CONTINUE; 3726 } 3727 3728 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) 3729 { 3730 if (ctxt->modrm_reg > VCPU_SREG_GS) 3731 return emulate_ud(ctxt); 3732 3733 return em_store_sreg(ctxt, ctxt->modrm_reg); 3734 } 3735 3736 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) 3737 { 3738 u16 sel = ctxt->src.val; 3739 3740 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) 3741 return emulate_ud(ctxt); 3742 3743 if (ctxt->modrm_reg == VCPU_SREG_SS) 3744 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 3745 3746 /* Disable writeback.
*/ 3747 ctxt->dst.type = OP_NONE; 3748 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); 3749 } 3750 3751 static int em_sldt(struct x86_emulate_ctxt *ctxt) 3752 { 3753 return em_store_sreg(ctxt, VCPU_SREG_LDTR); 3754 } 3755 3756 static int em_lldt(struct x86_emulate_ctxt *ctxt) 3757 { 3758 u16 sel = ctxt->src.val; 3759 3760 /* Disable writeback. */ 3761 ctxt->dst.type = OP_NONE; 3762 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); 3763 } 3764 3765 static int em_str(struct x86_emulate_ctxt *ctxt) 3766 { 3767 return em_store_sreg(ctxt, VCPU_SREG_TR); 3768 } 3769 3770 static int em_ltr(struct x86_emulate_ctxt *ctxt) 3771 { 3772 u16 sel = ctxt->src.val; 3773 3774 /* Disable writeback. */ 3775 ctxt->dst.type = OP_NONE; 3776 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); 3777 } 3778 3779 static int em_invlpg(struct x86_emulate_ctxt *ctxt) 3780 { 3781 int rc; 3782 ulong linear; 3783 3784 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); 3785 if (rc == X86EMUL_CONTINUE) 3786 ctxt->ops->invlpg(ctxt, linear); 3787 /* Disable writeback. */ 3788 ctxt->dst.type = OP_NONE; 3789 return X86EMUL_CONTINUE; 3790 } 3791 3792 static int em_clts(struct x86_emulate_ctxt *ctxt) 3793 { 3794 ulong cr0; 3795 3796 cr0 = ctxt->ops->get_cr(ctxt, 0); 3797 cr0 &= ~X86_CR0_TS; 3798 ctxt->ops->set_cr(ctxt, 0, cr0); 3799 return X86EMUL_CONTINUE; 3800 } 3801 3802 static int em_hypercall(struct x86_emulate_ctxt *ctxt) 3803 { 3804 int rc = ctxt->ops->fix_hypercall(ctxt); 3805 3806 if (rc != X86EMUL_CONTINUE) 3807 return rc; 3808 3809 /* Let the processor re-execute the fixed hypercall */ 3810 ctxt->_eip = ctxt->eip; 3811 /* Disable writeback. */ 3812 ctxt->dst.type = OP_NONE; 3813 return X86EMUL_CONTINUE; 3814 } 3815 3816 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, 3817 void (*get)(struct x86_emulate_ctxt *ctxt, 3818 struct desc_ptr *ptr)) 3819 { 3820 struct desc_ptr desc_ptr; 3821 3822 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && 3823 ctxt->ops->cpl(ctxt) > 0) 3824 return emulate_gp(ctxt, 0); 3825 3826 if (ctxt->mode == X86EMUL_MODE_PROT64) 3827 ctxt->op_bytes = 8; 3828 get(ctxt, &desc_ptr); 3829 if (ctxt->op_bytes == 2) { 3830 ctxt->op_bytes = 4; 3831 desc_ptr.address &= 0x00ffffff; 3832 } 3833 /* Disable writeback. */ 3834 ctxt->dst.type = OP_NONE; 3835 return segmented_write_std(ctxt, ctxt->dst.addr.mem, 3836 &desc_ptr, 2 + ctxt->op_bytes); 3837 } 3838 3839 static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3840 { 3841 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); 3842 } 3843 3844 static int em_sidt(struct x86_emulate_ctxt *ctxt) 3845 { 3846 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); 3847 } 3848 3849 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) 3850 { 3851 struct desc_ptr desc_ptr; 3852 int rc; 3853 3854 if (ctxt->mode == X86EMUL_MODE_PROT64) 3855 ctxt->op_bytes = 8; 3856 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3857 &desc_ptr.size, &desc_ptr.address, 3858 ctxt->op_bytes); 3859 if (rc != X86EMUL_CONTINUE) 3860 return rc; 3861 if (ctxt->mode == X86EMUL_MODE_PROT64 && 3862 emul_is_noncanonical_address(desc_ptr.address, ctxt)) 3863 return emulate_gp(ctxt, 0); 3864 if (lgdt) 3865 ctxt->ops->set_gdt(ctxt, &desc_ptr); 3866 else 3867 ctxt->ops->set_idt(ctxt, &desc_ptr); 3868 /* Disable writeback. 
*/ 3869 ctxt->dst.type = OP_NONE; 3870 return X86EMUL_CONTINUE; 3871 } 3872 3873 static int em_lgdt(struct x86_emulate_ctxt *ctxt) 3874 { 3875 return em_lgdt_lidt(ctxt, true); 3876 } 3877 3878 static int em_lidt(struct x86_emulate_ctxt *ctxt) 3879 { 3880 return em_lgdt_lidt(ctxt, false); 3881 } 3882 3883 static int em_smsw(struct x86_emulate_ctxt *ctxt) 3884 { 3885 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && 3886 ctxt->ops->cpl(ctxt) > 0) 3887 return emulate_gp(ctxt, 0); 3888 3889 if (ctxt->dst.type == OP_MEM) 3890 ctxt->dst.bytes = 2; 3891 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); 3892 return X86EMUL_CONTINUE; 3893 } 3894 3895 static int em_lmsw(struct x86_emulate_ctxt *ctxt) 3896 { 3897 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) 3898 | (ctxt->src.val & 0x0f)); 3899 ctxt->dst.type = OP_NONE; 3900 return X86EMUL_CONTINUE; 3901 } 3902 3903 static int em_loop(struct x86_emulate_ctxt *ctxt) 3904 { 3905 int rc = X86EMUL_CONTINUE; 3906 3907 register_address_increment(ctxt, VCPU_REGS_RCX, -1); 3908 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3909 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3910 rc = jmp_rel(ctxt, ctxt->src.val); 3911 3912 return rc; 3913 } 3914 3915 static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3916 { 3917 int rc = X86EMUL_CONTINUE; 3918 3919 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3920 rc = jmp_rel(ctxt, ctxt->src.val); 3921 3922 return rc; 3923 } 3924 3925 static int em_in(struct x86_emulate_ctxt *ctxt) 3926 { 3927 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, 3928 &ctxt->dst.val)) 3929 return X86EMUL_IO_NEEDED; 3930 3931 return X86EMUL_CONTINUE; 3932 } 3933 3934 static int em_out(struct x86_emulate_ctxt *ctxt) 3935 { 3936 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, 3937 &ctxt->src.val, 1); 3938 /* Disable writeback. 
*/ 3939 ctxt->dst.type = OP_NONE; 3940 return X86EMUL_CONTINUE; 3941 } 3942 3943 static int em_cli(struct x86_emulate_ctxt *ctxt) 3944 { 3945 if (emulator_bad_iopl(ctxt)) 3946 return emulate_gp(ctxt, 0); 3947 3948 ctxt->eflags &= ~X86_EFLAGS_IF; 3949 return X86EMUL_CONTINUE; 3950 } 3951 3952 static int em_sti(struct x86_emulate_ctxt *ctxt) 3953 { 3954 if (emulator_bad_iopl(ctxt)) 3955 return emulate_gp(ctxt, 0); 3956 3957 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3958 ctxt->eflags |= X86_EFLAGS_IF; 3959 return X86EMUL_CONTINUE; 3960 } 3961 3962 static int em_cpuid(struct x86_emulate_ctxt *ctxt) 3963 { 3964 u32 eax, ebx, ecx, edx; 3965 u64 msr = 0; 3966 3967 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr); 3968 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3969 ctxt->ops->cpl(ctxt)) { 3970 return emulate_gp(ctxt, 0); 3971 } 3972 3973 eax = reg_read(ctxt, VCPU_REGS_RAX); 3974 ecx = reg_read(ctxt, VCPU_REGS_RCX); 3975 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true); 3976 *reg_write(ctxt, VCPU_REGS_RAX) = eax; 3977 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; 3978 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; 3979 *reg_write(ctxt, VCPU_REGS_RDX) = edx; 3980 return X86EMUL_CONTINUE; 3981 } 3982 3983 static int em_sahf(struct x86_emulate_ctxt *ctxt) 3984 { 3985 u32 flags; 3986 3987 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 3988 X86_EFLAGS_SF; 3989 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; 3990 3991 ctxt->eflags &= ~0xffUL; 3992 ctxt->eflags |= flags | X86_EFLAGS_FIXED; 3993 return X86EMUL_CONTINUE; 3994 } 3995 3996 static int em_lahf(struct x86_emulate_ctxt *ctxt) 3997 { 3998 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; 3999 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; 4000 return X86EMUL_CONTINUE; 4001 } 4002 4003 static int em_bswap(struct x86_emulate_ctxt *ctxt) 4004 { 4005 switch (ctxt->op_bytes) { 4006 #ifdef CONFIG_X86_64 4007 case 8: 4008 asm("bswap %0" : "+r"(ctxt->dst.val)); 4009 break; 4010 #endif 4011 default: 4012 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); 4013 break; 4014 } 4015 return X86EMUL_CONTINUE; 4016 } 4017 4018 static int em_clflush(struct x86_emulate_ctxt *ctxt) 4019 { 4020 /* emulating clflush regardless of cpuid */ 4021 return X86EMUL_CONTINUE; 4022 } 4023 4024 static int em_movsxd(struct x86_emulate_ctxt *ctxt) 4025 { 4026 ctxt->dst.val = (s32) ctxt->src.val; 4027 return X86EMUL_CONTINUE; 4028 } 4029 4030 static int check_fxsr(struct x86_emulate_ctxt *ctxt) 4031 { 4032 u32 eax = 1, ebx, ecx = 0, edx; 4033 4034 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); 4035 if (!(edx & FFL(FXSR))) 4036 return emulate_ud(ctxt); 4037 4038 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 4039 return emulate_nm(ctxt); 4040 4041 /* 4042 * Don't emulate a case that should never be hit, instead of working 4043 * around a lack of fxsave64/fxrstor64 on old compilers. 4044 */ 4045 if (ctxt->mode >= X86EMUL_MODE_PROT64) 4046 return X86EMUL_UNHANDLEABLE; 4047 4048 return X86EMUL_CONTINUE; 4049 } 4050 4051 /* 4052 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save 4053 * and restore MXCSR. 
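 * fxstate_size() therefore always includes everything up to the XMM
 * save area (MXCSR included) and only varies the number of XMM
 * registers that get copied.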
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP (the FPU instruction and data pointers)
 *       are only 16 bit.  At least Intel CPUs preserve whole 32 bit values,
 *       though, so (1) and (2) are the same wrt. save and restore
 *  3) 64-bit mode without REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode with REX.W prefix (FXSAVE64/FXRSTOR64)
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	return rc;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;

		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA) {
			u64 maxphyaddr;
			u32 eax, ebx, ecx, edx;

			eax = 0x80000008;
			ecx = 0;
			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
						 &edx, false))
				maxphyaddr = eax & 0xff;
			else
				maxphyaddr = 36;
			rsvd = rsvd_bits(maxphyaddr, 63);
			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
				rsvd &= ~X86_CR3_PCID_NOFLUSH;
		}

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.GD (general detect enable, bit 13) is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~DR_TRAP_BITS;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address?
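 * VMRUN, VMLOAD and VMSAVE take a physical address in RAX; reject
 * anything with bits set above a 48-bit physical address space rather
 * than querying MAXPHYADDR.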
 */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	/*
	 * VMware allows access to these pseudo-PMCs even when read via RDPMC
	 * in ring 3 when CR4.PCE = 0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl), /* /6 is an undocumented alias of /4 (SHL/SAL) */
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test), /* /1 is an undocumented alias of /0 (TEST) */
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps, em_call_far),
	I(SrcMem | NearBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
	I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	II(Prot | DstMem, em_sldt, sldt),
	II(Prot | DstMem, em_str, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
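 * (0F C7 /7 gets here only for mod == 3; with the F3 prefix it decodes
 * as RDPID, which always writes a general-purpose register.)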
4518 */ 4519 static const struct gprefix pfx_0f_c7_7 = { 4520 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp), 4521 }; 4522 4523 4524 static const struct group_dual group9 = { { 4525 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, 4526 }, { 4527 N, N, N, N, N, N, N, 4528 GP(0, &pfx_0f_c7_7), 4529 } }; 4530 4531 static const struct opcode group11[] = { 4532 I(DstMem | SrcImm | Mov | PageTable, em_mov), 4533 X7(D(Undefined)), 4534 }; 4535 4536 static const struct gprefix pfx_0f_ae_7 = { 4537 I(SrcMem | ByteOp, em_clflush), N, N, N, 4538 }; 4539 4540 static const struct group_dual group15 = { { 4541 I(ModRM | Aligned16, em_fxsave), 4542 I(ModRM | Aligned16, em_fxrstor), 4543 N, N, N, N, N, GP(0, &pfx_0f_ae_7), 4544 }, { 4545 N, N, N, N, N, N, N, N, 4546 } }; 4547 4548 static const struct gprefix pfx_0f_6f_0f_7f = { 4549 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), 4550 }; 4551 4552 static const struct instr_dual instr_dual_0f_2b = { 4553 I(0, em_mov), N 4554 }; 4555 4556 static const struct gprefix pfx_0f_2b = { 4557 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N, 4558 }; 4559 4560 static const struct gprefix pfx_0f_10_0f_11 = { 4561 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N, 4562 }; 4563 4564 static const struct gprefix pfx_0f_28_0f_29 = { 4565 I(Aligned, em_mov), I(Aligned, em_mov), N, N, 4566 }; 4567 4568 static const struct gprefix pfx_0f_e7 = { 4569 N, I(Sse, em_mov), N, N, 4570 }; 4571 4572 static const struct escape escape_d9 = { { 4573 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw), 4574 }, { 4575 /* 0xC0 - 0xC7 */ 4576 N, N, N, N, N, N, N, N, 4577 /* 0xC8 - 0xCF */ 4578 N, N, N, N, N, N, N, N, 4579 /* 0xD0 - 0xC7 */ 4580 N, N, N, N, N, N, N, N, 4581 /* 0xD8 - 0xDF */ 4582 N, N, N, N, N, N, N, N, 4583 /* 0xE0 - 0xE7 */ 4584 N, N, N, N, N, N, N, N, 4585 /* 0xE8 - 0xEF */ 4586 N, N, N, N, N, N, N, N, 4587 /* 0xF0 - 0xF7 */ 4588 N, N, N, N, N, N, N, N, 4589 /* 0xF8 - 0xFF */ 4590 N, N, N, N, N, N, N, N, 4591 } }; 4592 4593 static const struct escape escape_db = { { 4594 N, N, N, N, N, N, N, N, 4595 }, { 4596 /* 0xC0 - 0xC7 */ 4597 N, N, N, N, N, N, N, N, 4598 /* 0xC8 - 0xCF */ 4599 N, N, N, N, N, N, N, N, 4600 /* 0xD0 - 0xC7 */ 4601 N, N, N, N, N, N, N, N, 4602 /* 0xD8 - 0xDF */ 4603 N, N, N, N, N, N, N, N, 4604 /* 0xE0 - 0xE7 */ 4605 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, 4606 /* 0xE8 - 0xEF */ 4607 N, N, N, N, N, N, N, N, 4608 /* 0xF0 - 0xF7 */ 4609 N, N, N, N, N, N, N, N, 4610 /* 0xF8 - 0xFF */ 4611 N, N, N, N, N, N, N, N, 4612 } }; 4613 4614 static const struct escape escape_dd = { { 4615 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw), 4616 }, { 4617 /* 0xC0 - 0xC7 */ 4618 N, N, N, N, N, N, N, N, 4619 /* 0xC8 - 0xCF */ 4620 N, N, N, N, N, N, N, N, 4621 /* 0xD0 - 0xC7 */ 4622 N, N, N, N, N, N, N, N, 4623 /* 0xD8 - 0xDF */ 4624 N, N, N, N, N, N, N, N, 4625 /* 0xE0 - 0xE7 */ 4626 N, N, N, N, N, N, N, N, 4627 /* 0xE8 - 0xEF */ 4628 N, N, N, N, N, N, N, N, 4629 /* 0xF0 - 0xF7 */ 4630 N, N, N, N, N, N, N, N, 4631 /* 0xF8 - 0xFF */ 4632 N, N, N, N, N, N, N, N, 4633 } }; 4634 4635 static const struct instr_dual instr_dual_0f_c3 = { 4636 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N 4637 }; 4638 4639 static const struct mode_dual mode_dual_63 = { 4640 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd) 4641 }; 4642 4643 static const struct opcode opcode_table[256] = { 4644 /* 0x00 - 0x07 */ 4645 F6ALU(Lock, em_add), 4646 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), 4647 I(ImplicitOps 
| Stack | No64 | Src2ES, em_pop_sreg), 4648 /* 0x08 - 0x0F */ 4649 F6ALU(Lock | PageTable, em_or), 4650 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), 4651 N, 4652 /* 0x10 - 0x17 */ 4653 F6ALU(Lock, em_adc), 4654 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), 4655 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), 4656 /* 0x18 - 0x1F */ 4657 F6ALU(Lock, em_sbb), 4658 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), 4659 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), 4660 /* 0x20 - 0x27 */ 4661 F6ALU(Lock | PageTable, em_and), N, N, 4662 /* 0x28 - 0x2F */ 4663 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), 4664 /* 0x30 - 0x37 */ 4665 F6ALU(Lock, em_xor), N, N, 4666 /* 0x38 - 0x3F */ 4667 F6ALU(NoWrite, em_cmp), N, N, 4668 /* 0x40 - 0x4F */ 4669 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), 4670 /* 0x50 - 0x57 */ 4671 X8(I(SrcReg | Stack, em_push)), 4672 /* 0x58 - 0x5F */ 4673 X8(I(DstReg | Stack, em_pop)), 4674 /* 0x60 - 0x67 */ 4675 I(ImplicitOps | Stack | No64, em_pusha), 4676 I(ImplicitOps | Stack | No64, em_popa), 4677 N, MD(ModRM, &mode_dual_63), 4678 N, N, N, N, 4679 /* 0x68 - 0x6F */ 4680 I(SrcImm | Mov | Stack, em_push), 4681 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 4682 I(SrcImmByte | Mov | Stack, em_push), 4683 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 4684 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ 4685 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ 4686 /* 0x70 - 0x7F */ 4687 X16(D(SrcImmByte | NearBranch)), 4688 /* 0x80 - 0x87 */ 4689 G(ByteOp | DstMem | SrcImm, group1), 4690 G(DstMem | SrcImm, group1), 4691 G(ByteOp | DstMem | SrcImm | No64, group1), 4692 G(DstMem | SrcImmByte, group1), 4693 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), 4694 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), 4695 /* 0x88 - 0x8F */ 4696 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), 4697 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), 4698 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), 4699 D(ModRM | SrcMem | NoAccess | DstReg), 4700 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), 4701 G(0, group1A), 4702 /* 0x90 - 0x97 */ 4703 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), 4704 /* 0x98 - 0x9F */ 4705 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), 4706 I(SrcImmFAddr | No64, em_call_far), N, 4707 II(ImplicitOps | Stack, em_pushf, pushf), 4708 II(ImplicitOps | Stack, em_popf, popf), 4709 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), 4710 /* 0xA0 - 0xA7 */ 4711 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), 4712 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), 4713 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov), 4714 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r), 4715 /* 0xA8 - 0xAF */ 4716 F2bv(DstAcc | SrcImm | NoWrite, em_test), 4717 I2bv(SrcAcc | DstDI | Mov | String, em_mov), 4718 I2bv(SrcSI | DstAcc | Mov | String, em_mov), 4719 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r), 4720 /* 0xB0 - 0xB7 */ 4721 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), 4722 /* 0xB8 - 0xBF */ 4723 X8(I(DstReg | SrcImm64 | Mov, em_mov)), 4724 /* 0xC0 - 0xC7 */ 4725 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), 4726 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm), 4727 I(ImplicitOps | NearBranch, em_ret), 4728 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), 4729 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), 4730 G(ByteOp, group11), 
G(0, group11), 4731 /* 0xC8 - 0xCF */ 4732 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), 4733 I(ImplicitOps | SrcImmU16, em_ret_far_imm), 4734 I(ImplicitOps, em_ret_far), 4735 D(ImplicitOps), DI(SrcImmByte, intn), 4736 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), 4737 /* 0xD0 - 0xD7 */ 4738 G(Src2One | ByteOp, group2), G(Src2One, group2), 4739 G(Src2CL | ByteOp, group2), G(Src2CL, group2), 4740 I(DstAcc | SrcImmUByte | No64, em_aam), 4741 I(DstAcc | SrcImmUByte | No64, em_aad), 4742 F(DstAcc | ByteOp | No64, em_salc), 4743 I(DstAcc | SrcXLat | ByteOp, em_mov), 4744 /* 0xD8 - 0xDF */ 4745 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, 4746 /* 0xE0 - 0xE7 */ 4747 X3(I(SrcImmByte | NearBranch, em_loop)), 4748 I(SrcImmByte | NearBranch, em_jcxz), 4749 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), 4750 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), 4751 /* 0xE8 - 0xEF */ 4752 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch), 4753 I(SrcImmFAddr | No64, em_jmp_far), 4754 D(SrcImmByte | ImplicitOps | NearBranch), 4755 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), 4756 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), 4757 /* 0xF0 - 0xF7 */ 4758 N, DI(ImplicitOps, icebp), N, N, 4759 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 4760 G(ByteOp, group3), G(0, group3), 4761 /* 0xF8 - 0xFF */ 4762 D(ImplicitOps), D(ImplicitOps), 4763 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), 4764 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), 4765 }; 4766 4767 static const struct opcode twobyte_table[256] = { 4768 /* 0x00 - 0x0F */ 4769 G(0, group6), GD(0, &group7), N, N, 4770 N, I(ImplicitOps | EmulateOnUD, em_syscall), 4771 II(ImplicitOps | Priv, em_clts, clts), N, 4772 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 4773 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, 4774 /* 0x10 - 0x1F */ 4775 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11), 4776 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11), 4777 N, N, N, N, N, N, 4778 D(ImplicitOps | ModRM | SrcMem | NoAccess), 4779 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), 4780 /* 0x20 - 0x2F */ 4781 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), 4782 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), 4783 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, 4784 check_cr_write), 4785 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, 4786 check_dr_write), 4787 N, N, N, N, 4788 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), 4789 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), 4790 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b), 4791 N, N, N, N, 4792 /* 0x30 - 0x3F */ 4793 II(ImplicitOps | Priv, em_wrmsr, wrmsr), 4794 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), 4795 II(ImplicitOps | Priv, em_rdmsr, rdmsr), 4796 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), 4797 I(ImplicitOps | EmulateOnUD, em_sysenter), 4798 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), 4799 N, N, 4800 N, N, N, N, N, N, N, N, 4801 /* 0x40 - 0x4F */ 4802 X16(D(DstReg | SrcMem | ModRM)), 4803 /* 0x50 - 0x5F */ 4804 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 4805 /* 0x60 - 0x6F */ 4806 N, N, N, N, 4807 N, N, N, N, 4808 N, N, N, N, 4809 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), 4810 /* 0x70 - 0x7F */ 4811 N, N, N, N, 4812 N, N, N, N, 4813 N, N, N, N, 4814 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, 
			&pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are indexed by the third opcode byte; the gprefix entries
 * then select on the 66/F2/F3 prefix.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ?
1 : ctxt->op_bytes; 4914 if (size == 8) 4915 size = 4; 4916 return size; 4917 } 4918 4919 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, 4920 unsigned size, bool sign_extension) 4921 { 4922 int rc = X86EMUL_CONTINUE; 4923 4924 op->type = OP_IMM; 4925 op->bytes = size; 4926 op->addr.mem.ea = ctxt->_eip; 4927 /* NB. Immediates are sign-extended as necessary. */ 4928 switch (op->bytes) { 4929 case 1: 4930 op->val = insn_fetch(s8, ctxt); 4931 break; 4932 case 2: 4933 op->val = insn_fetch(s16, ctxt); 4934 break; 4935 case 4: 4936 op->val = insn_fetch(s32, ctxt); 4937 break; 4938 case 8: 4939 op->val = insn_fetch(s64, ctxt); 4940 break; 4941 } 4942 if (!sign_extension) { 4943 switch (op->bytes) { 4944 case 1: 4945 op->val &= 0xff; 4946 break; 4947 case 2: 4948 op->val &= 0xffff; 4949 break; 4950 case 4: 4951 op->val &= 0xffffffff; 4952 break; 4953 } 4954 } 4955 done: 4956 return rc; 4957 } 4958 4959 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, 4960 unsigned d) 4961 { 4962 int rc = X86EMUL_CONTINUE; 4963 4964 switch (d) { 4965 case OpReg: 4966 decode_register_operand(ctxt, op); 4967 break; 4968 case OpImmUByte: 4969 rc = decode_imm(ctxt, op, 1, false); 4970 break; 4971 case OpMem: 4972 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4973 mem_common: 4974 *op = ctxt->memop; 4975 ctxt->memopp = op; 4976 if (ctxt->d & BitOp) 4977 fetch_bit_operand(ctxt); 4978 op->orig_val = op->val; 4979 break; 4980 case OpMem64: 4981 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; 4982 goto mem_common; 4983 case OpAcc: 4984 op->type = OP_REG; 4985 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4986 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 4987 fetch_register_operand(op); 4988 op->orig_val = op->val; 4989 break; 4990 case OpAccLo: 4991 op->type = OP_REG; 4992 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; 4993 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 4994 fetch_register_operand(op); 4995 op->orig_val = op->val; 4996 break; 4997 case OpAccHi: 4998 if (ctxt->d & ByteOp) { 4999 op->type = OP_NONE; 5000 break; 5001 } 5002 op->type = OP_REG; 5003 op->bytes = ctxt->op_bytes; 5004 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 5005 fetch_register_operand(op); 5006 op->orig_val = op->val; 5007 break; 5008 case OpDI: 5009 op->type = OP_MEM; 5010 op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; 5011 op->addr.mem.ea = 5012 register_address(ctxt, VCPU_REGS_RDI); 5013 op->addr.mem.seg = VCPU_SREG_ES; 5014 op->val = 0; 5015 op->count = 1; 5016 break; 5017 case OpDX: 5018 op->type = OP_REG; 5019 op->bytes = 2; 5020 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 5021 fetch_register_operand(op); 5022 break; 5023 case OpCL: 5024 op->type = OP_IMM; 5025 op->bytes = 1; 5026 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; 5027 break; 5028 case OpImmByte: 5029 rc = decode_imm(ctxt, op, 1, true); 5030 break; 5031 case OpOne: 5032 op->type = OP_IMM; 5033 op->bytes = 1; 5034 op->val = 1; 5035 break; 5036 case OpImm: 5037 rc = decode_imm(ctxt, op, imm_size(ctxt), true); 5038 break; 5039 case OpImm64: 5040 rc = decode_imm(ctxt, op, ctxt->op_bytes, true); 5041 break; 5042 case OpMem8: 5043 ctxt->memop.bytes = 1; 5044 if (ctxt->memop.type == OP_REG) { 5045 ctxt->memop.addr.reg = decode_register(ctxt, 5046 ctxt->modrm_rm, true); 5047 fetch_register_operand(&ctxt->memop); 5048 } 5049 goto mem_common; 5050 case OpMem16: 5051 ctxt->memop.bytes = 2; 5052 goto mem_common; 5053 case OpMem32: 5054 ctxt->memop.bytes = 4; 5055 goto mem_common; 5056 case OpImmU16: 5057 rc = decode_imm(ctxt, op, 2, false); 5058 break; 5059 case OpImmU: 5060 rc = decode_imm(ctxt, op, imm_size(ctxt), false); 5061 break; 5062 case OpSI: 5063 op->type = OP_MEM; 5064 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 5065 op->addr.mem.ea = 5066 register_address(ctxt, VCPU_REGS_RSI); 5067 op->addr.mem.seg = ctxt->seg_override; 5068 op->val = 0; 5069 op->count = 1; 5070 break; 5071 case OpXLat: 5072 op->type = OP_MEM; 5073 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 5074 op->addr.mem.ea = 5075 address_mask(ctxt, 5076 reg_read(ctxt, VCPU_REGS_RBX) + 5077 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); 5078 op->addr.mem.seg = ctxt->seg_override; 5079 op->val = 0; 5080 break; 5081 case OpImmFAddr: 5082 op->type = OP_IMM; 5083 op->addr.mem.ea = ctxt->_eip; 5084 op->bytes = ctxt->op_bytes + 2; 5085 insn_fetch_arr(op->valptr, op->bytes, ctxt); 5086 break; 5087 case OpMemFAddr: 5088 ctxt->memop.bytes = ctxt->op_bytes + 2; 5089 goto mem_common; 5090 case OpES: 5091 op->type = OP_IMM; 5092 op->val = VCPU_SREG_ES; 5093 break; 5094 case OpCS: 5095 op->type = OP_IMM; 5096 op->val = VCPU_SREG_CS; 5097 break; 5098 case OpSS: 5099 op->type = OP_IMM; 5100 op->val = VCPU_SREG_SS; 5101 break; 5102 case OpDS: 5103 op->type = OP_IMM; 5104 op->val = VCPU_SREG_DS; 5105 break; 5106 case OpFS: 5107 op->type = OP_IMM; 5108 op->val = VCPU_SREG_FS; 5109 break; 5110 case OpGS: 5111 op->type = OP_IMM; 5112 op->val = VCPU_SREG_GS; 5113 break; 5114 case OpImplicit: 5115 /* Special instructions do their own operand decoding. */ 5116 default: 5117 op->type = OP_NONE; /* Disable writeback. 
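 * OpImplicit and unknown operand types leave op->type == OP_NONE, so
 * the generic writeback phase ignores them.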
*/ 5118 break; 5119 } 5120 5121 done: 5122 return rc; 5123 } 5124 5125 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) 5126 { 5127 int rc = X86EMUL_CONTINUE; 5128 int mode = ctxt->mode; 5129 int def_op_bytes, def_ad_bytes, goffset, simd_prefix; 5130 bool op_prefix = false; 5131 bool has_seg_override = false; 5132 struct opcode opcode; 5133 u16 dummy; 5134 struct desc_struct desc; 5135 5136 ctxt->memop.type = OP_NONE; 5137 ctxt->memopp = NULL; 5138 ctxt->_eip = ctxt->eip; 5139 ctxt->fetch.ptr = ctxt->fetch.data; 5140 ctxt->fetch.end = ctxt->fetch.data + insn_len; 5141 ctxt->opcode_len = 1; 5142 if (insn_len > 0) 5143 memcpy(ctxt->fetch.data, insn, insn_len); 5144 else { 5145 rc = __do_insn_fetch_bytes(ctxt, 1); 5146 if (rc != X86EMUL_CONTINUE) 5147 return rc; 5148 } 5149 5150 switch (mode) { 5151 case X86EMUL_MODE_REAL: 5152 case X86EMUL_MODE_VM86: 5153 def_op_bytes = def_ad_bytes = 2; 5154 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); 5155 if (desc.d) 5156 def_op_bytes = def_ad_bytes = 4; 5157 break; 5158 case X86EMUL_MODE_PROT16: 5159 def_op_bytes = def_ad_bytes = 2; 5160 break; 5161 case X86EMUL_MODE_PROT32: 5162 def_op_bytes = def_ad_bytes = 4; 5163 break; 5164 #ifdef CONFIG_X86_64 5165 case X86EMUL_MODE_PROT64: 5166 def_op_bytes = 4; 5167 def_ad_bytes = 8; 5168 break; 5169 #endif 5170 default: 5171 return EMULATION_FAILED; 5172 } 5173 5174 ctxt->op_bytes = def_op_bytes; 5175 ctxt->ad_bytes = def_ad_bytes; 5176 5177 /* Legacy prefixes. */ 5178 for (;;) { 5179 switch (ctxt->b = insn_fetch(u8, ctxt)) { 5180 case 0x66: /* operand-size override */ 5181 op_prefix = true; 5182 /* switch between 2/4 bytes */ 5183 ctxt->op_bytes = def_op_bytes ^ 6; 5184 break; 5185 case 0x67: /* address-size override */ 5186 if (mode == X86EMUL_MODE_PROT64) 5187 /* switch between 4/8 bytes */ 5188 ctxt->ad_bytes = def_ad_bytes ^ 12; 5189 else 5190 /* switch between 2/4 bytes */ 5191 ctxt->ad_bytes = def_ad_bytes ^ 6; 5192 break; 5193 case 0x26: /* ES override */ 5194 case 0x2e: /* CS override */ 5195 case 0x36: /* SS override */ 5196 case 0x3e: /* DS override */ 5197 has_seg_override = true; 5198 ctxt->seg_override = (ctxt->b >> 3) & 3; 5199 break; 5200 case 0x64: /* FS override */ 5201 case 0x65: /* GS override */ 5202 has_seg_override = true; 5203 ctxt->seg_override = ctxt->b & 7; 5204 break; 5205 case 0x40 ... 0x4f: /* REX */ 5206 if (mode != X86EMUL_MODE_PROT64) 5207 goto done_prefixes; 5208 ctxt->rex_prefix = ctxt->b; 5209 continue; 5210 case 0xf0: /* LOCK */ 5211 ctxt->lock_prefix = 1; 5212 break; 5213 case 0xf2: /* REPNE/REPNZ */ 5214 case 0xf3: /* REP/REPE/REPZ */ 5215 ctxt->rep_prefix = ctxt->b; 5216 break; 5217 default: 5218 goto done_prefixes; 5219 } 5220 5221 /* Any legacy prefix after a REX prefix nullifies its effect. */ 5222 5223 ctxt->rex_prefix = 0; 5224 } 5225 5226 done_prefixes: 5227 5228 /* REX prefix. */ 5229 if (ctxt->rex_prefix & 8) 5230 ctxt->op_bytes = 8; /* REX.W */ 5231 5232 /* Opcode byte(s). */ 5233 opcode = opcode_table[ctxt->b]; 5234 /* Two-byte opcode? 
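 * All two-byte opcodes start with the 0x0f escape byte; 0x0f 0x38
 * escapes further into the three-byte map handled just below.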
*/ 5235 if (ctxt->b == 0x0f) { 5236 ctxt->opcode_len = 2; 5237 ctxt->b = insn_fetch(u8, ctxt); 5238 opcode = twobyte_table[ctxt->b]; 5239 5240 /* 0F_38 opcode map */ 5241 if (ctxt->b == 0x38) { 5242 ctxt->opcode_len = 3; 5243 ctxt->b = insn_fetch(u8, ctxt); 5244 opcode = opcode_map_0f_38[ctxt->b]; 5245 } 5246 } 5247 ctxt->d = opcode.flags; 5248 5249 if (ctxt->d & ModRM) 5250 ctxt->modrm = insn_fetch(u8, ctxt); 5251 5252 /* vex-prefix instructions are not implemented */ 5253 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && 5254 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { 5255 ctxt->d = NotImpl; 5256 } 5257 5258 while (ctxt->d & GroupMask) { 5259 switch (ctxt->d & GroupMask) { 5260 case Group: 5261 goffset = (ctxt->modrm >> 3) & 7; 5262 opcode = opcode.u.group[goffset]; 5263 break; 5264 case GroupDual: 5265 goffset = (ctxt->modrm >> 3) & 7; 5266 if ((ctxt->modrm >> 6) == 3) 5267 opcode = opcode.u.gdual->mod3[goffset]; 5268 else 5269 opcode = opcode.u.gdual->mod012[goffset]; 5270 break; 5271 case RMExt: 5272 goffset = ctxt->modrm & 7; 5273 opcode = opcode.u.group[goffset]; 5274 break; 5275 case Prefix: 5276 if (ctxt->rep_prefix && op_prefix) 5277 return EMULATION_FAILED; 5278 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; 5279 switch (simd_prefix) { 5280 case 0x00: opcode = opcode.u.gprefix->pfx_no; break; 5281 case 0x66: opcode = opcode.u.gprefix->pfx_66; break; 5282 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; 5283 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; 5284 } 5285 break; 5286 case Escape: 5287 if (ctxt->modrm > 0xbf) 5288 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; 5289 else 5290 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; 5291 break; 5292 case InstrDual: 5293 if ((ctxt->modrm >> 6) == 3) 5294 opcode = opcode.u.idual->mod3; 5295 else 5296 opcode = opcode.u.idual->mod012; 5297 break; 5298 case ModeDual: 5299 if (ctxt->mode == X86EMUL_MODE_PROT64) 5300 opcode = opcode.u.mdual->mode64; 5301 else 5302 opcode = opcode.u.mdual->mode32; 5303 break; 5304 default: 5305 return EMULATION_FAILED; 5306 } 5307 5308 ctxt->d &= ~(u64)GroupMask; 5309 ctxt->d |= opcode.flags; 5310 } 5311 5312 /* Unrecognised? */ 5313 if (ctxt->d == 0) 5314 return EMULATION_FAILED; 5315 5316 ctxt->execute = opcode.u.execute; 5317 5318 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) 5319 return EMULATION_FAILED; 5320 5321 if (unlikely(ctxt->d & 5322 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| 5323 No16))) { 5324 /* 5325 * These are copied unconditionally here, and checked unconditionally 5326 * in x86_emulate_insn. 5327 */ 5328 ctxt->check_perm = opcode.check_perm; 5329 ctxt->intercept = opcode.intercept; 5330 5331 if (ctxt->d & NotImpl) 5332 return EMULATION_FAILED; 5333 5334 if (mode == X86EMUL_MODE_PROT64) { 5335 if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) 5336 ctxt->op_bytes = 8; 5337 else if (ctxt->d & NearBranch) 5338 ctxt->op_bytes = 8; 5339 } 5340 5341 if (ctxt->d & Op3264) { 5342 if (mode == X86EMUL_MODE_PROT64) 5343 ctxt->op_bytes = 8; 5344 else 5345 ctxt->op_bytes = 4; 5346 } 5347 5348 if ((ctxt->d & No16) && ctxt->op_bytes == 2) 5349 ctxt->op_bytes = 4; 5350 5351 if (ctxt->d & Sse) 5352 ctxt->op_bytes = 16; 5353 else if (ctxt->d & Mmx) 5354 ctxt->op_bytes = 8; 5355 } 5356 5357 /* ModRM and SIB bytes. 
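 * decode_modrm() also computes the effective address and records the
 * default segment implied by the base register (SS for rBP/rSP-based
 * forms).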
 */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
				ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE and
	 * REPNE: if the repeat prefix is REPE/REPZ or REPNE/REPNZ, test
	 * the corresponding extra condition as well:
	 *  - if REPE/REPZ and ZF = 0 then done
	 *  - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

/*
 * Dispatch to a flag-mutating "fastop" stub: load the guest's arithmetic
 * flags (forcing IF on so host interrupts stay enabled), call the
 * size-specific stub (the size variants are FASTOP_SIZE bytes apart), and
 * copy the resulting flags back.  A stub signals a fault such as #DE by
 * clearing the thunk-target register.
 */
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	/* Clear everything from ->rip_relative up to (not including) ->modrm. */
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
5469 ctxt->io_read.end = 0; 5470 ctxt->mem_read.end = 0; 5471 } 5472 5473 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) 5474 { 5475 const struct x86_emulate_ops *ops = ctxt->ops; 5476 int rc = X86EMUL_CONTINUE; 5477 int saved_dst_type = ctxt->dst.type; 5478 unsigned emul_flags; 5479 5480 ctxt->mem_read.pos = 0; 5481 5482 /* LOCK prefix is allowed only with some instructions */ 5483 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { 5484 rc = emulate_ud(ctxt); 5485 goto done; 5486 } 5487 5488 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { 5489 rc = emulate_ud(ctxt); 5490 goto done; 5491 } 5492 5493 emul_flags = ctxt->ops->get_hflags(ctxt); 5494 if (unlikely(ctxt->d & 5495 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { 5496 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || 5497 (ctxt->d & Undefined)) { 5498 rc = emulate_ud(ctxt); 5499 goto done; 5500 } 5501 5502 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) 5503 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { 5504 rc = emulate_ud(ctxt); 5505 goto done; 5506 } 5507 5508 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { 5509 rc = emulate_nm(ctxt); 5510 goto done; 5511 } 5512 5513 if (ctxt->d & Mmx) { 5514 rc = flush_pending_x87_faults(ctxt); 5515 if (rc != X86EMUL_CONTINUE) 5516 goto done; 5517 /* 5518 * Now that we know the fpu is exception safe, we can fetch 5519 * operands from it. 5520 */ 5521 fetch_possible_mmx_operand(ctxt, &ctxt->src); 5522 fetch_possible_mmx_operand(ctxt, &ctxt->src2); 5523 if (!(ctxt->d & Mov)) 5524 fetch_possible_mmx_operand(ctxt, &ctxt->dst); 5525 } 5526 5527 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { 5528 rc = emulator_check_intercept(ctxt, ctxt->intercept, 5529 X86_ICPT_PRE_EXCEPT); 5530 if (rc != X86EMUL_CONTINUE) 5531 goto done; 5532 } 5533 5534 /* Instruction can only be executed in protected mode */ 5535 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { 5536 rc = emulate_ud(ctxt); 5537 goto done; 5538 } 5539 5540 /* Privileged instruction can be executed only in CPL=0 */ 5541 if ((ctxt->d & Priv) && ops->cpl(ctxt)) { 5542 if (ctxt->d & PrivUD) 5543 rc = emulate_ud(ctxt); 5544 else 5545 rc = emulate_gp(ctxt, 0); 5546 goto done; 5547 } 5548 5549 /* Do instruction specific permission checks */ 5550 if (ctxt->d & CheckPerm) { 5551 rc = ctxt->check_perm(ctxt); 5552 if (rc != X86EMUL_CONTINUE) 5553 goto done; 5554 } 5555 5556 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { 5557 rc = emulator_check_intercept(ctxt, ctxt->intercept, 5558 X86_ICPT_POST_EXCEPT); 5559 if (rc != X86EMUL_CONTINUE) 5560 goto done; 5561 } 5562 5563 if (ctxt->rep_prefix && (ctxt->d & String)) { 5564 /* All REP prefixes have the same first termination condition */ 5565 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { 5566 string_registers_quirk(ctxt); 5567 ctxt->eip = ctxt->_eip; 5568 ctxt->eflags &= ~X86_EFLAGS_RF; 5569 goto done; 5570 } 5571 } 5572 } 5573 5574 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { 5575 rc = segmented_read(ctxt, ctxt->src.addr.mem, 5576 ctxt->src.valptr, ctxt->src.bytes); 5577 if (rc != X86EMUL_CONTINUE) 5578 goto done; 5579 ctxt->src.orig_val64 = ctxt->src.val64; 5580 } 5581 5582 if (ctxt->src2.type == OP_MEM) { 5583 rc = segmented_read(ctxt, ctxt->src2.addr.mem, 5584 &ctxt->src2.val, ctxt->src2.bytes); 5585 if (rc != X86EMUL_CONTINUE) 5586 goto done; 5587 } 5588 5589 if ((ctxt->d & 
DstMask) == ImplicitOps) 5590 goto special_insn; 5591 5592 5593 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { 5594 /* optimisation - avoid slow emulated read if Mov */ 5595 rc = segmented_read(ctxt, ctxt->dst.addr.mem, 5596 &ctxt->dst.val, ctxt->dst.bytes); 5597 if (rc != X86EMUL_CONTINUE) { 5598 if (!(ctxt->d & NoWrite) && 5599 rc == X86EMUL_PROPAGATE_FAULT && 5600 ctxt->exception.vector == PF_VECTOR) 5601 ctxt->exception.error_code |= PFERR_WRITE_MASK; 5602 goto done; 5603 } 5604 } 5605 /* Copy full 64-bit value for CMPXCHG8B. */ 5606 ctxt->dst.orig_val64 = ctxt->dst.val64; 5607 5608 special_insn: 5609 5610 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { 5611 rc = emulator_check_intercept(ctxt, ctxt->intercept, 5612 X86_ICPT_POST_MEMACCESS); 5613 if (rc != X86EMUL_CONTINUE) 5614 goto done; 5615 } 5616 5617 if (ctxt->rep_prefix && (ctxt->d & String)) 5618 ctxt->eflags |= X86_EFLAGS_RF; 5619 else 5620 ctxt->eflags &= ~X86_EFLAGS_RF; 5621 5622 if (ctxt->execute) { 5623 if (ctxt->d & Fastop) { 5624 void (*fop)(struct fastop *) = (void *)ctxt->execute; 5625 rc = fastop(ctxt, fop); 5626 if (rc != X86EMUL_CONTINUE) 5627 goto done; 5628 goto writeback; 5629 } 5630 rc = ctxt->execute(ctxt); 5631 if (rc != X86EMUL_CONTINUE) 5632 goto done; 5633 goto writeback; 5634 } 5635 5636 if (ctxt->opcode_len == 2) 5637 goto twobyte_insn; 5638 else if (ctxt->opcode_len == 3) 5639 goto threebyte_insn; 5640 5641 switch (ctxt->b) { 5642 case 0x70 ... 0x7f: /* jcc (short) */ 5643 if (test_cc(ctxt->b, ctxt->eflags)) 5644 rc = jmp_rel(ctxt, ctxt->src.val); 5645 break; 5646 case 0x8d: /* lea r16/r32, m */ 5647 ctxt->dst.val = ctxt->src.addr.mem.ea; 5648 break; 5649 case 0x90 ... 0x97: /* nop / xchg reg, rax */ 5650 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) 5651 ctxt->dst.type = OP_NONE; 5652 else 5653 rc = em_xchg(ctxt); 5654 break; 5655 case 0x98: /* cbw/cwde/cdqe */ 5656 switch (ctxt->op_bytes) { 5657 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; 5658 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; 5659 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; 5660 } 5661 break; 5662 case 0xcc: /* int3 */ 5663 rc = emulate_int(ctxt, 3); 5664 break; 5665 case 0xcd: /* int n */ 5666 rc = emulate_int(ctxt, ctxt->src.val); 5667 break; 5668 case 0xce: /* into */ 5669 if (ctxt->eflags & X86_EFLAGS_OF) 5670 rc = emulate_int(ctxt, 4); 5671 break; 5672 case 0xe9: /* jmp rel */ 5673 case 0xeb: /* jmp rel short */ 5674 rc = jmp_rel(ctxt, ctxt->src.val); 5675 ctxt->dst.type = OP_NONE; /* Disable writeback. 
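 * jmp_rel() already updated _eip; a relative jump leaves no
 * architectural destination operand to store.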
 */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the PIO read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ...
0x9f: /* setcc r/m8 */ 5800 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); 5801 break; 5802 case 0xb6 ... 0xb7: /* movzx */ 5803 ctxt->dst.bytes = ctxt->op_bytes; 5804 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val 5805 : (u16) ctxt->src.val; 5806 break; 5807 case 0xbe ... 0xbf: /* movsx */ 5808 ctxt->dst.bytes = ctxt->op_bytes; 5809 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : 5810 (s16) ctxt->src.val; 5811 break; 5812 default: 5813 goto cannot_emulate; 5814 } 5815 5816 threebyte_insn: 5817 5818 if (rc != X86EMUL_CONTINUE) 5819 goto done; 5820 5821 goto writeback; 5822 5823 cannot_emulate: 5824 return EMULATION_FAILED; 5825 } 5826 5827 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) 5828 { 5829 invalidate_registers(ctxt); 5830 } 5831 5832 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) 5833 { 5834 writeback_registers(ctxt); 5835 } 5836 5837 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt) 5838 { 5839 if (ctxt->rep_prefix && (ctxt->d & String)) 5840 return false; 5841 5842 if (ctxt->d & TwoMemOp) 5843 return false; 5844 5845 return true; 5846 } 5847