/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
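/*
 * Illustrative example (not part of the decode tables themselves): an
 * opcode-table entry such as
 *
 *	DstMem | SrcReg | ModRM
 *
 * packs OpMem into bits 1-5 and OpReg into bits 6-10 of the 56-bit
 * flags word.  The decoder (x86_decode_insn(), outside this excerpt)
 * recovers the operand types with expressions of the form
 * (ctxt->d >> DstShift) & OpMask and (ctxt->d >> SrcShift) & OpMask.
 */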
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
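/*
 * Illustrative sketch (the real decode tables and the decoder live
 * later in this file, outside this excerpt): a Group entry points
 * u.group at an eight-element table indexed by the ModRM reg field,
 * while a GroupDual entry selects between u.gdual->mod012 and
 * u.gdual->mod3 according to the ModRM mod field, roughly
 *
 *	goffset = (ctxt->modrm >> 3) & 7;
 *	if ((ctxt->modrm >> 6) == 3)
 *		opcode = opcode.u.gdual->mod3[goffset];
 *	else
 *		opcode = opcode.u.gdual->mod012[goffset];
 */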
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
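/*
 * Illustrative expansion (not literal compiler output): FASTOP2(add)
 * emits, at 8-byte-aligned offsets from the em_add symbol,
 *
 *	em_add +  0:	addb %dl, %al;   ret
 *	em_add +  8:	addw %dx, %ax;   ret
 *	em_add + 16:	addl %edx, %eax; ret
 *	em_add + 24:	addq %rdx, %rax; ret	(64-bit builds only)
 *
 * so the fastop() dispatcher (defined later in the file, outside this
 * excerpt) can reach the stub for a given operand size by arithmetic
 * such as em_add + __ffs(size) * FASTOP_SIZE, e.g. +16 for 32 bits.
 */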
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
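/*
 * Worked example (illustrative): with ctxt->ad_bytes == 2, ad_mask()
 * yields (1UL << 16) - 1 == 0xffff, and with a 16-bit stack segment
 * (ss.d == 0) stack_mask() likewise returns 0xffff, so rsp_increment()
 * below wraps SP at 64K while leaving the upper bits of RSP intact.
 */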
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
				goto bad;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}
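/*
 * Worked example for the expand-down case above (illustrative): for a
 * 32-bit expand-down data segment (desc.d == 1) with a scaled limit of
 * 0x0fff, the valid offsets are 0x1000..0xffffffff, which is exactly
 * what the two range checks in __linearize() enforce.
 */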
717 */ 718 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) 719 { 720 int rc; 721 unsigned size; 722 unsigned long linear; 723 int cur_size = ctxt->fetch.end - ctxt->fetch.data; 724 struct segmented_address addr = { .seg = VCPU_SREG_CS, 725 .ea = ctxt->eip + cur_size }; 726 727 size = 15UL ^ cur_size; 728 rc = __linearize(ctxt, addr, size, false, true, &linear); 729 if (unlikely(rc != X86EMUL_CONTINUE)) 730 return rc; 731 732 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); 733 734 /* 735 * One instruction can only straddle two pages, 736 * and one has been loaded at the beginning of 737 * x86_decode_insn. So, if not enough bytes 738 * still, we must have hit the 15-byte boundary. 739 */ 740 if (unlikely(size < op_size)) 741 return X86EMUL_UNHANDLEABLE; 742 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, 743 size, &ctxt->exception); 744 if (unlikely(rc != X86EMUL_CONTINUE)) 745 return rc; 746 ctxt->fetch.end += size; 747 return X86EMUL_CONTINUE; 748 } 749 750 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, 751 unsigned size) 752 { 753 if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) 754 return __do_insn_fetch_bytes(ctxt, size); 755 else 756 return X86EMUL_CONTINUE; 757 } 758 759 /* Fetch next part of the instruction being emulated. */ 760 #define insn_fetch(_type, _ctxt) \ 761 ({ _type _x; \ 762 \ 763 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ 764 if (rc != X86EMUL_CONTINUE) \ 765 goto done; \ 766 ctxt->_eip += sizeof(_type); \ 767 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ 768 ctxt->fetch.ptr += sizeof(_type); \ 769 _x; \ 770 }) 771 772 #define insn_fetch_arr(_arr, _size, _ctxt) \ 773 ({ \ 774 rc = do_insn_fetch_bytes(_ctxt, _size); \ 775 if (rc != X86EMUL_CONTINUE) \ 776 goto done; \ 777 ctxt->_eip += (_size); \ 778 memcpy(_arr, ctxt->fetch.ptr, _size); \ 779 ctxt->fetch.ptr += (_size); \ 780 }) 781 782 /* 783 * Given the 'reg' portion of a ModRM byte, and a register block, return a 784 * pointer into the block that addresses the relevant register. 785 * @highbyte_regs specifies whether to decode AH,CH,DH,BH. 
786 */ 787 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, 788 int byteop) 789 { 790 void *p; 791 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; 792 793 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) 794 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; 795 else 796 p = reg_rmw(ctxt, modrm_reg); 797 return p; 798 } 799 800 static int read_descriptor(struct x86_emulate_ctxt *ctxt, 801 struct segmented_address addr, 802 u16 *size, unsigned long *address, int op_bytes) 803 { 804 int rc; 805 806 if (op_bytes == 2) 807 op_bytes = 3; 808 *address = 0; 809 rc = segmented_read_std(ctxt, addr, size, 2); 810 if (rc != X86EMUL_CONTINUE) 811 return rc; 812 addr.ea += 2; 813 rc = segmented_read_std(ctxt, addr, address, op_bytes); 814 return rc; 815 } 816 817 FASTOP2(add); 818 FASTOP2(or); 819 FASTOP2(adc); 820 FASTOP2(sbb); 821 FASTOP2(and); 822 FASTOP2(sub); 823 FASTOP2(xor); 824 FASTOP2(cmp); 825 FASTOP2(test); 826 827 FASTOP1SRC2(mul, mul_ex); 828 FASTOP1SRC2(imul, imul_ex); 829 FASTOP1SRC2EX(div, div_ex); 830 FASTOP1SRC2EX(idiv, idiv_ex); 831 832 FASTOP3WCL(shld); 833 FASTOP3WCL(shrd); 834 835 FASTOP2W(imul); 836 837 FASTOP1(not); 838 FASTOP1(neg); 839 FASTOP1(inc); 840 FASTOP1(dec); 841 842 FASTOP2CL(rol); 843 FASTOP2CL(ror); 844 FASTOP2CL(rcl); 845 FASTOP2CL(rcr); 846 FASTOP2CL(shl); 847 FASTOP2CL(shr); 848 FASTOP2CL(sar); 849 850 FASTOP2W(bsf); 851 FASTOP2W(bsr); 852 FASTOP2W(bt); 853 FASTOP2W(bts); 854 FASTOP2W(btr); 855 FASTOP2W(btc); 856 857 FASTOP2(xadd); 858 859 static u8 test_cc(unsigned int condition, unsigned long flags) 860 { 861 u8 rc; 862 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); 863 864 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; 865 asm("push %[flags]; popf; call *%[fastop]" 866 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); 867 return rc; 868 } 869 870 static void fetch_register_operand(struct operand *op) 871 { 872 switch (op->bytes) { 873 case 1: 874 op->val = *(u8 *)op->addr.reg; 875 break; 876 case 2: 877 op->val = *(u16 *)op->addr.reg; 878 break; 879 case 4: 880 op->val = *(u32 *)op->addr.reg; 881 break; 882 case 8: 883 op->val = *(u64 *)op->addr.reg; 884 break; 885 } 886 } 887 888 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) 889 { 890 ctxt->ops->get_fpu(ctxt); 891 switch (reg) { 892 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; 893 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; 894 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; 895 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; 896 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; 897 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; 898 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break; 899 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; 900 #ifdef CONFIG_X86_64 901 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; 902 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; 903 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; 904 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; 905 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; 906 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; 907 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; 908 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; 909 #endif 910 default: BUG(); 911 } 912 ctxt->ops->put_fpu(ctxt); 913 } 914 915 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, 916 int reg) 917 { 918 ctxt->ops->get_fpu(ctxt); 919 switch (reg) { 920 case 
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
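/*
 * Descriptive note: the SSE/MMX register accessors above need a switch
 * because the register number is encoded in the instruction itself -
 * inline asm cannot select %xmmN or %mmN at run time - and every access
 * is bracketed by get_fpu()/put_fpu() so the guest's FPU state is
 * loaded on the host CPU while it is being touched.
 */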
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
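/*
 * Worked example for the 16-bit ModR/M decode in decode_modrm() below
 * (illustrative): "8B 42 10" is mov ax, [bp+si+0x10]; the ModR/M byte
 * 0x42 gives mod=1 (a disp8 follows), reg=0 (AX), rm=2 (bp+si), so
 * modrm_ea = bp + si + 0x10, truncated to 16 bits, with SS as the
 * default segment because the base register is BP.
 */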
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}
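/*
 * Worked example for fetch_bit_operand() above (illustrative): for a
 * 32-bit "bt [mem], reg" with src.val == 100, mask is ~31, so sv == 96
 * and the effective address moves forward by 96 >> 3 == 12 bytes, while
 * the bit offset kept in src.val becomes 100 & 31 == 4.
 */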
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
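/*
 * Example of the read-ahead above (illustrative): for "rep insb" with
 * ecx == 512 and edi well inside a page, n is the minimum of the bytes
 * left in the destination page, sizeof(rc->data), and 512, so a single
 * host round trip satisfies many iterations and subsequent calls are
 * served straight from the cache.
 */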
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
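/*
 * Selector anatomy, for the lookups above (illustrative): selector
 * 0x002b has index 5 (0x2b >> 3), TI set (bit 2, so the LDT is used)
 * and RPL 3; the error code pushed on failure is the selector with the
 * RPL bits masked off, i.e. selector & 0xfffc.
 */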
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl, bool in_task_switch)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
}
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
static void write_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
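/*
 * Example for em_enter() below (illustrative): "enter $0x20, $0" pushes
 * RBP, copies RSP into RBP, then subtracts 0x20 from RSP, all through
 * stack_mask() so 16- and 32-bit stacks wrap correctly; nonzero nesting
 * levels are rare and are punted back as X86EMUL_UNHANDLEABLE.
 */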
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}
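/*
 * Descriptive note: the KVM_X86_SHADOW_INT_MOV_SS interruptibility set
 * in em_pop_sreg() above mirrors the hardware behaviour of "pop ss",
 * which blocks interrupts until after the following instruction so a
 * stack-pointer switch ("pop ss" followed by "mov sp, ...") cannot be
 * torn apart by an interrupt.
 */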
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		if (reg == VCPU_REGS_RSP)
			ctxt->src.val = old_esp;
		else
			ctxt->src.val = reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
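/*
 * Real-mode IVT layout used by __emulate_int_real() above
 * (illustrative): each vector is four bytes at idt.address + irq * 4,
 * with IP in the low word and CS in the high word; e.g. "int $0x10"
 * reads IP from offset 0x40 and CS from offset 0x42.
 */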
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6: /* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
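/*
 * Semantics implemented by em_cmpxchg8b() above (descriptive): if
 * edx:eax equals the 64-bit memory operand, the operand is replaced by
 * ecx:ebx and ZF is set; otherwise the operand is loaded into edx:eax
 * and ZF is cleared.  The 16-byte cmpxchg16b form is not handled here.
 */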
x86_emulate_ctxt *ctxt) 2044 { 2045 int rc; 2046 2047 rc = em_ret_far(ctxt); 2048 if (rc != X86EMUL_CONTINUE) 2049 return rc; 2050 rsp_increment(ctxt, ctxt->src.val); 2051 return X86EMUL_CONTINUE; 2052 } 2053 2054 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2055 { 2056 /* Save real source value, then compare EAX against destination. */ 2057 ctxt->dst.orig_val = ctxt->dst.val; 2058 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); 2059 ctxt->src.orig_val = ctxt->src.val; 2060 ctxt->src.val = ctxt->dst.orig_val; 2061 fastop(ctxt, em_cmp); 2062 2063 if (ctxt->eflags & EFLG_ZF) { 2064 /* Success: write back to memory. */ 2065 ctxt->dst.val = ctxt->src.orig_val; 2066 } else { 2067 /* Failure: write the value we saw to EAX. */ 2068 ctxt->dst.type = OP_REG; 2069 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 2070 ctxt->dst.val = ctxt->dst.orig_val; 2071 } 2072 return X86EMUL_CONTINUE; 2073 } 2074 2075 static int em_lseg(struct x86_emulate_ctxt *ctxt) 2076 { 2077 int seg = ctxt->src2.val; 2078 unsigned short sel; 2079 int rc; 2080 2081 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2082 2083 rc = load_segment_descriptor(ctxt, sel, seg); 2084 if (rc != X86EMUL_CONTINUE) 2085 return rc; 2086 2087 ctxt->dst.val = ctxt->src.val; 2088 return rc; 2089 } 2090 2091 static void 2092 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, 2093 struct desc_struct *cs, struct desc_struct *ss) 2094 { 2095 cs->l = 0; /* will be adjusted later */ 2096 set_desc_base(cs, 0); /* flat segment */ 2097 cs->g = 1; /* 4KB granularity */ 2098 set_desc_limit(cs, 0xfffff); /* 4GB limit */ 2099 cs->type = 0x0b; /* Read, Execute, Accessed */ 2100 cs->s = 1; 2101 cs->dpl = 0; /* will be adjusted later */ 2102 cs->p = 1; 2103 cs->d = 1; 2104 cs->avl = 0; 2105 2106 set_desc_base(ss, 0); /* flat segment */ 2107 set_desc_limit(ss, 0xfffff); /* 4GB limit */ 2108 ss->g = 1; /* 4KB granularity */ 2109 ss->s = 1; 2110 ss->type = 0x03; /* Read/Write, Accessed */ 2111 ss->d = 1; /* 32-bit stack segment */ 2112 ss->dpl = 0; 2113 ss->p = 1; 2114 ss->l = 0; 2115 ss->avl = 0; 2116 } 2117 2118 static bool vendor_intel(struct x86_emulate_ctxt *ctxt) 2119 { 2120 u32 eax, ebx, ecx, edx; 2121 2122 eax = ecx = 0; 2123 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2124 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 2125 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 2126 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; 2127 } 2128 2129 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) 2130 { 2131 const struct x86_emulate_ops *ops = ctxt->ops; 2132 u32 eax, ebx, ecx, edx; 2133 2134 /* 2135 * SYSCALL is always enabled in long mode, so the check only needs to 2136 * become vendor-specific (via CPUID) when other modes are active. 2137 */ 2138 if (ctxt->mode == X86EMUL_MODE_PROT64) 2139 return true; 2140 2141 eax = 0x00000000; 2142 ecx = 0x00000000; 2143 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2144 /* 2145 * Intel ("GenuineIntel"): Intel CPUs only support "syscall" in 2146 * 64-bit long mode, so a 64-bit guest running a 32-bit compat 2147 * application will get #UD. While that behaviour could be emulated 2148 * as the AMD response, the converse is impossible: AMD CPUs cannot 2149 * be made to behave like Intel ones, so we apply the Intel rules 2150 * and report SYSCALL as disabled here.
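* Returning false here makes the caller raise #UD, which is what the
* guest would see on real Intel hardware.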
2151 */ 2152 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && 2153 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && 2154 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) 2155 return false; 2156 2157 /* AMD ("AuthenticAMD") */ 2158 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && 2159 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && 2160 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) 2161 return true; 2162 2163 /* AMD ("AMDisbetter!") */ 2164 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && 2165 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && 2166 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) 2167 return true; 2168 2169 /* default: (not Intel, not AMD), apply Intel's stricter rules... */ 2170 return false; 2171 } 2172 2173 static int em_syscall(struct x86_emulate_ctxt *ctxt) 2174 { 2175 const struct x86_emulate_ops *ops = ctxt->ops; 2176 struct desc_struct cs, ss; 2177 u64 msr_data; 2178 u16 cs_sel, ss_sel; 2179 u64 efer = 0; 2180 2181 /* syscall is not available in real mode */ 2182 if (ctxt->mode == X86EMUL_MODE_REAL || 2183 ctxt->mode == X86EMUL_MODE_VM86) 2184 return emulate_ud(ctxt); 2185 2186 if (!(em_syscall_is_enabled(ctxt))) 2187 return emulate_ud(ctxt); 2188 2189 ops->get_msr(ctxt, MSR_EFER, &efer); 2190 setup_syscalls_segments(ctxt, &cs, &ss); 2191 2192 if (!(efer & EFER_SCE)) 2193 return emulate_ud(ctxt); 2194 2195 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2196 msr_data >>= 32; 2197 cs_sel = (u16)(msr_data & 0xfffc); 2198 ss_sel = (u16)(msr_data + 8); 2199 2200 if (efer & EFER_LMA) { 2201 cs.d = 0; 2202 cs.l = 1; 2203 } 2204 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2205 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2206 2207 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; 2208 if (efer & EFER_LMA) { 2209 #ifdef CONFIG_X86_64 2210 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; 2211 2212 ops->get_msr(ctxt, 2213 ctxt->mode == X86EMUL_MODE_PROT64 ? 2214 MSR_LSTAR : MSR_CSTAR, &msr_data); 2215 ctxt->_eip = msr_data; 2216 2217 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); 2218 ctxt->eflags &= ~msr_data; 2219 #endif 2220 } else { 2221 /* legacy mode */ 2222 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2223 ctxt->_eip = (u32)msr_data; 2224 2225 ctxt->eflags &= ~(EFLG_VM | EFLG_IF); 2226 } 2227 2228 return X86EMUL_CONTINUE; 2229 } 2230 2231 static int em_sysenter(struct x86_emulate_ctxt *ctxt) 2232 { 2233 const struct x86_emulate_ops *ops = ctxt->ops; 2234 struct desc_struct cs, ss; 2235 u64 msr_data; 2236 u16 cs_sel, ss_sel; 2237 u64 efer = 0; 2238 2239 ops->get_msr(ctxt, MSR_EFER, &efer); 2240 /* inject #GP if in real mode */ 2241 if (ctxt->mode == X86EMUL_MODE_REAL) 2242 return emulate_gp(ctxt, 0); 2243 2244 /* 2245 * Not recognized on AMD in compat mode (but is recognized in legacy 2246 * mode). 2247 */ 2248 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) 2249 && !vendor_intel(ctxt)) 2250 return emulate_ud(ctxt); 2251 2252 /* XXX sysenter/sysexit have not been tested in 64bit mode. 2253 * Therefore, we inject an #UD. 
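* (On Intel hardware SYSENTER is architecturally valid in long mode,
* so this is an emulator limitation rather than an architectural one.)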
2254 */ 2255 if (ctxt->mode == X86EMUL_MODE_PROT64) 2256 return emulate_ud(ctxt); 2257 2258 setup_syscalls_segments(ctxt, &cs, &ss); 2259 2260 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2261 switch (ctxt->mode) { 2262 case X86EMUL_MODE_PROT32: 2263 if ((msr_data & 0xfffc) == 0x0) 2264 return emulate_gp(ctxt, 0); 2265 break; 2266 case X86EMUL_MODE_PROT64: 2267 if (msr_data == 0x0) 2268 return emulate_gp(ctxt, 0); 2269 break; 2270 default: 2271 break; 2272 } 2273 2274 ctxt->eflags &= ~(EFLG_VM | EFLG_IF); 2275 cs_sel = (u16)msr_data; 2276 cs_sel &= ~SELECTOR_RPL_MASK; 2277 ss_sel = cs_sel + 8; 2278 ss_sel &= ~SELECTOR_RPL_MASK; 2279 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { 2280 cs.d = 0; 2281 cs.l = 1; 2282 } 2283 2284 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2285 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2286 2287 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2288 ctxt->_eip = msr_data; 2289 2290 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2291 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; 2292 2293 return X86EMUL_CONTINUE; 2294 } 2295 2296 static int em_sysexit(struct x86_emulate_ctxt *ctxt) 2297 { 2298 const struct x86_emulate_ops *ops = ctxt->ops; 2299 struct desc_struct cs, ss; 2300 u64 msr_data; 2301 int usermode; 2302 u16 cs_sel = 0, ss_sel = 0; 2303 2304 /* inject #GP if in real mode or Virtual 8086 mode */ 2305 if (ctxt->mode == X86EMUL_MODE_REAL || 2306 ctxt->mode == X86EMUL_MODE_VM86) 2307 return emulate_gp(ctxt, 0); 2308 2309 setup_syscalls_segments(ctxt, &cs, &ss); 2310 2311 if ((ctxt->rex_prefix & 0x8) != 0x0) 2312 usermode = X86EMUL_MODE_PROT64; 2313 else 2314 usermode = X86EMUL_MODE_PROT32; 2315 2316 cs.dpl = 3; 2317 ss.dpl = 3; 2318 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2319 switch (usermode) { 2320 case X86EMUL_MODE_PROT32: 2321 cs_sel = (u16)(msr_data + 16); 2322 if ((msr_data & 0xfffc) == 0x0) 2323 return emulate_gp(ctxt, 0); 2324 ss_sel = (u16)(msr_data + 24); 2325 break; 2326 case X86EMUL_MODE_PROT64: 2327 cs_sel = (u16)(msr_data + 32); 2328 if (msr_data == 0x0) 2329 return emulate_gp(ctxt, 0); 2330 ss_sel = cs_sel + 8; 2331 cs.d = 0; 2332 cs.l = 1; 2333 break; 2334 } 2335 cs_sel |= SELECTOR_RPL_MASK; 2336 ss_sel |= SELECTOR_RPL_MASK; 2337 2338 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2339 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2340 2341 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); 2342 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); 2343 2344 return X86EMUL_CONTINUE; 2345 } 2346 2347 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) 2348 { 2349 int iopl; 2350 if (ctxt->mode == X86EMUL_MODE_REAL) 2351 return false; 2352 if (ctxt->mode == X86EMUL_MODE_VM86) 2353 return true; 2354 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; 2355 return ctxt->ops->cpl(ctxt) > iopl; 2356 } 2357 2358 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, 2359 u16 port, u16 len) 2360 { 2361 const struct x86_emulate_ops *ops = ctxt->ops; 2362 struct desc_struct tr_seg; 2363 u32 base3; 2364 int r; 2365 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; 2366 unsigned mask = (1 << len) - 1; 2367 unsigned long base; 2368 2369 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); 2370 if (!tr_seg.p) 2371 return false; 2372 if (desc_limit_scaled(&tr_seg) < 103) 2373 return false; 2374 base = get_desc_base(&tr_seg); 2375 #ifdef CONFIG_X86_64 2376 base |= ((u64)base3) << 32; 2377 #endif 2378 r = ops->read_std(ctxt, base + 
102, &io_bitmap_ptr, 2, NULL); 2379 if (r != X86EMUL_CONTINUE) 2380 return false; 2381 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2382 return false; 2383 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); 2384 if (r != X86EMUL_CONTINUE) 2385 return false; 2386 if ((perm >> bit_idx) & mask) 2387 return false; 2388 return true; 2389 } 2390 2391 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, 2392 u16 port, u16 len) 2393 { 2394 if (ctxt->perm_ok) 2395 return true; 2396 2397 if (emulator_bad_iopl(ctxt)) 2398 if (!emulator_io_port_access_allowed(ctxt, port, len)) 2399 return false; 2400 2401 ctxt->perm_ok = true; 2402 2403 return true; 2404 } 2405 2406 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, 2407 struct tss_segment_16 *tss) 2408 { 2409 tss->ip = ctxt->_eip; 2410 tss->flag = ctxt->eflags; 2411 tss->ax = reg_read(ctxt, VCPU_REGS_RAX); 2412 tss->cx = reg_read(ctxt, VCPU_REGS_RCX); 2413 tss->dx = reg_read(ctxt, VCPU_REGS_RDX); 2414 tss->bx = reg_read(ctxt, VCPU_REGS_RBX); 2415 tss->sp = reg_read(ctxt, VCPU_REGS_RSP); 2416 tss->bp = reg_read(ctxt, VCPU_REGS_RBP); 2417 tss->si = reg_read(ctxt, VCPU_REGS_RSI); 2418 tss->di = reg_read(ctxt, VCPU_REGS_RDI); 2419 2420 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2421 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2422 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2423 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2424 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); 2425 } 2426 2427 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, 2428 struct tss_segment_16 *tss) 2429 { 2430 int ret; 2431 u8 cpl; 2432 2433 ctxt->_eip = tss->ip; 2434 ctxt->eflags = tss->flag | 2; 2435 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; 2436 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; 2437 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; 2438 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; 2439 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; 2440 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; 2441 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; 2442 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; 2443 2444 /* 2445 * SDM says that segment selectors are loaded before segment 2446 * descriptors 2447 */ 2448 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); 2449 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 2450 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 2451 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 2452 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 2453 2454 cpl = tss->cs & 3; 2455 2456 /* 2457 * Now load segment descriptors. 
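* The privilege checks in the descriptor loads below use the CPL
* derived from the incoming CS.RPL, computed above.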
If a fault happens at this stage, 2458 * it is handled in the context of the new task. 2459 */ 2460 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true); 2461 if (ret != X86EMUL_CONTINUE) 2462 return ret; 2463 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); 2464 if (ret != X86EMUL_CONTINUE) 2465 return ret; 2466 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); 2467 if (ret != X86EMUL_CONTINUE) 2468 return ret; 2469 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); 2470 if (ret != X86EMUL_CONTINUE) 2471 return ret; 2472 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); 2473 if (ret != X86EMUL_CONTINUE) 2474 return ret; 2475 2476 return X86EMUL_CONTINUE; 2477 } 2478 2479 static int task_switch_16(struct x86_emulate_ctxt *ctxt, 2480 u16 tss_selector, u16 old_tss_sel, 2481 ulong old_tss_base, struct desc_struct *new_desc) 2482 { 2483 const struct x86_emulate_ops *ops = ctxt->ops; 2484 struct tss_segment_16 tss_seg; 2485 int ret; 2486 u32 new_tss_base = get_desc_base(new_desc); 2487 2488 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2489 &ctxt->exception); 2490 if (ret != X86EMUL_CONTINUE) 2491 /* FIXME: need to provide precise fault address */ 2492 return ret; 2493 2494 save_state_to_tss16(ctxt, &tss_seg); 2495 2496 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2497 &ctxt->exception); 2498 if (ret != X86EMUL_CONTINUE) 2499 /* FIXME: need to provide precise fault address */ 2500 return ret; 2501 2502 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 2503 &ctxt->exception); 2504 if (ret != X86EMUL_CONTINUE) 2505 /* FIXME: need to provide precise fault address */ 2506 return ret; 2507 2508 if (old_tss_sel != 0xffff) { 2509 tss_seg.prev_task_link = old_tss_sel; 2510 2511 ret = ops->write_std(ctxt, new_tss_base, 2512 &tss_seg.prev_task_link, 2513 sizeof tss_seg.prev_task_link, 2514 &ctxt->exception); 2515 if (ret != X86EMUL_CONTINUE) 2516 /* FIXME: need to provide precise fault address */ 2517 return ret; 2518 } 2519 2520 return load_state_from_tss16(ctxt, &tss_seg); 2521 } 2522 2523 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, 2524 struct tss_segment_32 *tss) 2525 { 2526 /* CR3 and the LDT selector are intentionally not saved */ 2527 tss->eip = ctxt->_eip; 2528 tss->eflags = ctxt->eflags; 2529 tss->eax = reg_read(ctxt, VCPU_REGS_RAX); 2530 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); 2531 tss->edx = reg_read(ctxt, VCPU_REGS_RDX); 2532 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); 2533 tss->esp = reg_read(ctxt, VCPU_REGS_RSP); 2534 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); 2535 tss->esi = reg_read(ctxt, VCPU_REGS_RSI); 2536 tss->edi = reg_read(ctxt, VCPU_REGS_RDI); 2537 2538 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2539 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2540 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2541 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2542 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); 2543 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); 2544 } 2545 2546 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, 2547 struct tss_segment_32 *tss) 2548 { 2549 int ret; 2550 u8 cpl; 2551 2552 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) 2553 return emulate_gp(ctxt, 0); 2554 ctxt->_eip = tss->eip; 2555 ctxt->eflags = tss->eflags | 2; 2556 2557 /* General purpose registers */ 2558 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; 2559 *reg_write(ctxt, VCPU_REGS_RCX) =
tss->ecx; 2560 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; 2561 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; 2562 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; 2563 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; 2564 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; 2565 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; 2566 2567 /* 2568 * SDM says that segment selectors are loaded before segment 2569 * descriptors. This is important because CPL checks will 2570 * use CS.RPL. 2571 */ 2572 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); 2573 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 2574 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 2575 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 2576 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 2577 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); 2578 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); 2579 2580 /* 2581 * If we're switching between Protected Mode and VM86, we need to make 2582 * sure to update the mode before loading the segment descriptors so 2583 * that the selectors are interpreted correctly. 2584 */ 2585 if (ctxt->eflags & X86_EFLAGS_VM) { 2586 ctxt->mode = X86EMUL_MODE_VM86; 2587 cpl = 3; 2588 } else { 2589 ctxt->mode = X86EMUL_MODE_PROT32; 2590 cpl = tss->cs & 3; 2591 } 2592 2593 /* 2594 * Now load segment descriptors. If a fault happens at this stage, 2595 * it is handled in the context of the new task. 2596 */ 2597 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true); 2598 if (ret != X86EMUL_CONTINUE) 2599 return ret; 2600 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); 2601 if (ret != X86EMUL_CONTINUE) 2602 return ret; 2603 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); 2604 if (ret != X86EMUL_CONTINUE) 2605 return ret; 2606 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); 2607 if (ret != X86EMUL_CONTINUE) 2608 return ret; 2609 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); 2610 if (ret != X86EMUL_CONTINUE) 2611 return ret; 2612 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true); 2613 if (ret != X86EMUL_CONTINUE) 2614 return ret; 2615 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true); 2616 if (ret != X86EMUL_CONTINUE) 2617 return ret; 2618 2619 return X86EMUL_CONTINUE; 2620 } 2621 2622 static int task_switch_32(struct x86_emulate_ctxt *ctxt, 2623 u16 tss_selector, u16 old_tss_sel, 2624 ulong old_tss_base, struct desc_struct *new_desc) 2625 { 2626 const struct x86_emulate_ops *ops = ctxt->ops; 2627 struct tss_segment_32 tss_seg; 2628 int ret; 2629 u32 new_tss_base = get_desc_base(new_desc); 2630 u32 eip_offset = offsetof(struct tss_segment_32, eip); 2631 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 2632 2633 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2634 &ctxt->exception); 2635 if (ret != X86EMUL_CONTINUE) 2636 /* FIXME: need to provide precise fault address */ 2637 return ret; 2638 2639 save_state_to_tss32(ctxt, &tss_seg); 2640 2641 /* Only GP registers and segment selectors are saved */ 2642 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, 2643 ldt_sel_offset - eip_offset, &ctxt->exception); 2644 if (ret != X86EMUL_CONTINUE) 2645 /* FIXME: need to provide precise fault address */ 2646 return ret; 2647 2648 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 2649 &ctxt->exception); 2650 if (ret != X86EMUL_CONTINUE) 2651 /* FIXME: need to provide precise fault
address */ 2652 return ret; 2653 2654 if (old_tss_sel != 0xffff) { 2655 tss_seg.prev_task_link = old_tss_sel; 2656 2657 ret = ops->write_std(ctxt, new_tss_base, 2658 &tss_seg.prev_task_link, 2659 sizeof tss_seg.prev_task_link, 2660 &ctxt->exception); 2661 if (ret != X86EMUL_CONTINUE) 2662 /* FIXME: need to provide precise fault address */ 2663 return ret; 2664 } 2665 2666 return load_state_from_tss32(ctxt, &tss_seg); 2667 } 2668 2669 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, 2670 u16 tss_selector, int idt_index, int reason, 2671 bool has_error_code, u32 error_code) 2672 { 2673 const struct x86_emulate_ops *ops = ctxt->ops; 2674 struct desc_struct curr_tss_desc, next_tss_desc; 2675 int ret; 2676 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); 2677 ulong old_tss_base = 2678 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); 2679 u32 desc_limit; 2680 ulong desc_addr; 2681 2682 /* FIXME: old_tss_base == ~0 ? */ 2683 2684 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); 2685 if (ret != X86EMUL_CONTINUE) 2686 return ret; 2687 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); 2688 if (ret != X86EMUL_CONTINUE) 2689 return ret; 2690 2691 /* FIXME: check that next_tss_desc is tss */ 2692 2693 /* 2694 * Check privileges. The three cases are task switch caused by... 2695 * 2696 * 1. jmp/call/int to task gate: Check against DPL of the task gate 2697 * 2. Exception/IRQ/iret: No check is performed 2698 * 3. jmp/call to TSS: Check against DPL of the TSS 2699 */ 2700 if (reason == TASK_SWITCH_GATE) { 2701 if (idt_index != -1) { 2702 /* Software interrupts */ 2703 struct desc_struct task_gate_desc; 2704 int dpl; 2705 2706 ret = read_interrupt_descriptor(ctxt, idt_index, 2707 &task_gate_desc); 2708 if (ret != X86EMUL_CONTINUE) 2709 return ret; 2710 2711 dpl = task_gate_desc.dpl; 2712 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 2713 return emulate_gp(ctxt, (idt_index << 3) | 0x2); 2714 } 2715 } else if (reason != TASK_SWITCH_IRET) { 2716 int dpl = next_tss_desc.dpl; 2717 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 2718 return emulate_gp(ctxt, tss_selector); 2719 } 2720 2721 2722 desc_limit = desc_limit_scaled(&next_tss_desc); 2723 if (!next_tss_desc.p || 2724 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || 2725 desc_limit < 0x2b)) { 2726 emulate_ts(ctxt, tss_selector & 0xfffc); 2727 return X86EMUL_PROPAGATE_FAULT; 2728 } 2729 2730 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 2731 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ 2732 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); 2733 } 2734 2735 if (reason == TASK_SWITCH_IRET) 2736 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; 2737 2738 /* Set the back link to the previous task only if the NT bit is set 2739 * in EFLAGS; note that old_tss_sel is not used after this point. */ 2740 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) 2741 old_tss_sel = 0xffff; 2742 2743 if (next_tss_desc.type & 8) 2744 ret = task_switch_32(ctxt, tss_selector, old_tss_sel, 2745 old_tss_base, &next_tss_desc); 2746 else 2747 ret = task_switch_16(ctxt, tss_selector, old_tss_sel, 2748 old_tss_base, &next_tss_desc); 2749 if (ret != X86EMUL_CONTINUE) 2750 return ret; 2751 2752 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) 2753 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; 2754 2755 if (reason != TASK_SWITCH_IRET) { 2756 next_tss_desc.type |= (1 << 1); /* set busy flag */ 2757 write_segment_descriptor(ctxt, tss_selector,
&next_tss_desc); 2758 } 2759 2760 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); 2761 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); 2762 2763 if (has_error_code) { 2764 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; 2765 ctxt->lock_prefix = 0; 2766 ctxt->src.val = (unsigned long) error_code; 2767 ret = em_push(ctxt); 2768 } 2769 2770 return ret; 2771 } 2772 2773 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 2774 u16 tss_selector, int idt_index, int reason, 2775 bool has_error_code, u32 error_code) 2776 { 2777 int rc; 2778 2779 invalidate_registers(ctxt); 2780 ctxt->_eip = ctxt->eip; 2781 ctxt->dst.type = OP_NONE; 2782 2783 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, 2784 has_error_code, error_code); 2785 2786 if (rc == X86EMUL_CONTINUE) { 2787 ctxt->eip = ctxt->_eip; 2788 writeback_registers(ctxt); 2789 } 2790 2791 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 2792 } 2793 2794 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, 2795 struct operand *op) 2796 { 2797 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count; 2798 2799 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes); 2800 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg)); 2801 } 2802 2803 static int em_das(struct x86_emulate_ctxt *ctxt) 2804 { 2805 u8 al, old_al; 2806 bool af, cf, old_cf; 2807 2808 cf = ctxt->eflags & X86_EFLAGS_CF; 2809 al = ctxt->dst.val; 2810 2811 old_al = al; 2812 old_cf = cf; 2813 cf = false; 2814 af = ctxt->eflags & X86_EFLAGS_AF; 2815 if ((al & 0x0f) > 9 || af) { 2816 al -= 6; 2817 cf = old_cf | (al >= 250); 2818 af = true; 2819 } else { 2820 af = false; 2821 } 2822 if (old_al > 0x99 || old_cf) { 2823 al -= 0x60; 2824 cf = true; 2825 } 2826 2827 ctxt->dst.val = al; 2828 /* Set PF, ZF, SF */ 2829 ctxt->src.type = OP_IMM; 2830 ctxt->src.val = 0; 2831 ctxt->src.bytes = 1; 2832 fastop(ctxt, em_or); 2833 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); 2834 if (cf) 2835 ctxt->eflags |= X86_EFLAGS_CF; 2836 if (af) 2837 ctxt->eflags |= X86_EFLAGS_AF; 2838 return X86EMUL_CONTINUE; 2839 } 2840 2841 static int em_aam(struct x86_emulate_ctxt *ctxt) 2842 { 2843 u8 al, ah; 2844 2845 if (ctxt->src.val == 0) 2846 return emulate_de(ctxt); 2847 2848 al = ctxt->dst.val & 0xff; 2849 ah = al / ctxt->src.val; 2850 al %= ctxt->src.val; 2851 2852 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); 2853 2854 /* Set PF, ZF, SF */ 2855 ctxt->src.type = OP_IMM; 2856 ctxt->src.val = 0; 2857 ctxt->src.bytes = 1; 2858 fastop(ctxt, em_or); 2859 2860 return X86EMUL_CONTINUE; 2861 } 2862 2863 static int em_aad(struct x86_emulate_ctxt *ctxt) 2864 { 2865 u8 al = ctxt->dst.val & 0xff; 2866 u8 ah = (ctxt->dst.val >> 8) & 0xff; 2867 2868 al = (al + (ah * ctxt->src.val)) & 0xff; 2869 2870 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; 2871 2872 /* Set PF, ZF, SF */ 2873 ctxt->src.type = OP_IMM; 2874 ctxt->src.val = 0; 2875 ctxt->src.bytes = 1; 2876 fastop(ctxt, em_or); 2877 2878 return X86EMUL_CONTINUE; 2879 } 2880 2881 static int em_call(struct x86_emulate_ctxt *ctxt) 2882 { 2883 long rel = ctxt->src.val; 2884 2885 ctxt->src.val = (unsigned long)ctxt->_eip; 2886 jmp_rel(ctxt, rel); 2887 return em_push(ctxt); 2888 } 2889 2890 static int em_call_far(struct x86_emulate_ctxt *ctxt) 2891 { 2892 u16 sel, old_cs; 2893 ulong old_eip; 2894 int rc; 2895 2896 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2897 old_eip = ctxt->_eip; 2898 2899 memcpy(&sel, 
ctxt->src.valptr + ctxt->op_bytes, 2); 2900 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); 2901 if (rc != X86EMUL_CONTINUE) /* propagate the fault instead of swallowing it */ return rc; 2902 2903 ctxt->_eip = 0; 2904 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); 2905 2906 ctxt->src.val = old_cs; 2907 rc = em_push(ctxt); 2908 if (rc != X86EMUL_CONTINUE) 2909 return rc; 2910 2911 ctxt->src.val = old_eip; 2912 return em_push(ctxt); 2913 } 2914 2915 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) 2916 { 2917 int rc; 2918 2919 ctxt->dst.type = OP_REG; 2920 ctxt->dst.addr.reg = &ctxt->_eip; 2921 ctxt->dst.bytes = ctxt->op_bytes; 2922 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 2923 if (rc != X86EMUL_CONTINUE) 2924 return rc; 2925 rsp_increment(ctxt, ctxt->src.val); 2926 return X86EMUL_CONTINUE; 2927 } 2928 2929 static int em_xchg(struct x86_emulate_ctxt *ctxt) 2930 { 2931 /* Write back the register source. */ 2932 ctxt->src.val = ctxt->dst.val; 2933 write_register_operand(&ctxt->src); 2934 2935 /* Write back the memory destination with implicit LOCK prefix. */ 2936 ctxt->dst.val = ctxt->src.orig_val; 2937 ctxt->lock_prefix = 1; 2938 return X86EMUL_CONTINUE; 2939 } 2940 2941 static int em_imul_3op(struct x86_emulate_ctxt *ctxt) 2942 { 2943 ctxt->dst.val = ctxt->src2.val; 2944 return fastop(ctxt, em_imul); 2945 } 2946 2947 static int em_cwd(struct x86_emulate_ctxt *ctxt) 2948 { 2949 ctxt->dst.type = OP_REG; 2950 ctxt->dst.bytes = ctxt->src.bytes; 2951 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 2952 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); 2953 2954 return X86EMUL_CONTINUE; 2955 } 2956 2957 static int em_rdtsc(struct x86_emulate_ctxt *ctxt) 2958 { 2959 u64 tsc = 0; 2960 2961 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); 2962 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; 2963 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; 2964 return X86EMUL_CONTINUE; 2965 } 2966 2967 static int em_rdpmc(struct x86_emulate_ctxt *ctxt) 2968 { 2969 u64 pmc; 2970 2971 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) 2972 return emulate_gp(ctxt, 0); 2973 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; 2974 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; 2975 return X86EMUL_CONTINUE; 2976 } 2977 2978 static int em_mov(struct x86_emulate_ctxt *ctxt) 2979 { 2980 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); 2981 return X86EMUL_CONTINUE; 2982 } 2983 2984 #define FFL(x) bit(X86_FEATURE_##x) 2985 2986 static int em_movbe(struct x86_emulate_ctxt *ctxt) 2987 { 2988 u32 ebx, ecx, edx, eax = 1; 2989 u16 tmp; 2990 2991 /* 2992 * Check MOVBE is set in the guest-visible CPUID leaf. 2993 */ 2994 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2995 if (!(ecx & FFL(MOVBE))) 2996 return emulate_ud(ctxt); 2997 2998 switch (ctxt->op_bytes) { 2999 case 2: 3000 /* 3001 * From MOVBE definition: "...When the operand size is 16 bits, 3002 * the upper word of the destination register remains unchanged 3003 * ..." 3004 * 3005 * Casting either ->valptr or ->val to u16 would break strict- 3006 * aliasing rules, so we have to do the operation almost by hand.
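* swab16/swab32/swab64 are the kernel's unconditional byte-swap
* helpers and match MOVBE's behaviour for each operand size.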
3007 */ 3008 tmp = (u16)ctxt->src.val; 3009 ctxt->dst.val &= ~0xffffUL; 3010 ctxt->dst.val |= (unsigned long)swab16(tmp); 3011 break; 3012 case 4: 3013 ctxt->dst.val = swab32((u32)ctxt->src.val); 3014 break; 3015 case 8: 3016 ctxt->dst.val = swab64(ctxt->src.val); 3017 break; 3018 default: 3019 return X86EMUL_PROPAGATE_FAULT; 3020 } 3021 return X86EMUL_CONTINUE; 3022 } 3023 3024 static int em_cr_write(struct x86_emulate_ctxt *ctxt) 3025 { 3026 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) 3027 return emulate_gp(ctxt, 0); 3028 3029 /* Disable writeback. */ 3030 ctxt->dst.type = OP_NONE; 3031 return X86EMUL_CONTINUE; 3032 } 3033 3034 static int em_dr_write(struct x86_emulate_ctxt *ctxt) 3035 { 3036 unsigned long val; 3037 3038 if (ctxt->mode == X86EMUL_MODE_PROT64) 3039 val = ctxt->src.val & ~0ULL; 3040 else 3041 val = ctxt->src.val & ~0U; 3042 3043 /* #UD condition is already handled. */ 3044 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) 3045 return emulate_gp(ctxt, 0); 3046 3047 /* Disable writeback. */ 3048 ctxt->dst.type = OP_NONE; 3049 return X86EMUL_CONTINUE; 3050 } 3051 3052 static int em_wrmsr(struct x86_emulate_ctxt *ctxt) 3053 { 3054 u64 msr_data; 3055 3056 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) 3057 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); 3058 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) 3059 return emulate_gp(ctxt, 0); 3060 3061 return X86EMUL_CONTINUE; 3062 } 3063 3064 static int em_rdmsr(struct x86_emulate_ctxt *ctxt) 3065 { 3066 u64 msr_data; 3067 3068 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) 3069 return emulate_gp(ctxt, 0); 3070 3071 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; 3072 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; 3073 return X86EMUL_CONTINUE; 3074 } 3075 3076 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) 3077 { 3078 if (ctxt->modrm_reg > VCPU_SREG_GS) 3079 return emulate_ud(ctxt); 3080 3081 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); 3082 return X86EMUL_CONTINUE; 3083 } 3084 3085 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) 3086 { 3087 u16 sel = ctxt->src.val; 3088 3089 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) 3090 return emulate_ud(ctxt); 3091 3092 if (ctxt->modrm_reg == VCPU_SREG_SS) 3093 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 3094 3095 /* Disable writeback. */ 3096 ctxt->dst.type = OP_NONE; 3097 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); 3098 } 3099 3100 static int em_lldt(struct x86_emulate_ctxt *ctxt) 3101 { 3102 u16 sel = ctxt->src.val; 3103 3104 /* Disable writeback. */ 3105 ctxt->dst.type = OP_NONE; 3106 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); 3107 } 3108 3109 static int em_ltr(struct x86_emulate_ctxt *ctxt) 3110 { 3111 u16 sel = ctxt->src.val; 3112 3113 /* Disable writeback. */ 3114 ctxt->dst.type = OP_NONE; 3115 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); 3116 } 3117 3118 static int em_invlpg(struct x86_emulate_ctxt *ctxt) 3119 { 3120 int rc; 3121 ulong linear; 3122 3123 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); 3124 if (rc == X86EMUL_CONTINUE) 3125 ctxt->ops->invlpg(ctxt, linear); 3126 /* Disable writeback. 
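INVLPG has no architectural destination operand; the TLB
invalidation above is the instruction's only effect.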
*/ 3127 ctxt->dst.type = OP_NONE; 3128 return X86EMUL_CONTINUE; 3129 } 3130 3131 static int em_clts(struct x86_emulate_ctxt *ctxt) 3132 { 3133 ulong cr0; 3134 3135 cr0 = ctxt->ops->get_cr(ctxt, 0); 3136 cr0 &= ~X86_CR0_TS; 3137 ctxt->ops->set_cr(ctxt, 0, cr0); 3138 return X86EMUL_CONTINUE; 3139 } 3140 3141 static int em_vmcall(struct x86_emulate_ctxt *ctxt) 3142 { 3143 int rc; 3144 3145 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1) 3146 return X86EMUL_UNHANDLEABLE; 3147 3148 rc = ctxt->ops->fix_hypercall(ctxt); 3149 if (rc != X86EMUL_CONTINUE) 3150 return rc; 3151 3152 /* Let the processor re-execute the fixed hypercall */ 3153 ctxt->_eip = ctxt->eip; 3154 /* Disable writeback. */ 3155 ctxt->dst.type = OP_NONE; 3156 return X86EMUL_CONTINUE; 3157 } 3158 3159 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, 3160 void (*get)(struct x86_emulate_ctxt *ctxt, 3161 struct desc_ptr *ptr)) 3162 { 3163 struct desc_ptr desc_ptr; 3164 3165 if (ctxt->mode == X86EMUL_MODE_PROT64) 3166 ctxt->op_bytes = 8; 3167 get(ctxt, &desc_ptr); 3168 if (ctxt->op_bytes == 2) { 3169 ctxt->op_bytes = 4; 3170 desc_ptr.address &= 0x00ffffff; 3171 } 3172 /* Disable writeback. */ 3173 ctxt->dst.type = OP_NONE; 3174 return segmented_write(ctxt, ctxt->dst.addr.mem, 3175 &desc_ptr, 2 + ctxt->op_bytes); 3176 } 3177 3178 static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3179 { 3180 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); 3181 } 3182 3183 static int em_sidt(struct x86_emulate_ctxt *ctxt) 3184 { 3185 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); 3186 } 3187 3188 static int em_lgdt(struct x86_emulate_ctxt *ctxt) 3189 { 3190 struct desc_ptr desc_ptr; 3191 int rc; 3192 3193 if (ctxt->mode == X86EMUL_MODE_PROT64) 3194 ctxt->op_bytes = 8; 3195 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3196 &desc_ptr.size, &desc_ptr.address, 3197 ctxt->op_bytes); 3198 if (rc != X86EMUL_CONTINUE) 3199 return rc; 3200 ctxt->ops->set_gdt(ctxt, &desc_ptr); 3201 /* Disable writeback. */ 3202 ctxt->dst.type = OP_NONE; 3203 return X86EMUL_CONTINUE; 3204 } 3205 3206 static int em_vmmcall(struct x86_emulate_ctxt *ctxt) 3207 { 3208 int rc; 3209 3210 rc = ctxt->ops->fix_hypercall(ctxt); 3211 3212 /* Disable writeback. */ 3213 ctxt->dst.type = OP_NONE; 3214 return rc; 3215 } 3216 3217 static int em_lidt(struct x86_emulate_ctxt *ctxt) 3218 { 3219 struct desc_ptr desc_ptr; 3220 int rc; 3221 3222 if (ctxt->mode == X86EMUL_MODE_PROT64) 3223 ctxt->op_bytes = 8; 3224 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3225 &desc_ptr.size, &desc_ptr.address, 3226 ctxt->op_bytes); 3227 if (rc != X86EMUL_CONTINUE) 3228 return rc; 3229 ctxt->ops->set_idt(ctxt, &desc_ptr); 3230 /* Disable writeback. 
*/ 3231 ctxt->dst.type = OP_NONE; 3232 return X86EMUL_CONTINUE; 3233 } 3234 3235 static int em_smsw(struct x86_emulate_ctxt *ctxt) 3236 { 3237 if (ctxt->dst.type == OP_MEM) 3238 ctxt->dst.bytes = 2; 3239 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); 3240 return X86EMUL_CONTINUE; 3241 } 3242 3243 static int em_lmsw(struct x86_emulate_ctxt *ctxt) 3244 { 3245 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) 3246 | (ctxt->src.val & 0x0f)); 3247 ctxt->dst.type = OP_NONE; 3248 return X86EMUL_CONTINUE; 3249 } 3250 3251 static int em_loop(struct x86_emulate_ctxt *ctxt) 3252 { 3253 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); 3254 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3255 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3256 jmp_rel(ctxt, ctxt->src.val); 3257 3258 return X86EMUL_CONTINUE; 3259 } 3260 3261 static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3262 { 3263 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3264 jmp_rel(ctxt, ctxt->src.val); 3265 3266 return X86EMUL_CONTINUE; 3267 } 3268 3269 static int em_in(struct x86_emulate_ctxt *ctxt) 3270 { 3271 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, 3272 &ctxt->dst.val)) 3273 return X86EMUL_IO_NEEDED; 3274 3275 return X86EMUL_CONTINUE; 3276 } 3277 3278 static int em_out(struct x86_emulate_ctxt *ctxt) 3279 { 3280 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, 3281 &ctxt->src.val, 1); 3282 /* Disable writeback. */ 3283 ctxt->dst.type = OP_NONE; 3284 return X86EMUL_CONTINUE; 3285 } 3286 3287 static int em_cli(struct x86_emulate_ctxt *ctxt) 3288 { 3289 if (emulator_bad_iopl(ctxt)) 3290 return emulate_gp(ctxt, 0); 3291 3292 ctxt->eflags &= ~X86_EFLAGS_IF; 3293 return X86EMUL_CONTINUE; 3294 } 3295 3296 static int em_sti(struct x86_emulate_ctxt *ctxt) 3297 { 3298 if (emulator_bad_iopl(ctxt)) 3299 return emulate_gp(ctxt, 0); 3300 3301 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3302 ctxt->eflags |= X86_EFLAGS_IF; 3303 return X86EMUL_CONTINUE; 3304 } 3305 3306 static int em_cpuid(struct x86_emulate_ctxt *ctxt) 3307 { 3308 u32 eax, ebx, ecx, edx; 3309 3310 eax = reg_read(ctxt, VCPU_REGS_RAX); 3311 ecx = reg_read(ctxt, VCPU_REGS_RCX); 3312 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 3313 *reg_write(ctxt, VCPU_REGS_RAX) = eax; 3314 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; 3315 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; 3316 *reg_write(ctxt, VCPU_REGS_RDX) = edx; 3317 return X86EMUL_CONTINUE; 3318 } 3319 3320 static int em_sahf(struct x86_emulate_ctxt *ctxt) 3321 { 3322 u32 flags; 3323 3324 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; 3325 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; 3326 3327 ctxt->eflags &= ~0xffUL; 3328 ctxt->eflags |= flags | X86_EFLAGS_FIXED; 3329 return X86EMUL_CONTINUE; 3330 } 3331 3332 static int em_lahf(struct x86_emulate_ctxt *ctxt) 3333 { 3334 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; 3335 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; 3336 return X86EMUL_CONTINUE; 3337 } 3338 3339 static int em_bswap(struct x86_emulate_ctxt *ctxt) 3340 { 3341 switch (ctxt->op_bytes) { 3342 #ifdef CONFIG_X86_64 3343 case 8: 3344 asm("bswap %0" : "+r"(ctxt->dst.val)); 3345 break; 3346 #endif 3347 default: 3348 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); 3349 break; 3350 } 3351 return X86EMUL_CONTINUE; 3352 } 3353 3354 static bool valid_cr(int nr) 3355 { 3356 switch (nr) { 3357 case 0: 3358 case 2 ... 
4: 3359 case 8: 3360 return true; 3361 default: 3362 return false; 3363 } 3364 } 3365 3366 static int check_cr_read(struct x86_emulate_ctxt *ctxt) 3367 { 3368 if (!valid_cr(ctxt->modrm_reg)) 3369 return emulate_ud(ctxt); 3370 3371 return X86EMUL_CONTINUE; 3372 } 3373 3374 static int check_cr_write(struct x86_emulate_ctxt *ctxt) 3375 { 3376 u64 new_val = ctxt->src.val64; 3377 int cr = ctxt->modrm_reg; 3378 u64 efer = 0; 3379 3380 static u64 cr_reserved_bits[] = { 3381 0xffffffff00000000ULL, 3382 0, 0, 0, /* CR3 checked later */ 3383 CR4_RESERVED_BITS, 3384 0, 0, 0, 3385 CR8_RESERVED_BITS, 3386 }; 3387 3388 if (!valid_cr(cr)) 3389 return emulate_ud(ctxt); 3390 3391 if (new_val & cr_reserved_bits[cr]) 3392 return emulate_gp(ctxt, 0); 3393 3394 switch (cr) { 3395 case 0: { 3396 u64 cr4; 3397 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || 3398 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) 3399 return emulate_gp(ctxt, 0); 3400 3401 cr4 = ctxt->ops->get_cr(ctxt, 4); 3402 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3403 3404 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && 3405 !(cr4 & X86_CR4_PAE)) 3406 return emulate_gp(ctxt, 0); 3407 3408 break; 3409 } 3410 case 3: { 3411 u64 rsvd = 0; 3412 3413 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3414 if (efer & EFER_LMA) 3415 rsvd = CR3_L_MODE_RESERVED_BITS; 3416 3417 if (new_val & rsvd) 3418 return emulate_gp(ctxt, 0); 3419 3420 break; 3421 } 3422 case 4: { 3423 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3424 3425 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) 3426 return emulate_gp(ctxt, 0); 3427 3428 break; 3429 } 3430 } 3431 3432 return X86EMUL_CONTINUE; 3433 } 3434 3435 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) 3436 { 3437 unsigned long dr7; 3438 3439 ctxt->ops->get_dr(ctxt, 7, &dr7); 3440 3441 /* Check if DR7.Global_Enable is set */ 3442 return dr7 & (1 << 13); 3443 } 3444 3445 static int check_dr_read(struct x86_emulate_ctxt *ctxt) 3446 { 3447 int dr = ctxt->modrm_reg; 3448 u64 cr4; 3449 3450 if (dr > 7) 3451 return emulate_ud(ctxt); 3452 3453 cr4 = ctxt->ops->get_cr(ctxt, 4); 3454 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) 3455 return emulate_ud(ctxt); 3456 3457 if (check_dr7_gd(ctxt)) 3458 return emulate_db(ctxt); 3459 3460 return X86EMUL_CONTINUE; 3461 } 3462 3463 static int check_dr_write(struct x86_emulate_ctxt *ctxt) 3464 { 3465 u64 new_val = ctxt->src.val64; 3466 int dr = ctxt->modrm_reg; 3467 3468 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) 3469 return emulate_gp(ctxt, 0); 3470 3471 return check_dr_read(ctxt); 3472 } 3473 3474 static int check_svme(struct x86_emulate_ctxt *ctxt) 3475 { 3476 u64 efer; 3477 3478 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3479 3480 if (!(efer & EFER_SVME)) 3481 return emulate_ud(ctxt); 3482 3483 return X86EMUL_CONTINUE; 3484 } 3485 3486 static int check_svme_pa(struct x86_emulate_ctxt *ctxt) 3487 { 3488 u64 rax = reg_read(ctxt, VCPU_REGS_RAX); 3489 3490 /* Valid physical address? 
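These SVM instructions (VMRUN, VMLOAD, VMSAVE) take a physical
address in RAX; the check below only rejects addresses with
bits 63:48 set.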
*/ 3491 if (rax & 0xffff000000000000ULL) 3492 return emulate_gp(ctxt, 0); 3493 3494 return check_svme(ctxt); 3495 } 3496 3497 static int check_rdtsc(struct x86_emulate_ctxt *ctxt) 3498 { 3499 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3500 3501 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) 3502 return emulate_ud(ctxt); 3503 3504 return X86EMUL_CONTINUE; 3505 } 3506 3507 static int check_rdpmc(struct x86_emulate_ctxt *ctxt) 3508 { 3509 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3510 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); 3511 3512 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || 3513 ctxt->ops->check_pmc(ctxt, rcx)) 3514 return emulate_gp(ctxt, 0); 3515 3516 return X86EMUL_CONTINUE; 3517 } 3518 3519 static int check_perm_in(struct x86_emulate_ctxt *ctxt) 3520 { 3521 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); 3522 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) 3523 return emulate_gp(ctxt, 0); 3524 3525 return X86EMUL_CONTINUE; 3526 } 3527 3528 static int check_perm_out(struct x86_emulate_ctxt *ctxt) 3529 { 3530 ctxt->src.bytes = min(ctxt->src.bytes, 4u); 3531 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) 3532 return emulate_gp(ctxt, 0); 3533 3534 return X86EMUL_CONTINUE; 3535 } 3536 3537 #define D(_y) { .flags = (_y) } 3538 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } 3539 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ 3540 .intercept = x86_intercept_##_i, .check_perm = (_p) } 3541 #define N D(NotImpl) 3542 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } 3543 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } 3544 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } 3545 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } 3546 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } 3547 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } 3548 #define II(_f, _e, _i) \ 3549 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } 3550 #define IIP(_f, _e, _i, _p) \ 3551 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ 3552 .intercept = x86_intercept_##_i, .check_perm = (_p) } 3553 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } 3554 3555 #define D2bv(_f) D((_f) | ByteOp), D(_f) 3556 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) 3557 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) 3558 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) 3559 #define I2bvIP(_f, _e, _i, _p) \ 3560 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) 3561 3562 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ 3563 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ 3564 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) 3565 3566 static const struct opcode group7_rm1[] = { 3567 DI(SrcNone | Priv, monitor), 3568 DI(SrcNone | Priv, mwait), 3569 N, N, N, N, N, N, 3570 }; 3571 3572 static const struct opcode group7_rm3[] = { 3573 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), 3574 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), 3575 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), 3576 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), 3577 DIP(SrcNone | Prot | Priv, stgi, check_svme), 3578 DIP(SrcNone | Prot | Priv, clgi, check_svme), 3579 DIP(SrcNone | Prot | Priv, skinit, check_svme), 3580 DIP(SrcNone | Prot | Priv, invlpga, check_svme), 3581 }; 3582 3583 static const struct opcode group7_rm7[] = { 3584 N, 3585 DIP(SrcNone, rdtscp, 
check_rdtsc), 3586 N, N, N, N, N, N, 3587 }; 3588 3589 static const struct opcode group1[] = { 3590 F(Lock, em_add), 3591 F(Lock | PageTable, em_or), 3592 F(Lock, em_adc), 3593 F(Lock, em_sbb), 3594 F(Lock | PageTable, em_and), 3595 F(Lock, em_sub), 3596 F(Lock, em_xor), 3597 F(NoWrite, em_cmp), 3598 }; 3599 3600 static const struct opcode group1A[] = { 3601 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, 3602 }; 3603 3604 static const struct opcode group2[] = { 3605 F(DstMem | ModRM, em_rol), 3606 F(DstMem | ModRM, em_ror), 3607 F(DstMem | ModRM, em_rcl), 3608 F(DstMem | ModRM, em_rcr), 3609 F(DstMem | ModRM, em_shl), 3610 F(DstMem | ModRM, em_shr), 3611 F(DstMem | ModRM, em_shl), 3612 F(DstMem | ModRM, em_sar), 3613 }; 3614 3615 static const struct opcode group3[] = { 3616 F(DstMem | SrcImm | NoWrite, em_test), 3617 F(DstMem | SrcImm | NoWrite, em_test), 3618 F(DstMem | SrcNone | Lock, em_not), 3619 F(DstMem | SrcNone | Lock, em_neg), 3620 F(DstXacc | Src2Mem, em_mul_ex), 3621 F(DstXacc | Src2Mem, em_imul_ex), 3622 F(DstXacc | Src2Mem, em_div_ex), 3623 F(DstXacc | Src2Mem, em_idiv_ex), 3624 }; 3625 3626 static const struct opcode group4[] = { 3627 F(ByteOp | DstMem | SrcNone | Lock, em_inc), 3628 F(ByteOp | DstMem | SrcNone | Lock, em_dec), 3629 N, N, N, N, N, N, 3630 }; 3631 3632 static const struct opcode group5[] = { 3633 F(DstMem | SrcNone | Lock, em_inc), 3634 F(DstMem | SrcNone | Lock, em_dec), 3635 I(SrcMem | Stack, em_grp45), 3636 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), 3637 I(SrcMem | Stack, em_grp45), 3638 I(SrcMemFAddr | ImplicitOps, em_grp45), 3639 I(SrcMem | Stack, em_grp45), D(Undefined), 3640 }; 3641 3642 static const struct opcode group6[] = { 3643 DI(Prot, sldt), 3644 DI(Prot, str), 3645 II(Prot | Priv | SrcMem16, em_lldt, lldt), 3646 II(Prot | Priv | SrcMem16, em_ltr, ltr), 3647 N, N, N, N, 3648 }; 3649 3650 static const struct group_dual group7 = { { 3651 II(Mov | DstMem, em_sgdt, sgdt), 3652 II(Mov | DstMem, em_sidt, sidt), 3653 II(SrcMem | Priv, em_lgdt, lgdt), 3654 II(SrcMem | Priv, em_lidt, lidt), 3655 II(SrcNone | DstMem | Mov, em_smsw, smsw), N, 3656 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), 3657 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), 3658 }, { 3659 I(SrcNone | Priv | EmulateOnUD, em_vmcall), 3660 EXT(0, group7_rm1), 3661 N, EXT(0, group7_rm3), 3662 II(SrcNone | DstMem | Mov, em_smsw, smsw), N, 3663 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), 3664 EXT(0, group7_rm7), 3665 } }; 3666 3667 static const struct opcode group8[] = { 3668 N, N, N, N, 3669 F(DstMem | SrcImmByte | NoWrite, em_bt), 3670 F(DstMem | SrcImmByte | Lock | PageTable, em_bts), 3671 F(DstMem | SrcImmByte | Lock, em_btr), 3672 F(DstMem | SrcImmByte | Lock | PageTable, em_btc), 3673 }; 3674 3675 static const struct group_dual group9 = { { 3676 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, 3677 }, { 3678 N, N, N, N, N, N, N, N, 3679 } }; 3680 3681 static const struct opcode group11[] = { 3682 I(DstMem | SrcImm | Mov | PageTable, em_mov), 3683 X7(D(Undefined)), 3684 }; 3685 3686 static const struct gprefix pfx_0f_6f_0f_7f = { 3687 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), 3688 }; 3689 3690 static const struct gprefix pfx_vmovntpx = { 3691 I(0, em_mov), N, N, N, 3692 }; 3693 3694 static const struct gprefix pfx_0f_28_0f_29 = { 3695 I(Aligned, em_mov), I(Aligned, em_mov), N, N, 3696 }; 3697 3698 static const struct escape escape_d9 = { { 3699 N, N, N, N, N, N, N, I(DstMem, em_fnstcw), 3700 }, { 3701 /* 0xC0 - 
0xC7 */ 3702 N, N, N, N, N, N, N, N, 3703 /* 0xC8 - 0xCF */ 3704 N, N, N, N, N, N, N, N, 3705 /* 0xD0 - 0xD7 */ 3706 N, N, N, N, N, N, N, N, 3707 /* 0xD8 - 0xDF */ 3708 N, N, N, N, N, N, N, N, 3709 /* 0xE0 - 0xE7 */ 3710 N, N, N, N, N, N, N, N, 3711 /* 0xE8 - 0xEF */ 3712 N, N, N, N, N, N, N, N, 3713 /* 0xF0 - 0xF7 */ 3714 N, N, N, N, N, N, N, N, 3715 /* 0xF8 - 0xFF */ 3716 N, N, N, N, N, N, N, N, 3717 } }; 3718 3719 static const struct escape escape_db = { { 3720 N, N, N, N, N, N, N, N, 3721 }, { 3722 /* 0xC0 - 0xC7 */ 3723 N, N, N, N, N, N, N, N, 3724 /* 0xC8 - 0xCF */ 3725 N, N, N, N, N, N, N, N, 3726 /* 0xD0 - 0xD7 */ 3727 N, N, N, N, N, N, N, N, 3728 /* 0xD8 - 0xDF */ 3729 N, N, N, N, N, N, N, N, 3730 /* 0xE0 - 0xE7 */ 3731 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, 3732 /* 0xE8 - 0xEF */ 3733 N, N, N, N, N, N, N, N, 3734 /* 0xF0 - 0xF7 */ 3735 N, N, N, N, N, N, N, N, 3736 /* 0xF8 - 0xFF */ 3737 N, N, N, N, N, N, N, N, 3738 } }; 3739 3740 static const struct escape escape_dd = { { 3741 N, N, N, N, N, N, N, I(DstMem, em_fnstsw), 3742 }, { 3743 /* 0xC0 - 0xC7 */ 3744 N, N, N, N, N, N, N, N, 3745 /* 0xC8 - 0xCF */ 3746 N, N, N, N, N, N, N, N, 3747 /* 0xD0 - 0xD7 */ 3748 N, N, N, N, N, N, N, N, 3749 /* 0xD8 - 0xDF */ 3750 N, N, N, N, N, N, N, N, 3751 /* 0xE0 - 0xE7 */ 3752 N, N, N, N, N, N, N, N, 3753 /* 0xE8 - 0xEF */ 3754 N, N, N, N, N, N, N, N, 3755 /* 0xF0 - 0xF7 */ 3756 N, N, N, N, N, N, N, N, 3757 /* 0xF8 - 0xFF */ 3758 N, N, N, N, N, N, N, N, 3759 } }; 3760 3761 static const struct opcode opcode_table[256] = { 3762 /* 0x00 - 0x07 */ 3763 F6ALU(Lock, em_add), 3764 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), 3765 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), 3766 /* 0x08 - 0x0F */ 3767 F6ALU(Lock | PageTable, em_or), 3768 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), 3769 N, 3770 /* 0x10 - 0x17 */ 3771 F6ALU(Lock, em_adc), 3772 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), 3773 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), 3774 /* 0x18 - 0x1F */ 3775 F6ALU(Lock, em_sbb), 3776 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), 3777 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), 3778 /* 0x20 - 0x27 */ 3779 F6ALU(Lock | PageTable, em_and), N, N, 3780 /* 0x28 - 0x2F */ 3781 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), 3782 /* 0x30 - 0x37 */ 3783 F6ALU(Lock, em_xor), N, N, 3784 /* 0x38 - 0x3F */ 3785 F6ALU(NoWrite, em_cmp), N, N, 3786 /* 0x40 - 0x4F */ 3787 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), 3788 /* 0x50 - 0x57 */ 3789 X8(I(SrcReg | Stack, em_push)), 3790 /* 0x58 - 0x5F */ 3791 X8(I(DstReg | Stack, em_pop)), 3792 /* 0x60 - 0x67 */ 3793 I(ImplicitOps | Stack | No64, em_pusha), 3794 I(ImplicitOps | Stack | No64, em_popa), 3795 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */, 3796 N, N, N, N, 3797 /* 0x68 - 0x6F */ 3798 I(SrcImm | Mov | Stack, em_push), 3799 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 3800 I(SrcImmByte | Mov | Stack, em_push), 3801 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 3802 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ 3803 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ 3804 /* 0x70 - 0x7F */ 3805 X16(D(SrcImmByte)), 3806 /* 0x80 - 0x87 */ 3807 G(ByteOp | DstMem | SrcImm, group1), 3808 G(DstMem | SrcImm, group1), 3809 G(ByteOp | DstMem | SrcImm | No64, group1), 3810 G(DstMem | SrcImmByte, group1), 3811 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), 3812 I2bv(DstMem |
SrcReg | ModRM | Lock | PageTable, em_xchg), 3813 /* 0x88 - 0x8F */ 3814 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), 3815 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), 3816 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), 3817 D(ModRM | SrcMem | NoAccess | DstReg), 3818 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), 3819 G(0, group1A), 3820 /* 0x90 - 0x97 */ 3821 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), 3822 /* 0x98 - 0x9F */ 3823 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), 3824 I(SrcImmFAddr | No64, em_call_far), N, 3825 II(ImplicitOps | Stack, em_pushf, pushf), 3826 II(ImplicitOps | Stack, em_popf, popf), 3827 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), 3828 /* 0xA0 - 0xA7 */ 3829 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), 3830 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), 3831 I2bv(SrcSI | DstDI | Mov | String, em_mov), 3832 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp), 3833 /* 0xA8 - 0xAF */ 3834 F2bv(DstAcc | SrcImm | NoWrite, em_test), 3835 I2bv(SrcAcc | DstDI | Mov | String, em_mov), 3836 I2bv(SrcSI | DstAcc | Mov | String, em_mov), 3837 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), 3838 /* 0xB0 - 0xB7 */ 3839 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), 3840 /* 0xB8 - 0xBF */ 3841 X8(I(DstReg | SrcImm64 | Mov, em_mov)), 3842 /* 0xC0 - 0xC7 */ 3843 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), 3844 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), 3845 I(ImplicitOps | Stack, em_ret), 3846 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), 3847 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), 3848 G(ByteOp, group11), G(0, group11), 3849 /* 0xC8 - 0xCF */ 3850 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), 3851 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), 3852 I(ImplicitOps | Stack, em_ret_far), 3853 D(ImplicitOps), DI(SrcImmByte, intn), 3854 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), 3855 /* 0xD0 - 0xD7 */ 3856 G(Src2One | ByteOp, group2), G(Src2One, group2), 3857 G(Src2CL | ByteOp, group2), G(Src2CL, group2), 3858 I(DstAcc | SrcImmUByte | No64, em_aam), 3859 I(DstAcc | SrcImmUByte | No64, em_aad), 3860 F(DstAcc | ByteOp | No64, em_salc), 3861 I(DstAcc | SrcXLat | ByteOp, em_mov), 3862 /* 0xD8 - 0xDF */ 3863 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, 3864 /* 0xE0 - 0xE7 */ 3865 X3(I(SrcImmByte, em_loop)), 3866 I(SrcImmByte, em_jcxz), 3867 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), 3868 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), 3869 /* 0xE8 - 0xEF */ 3870 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps), 3871 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps), 3872 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), 3873 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), 3874 /* 0xF0 - 0xF7 */ 3875 N, DI(ImplicitOps, icebp), N, N, 3876 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 3877 G(ByteOp, group3), G(0, group3), 3878 /* 0xF8 - 0xFF */ 3879 D(ImplicitOps), D(ImplicitOps), 3880 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), 3881 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), 3882 }; 3883 3884 static const struct opcode twobyte_table[256] = { 3885 /* 0x00 - 0x0F */ 3886 G(0, group6), GD(0, &group7), N, N, 3887 N, I(ImplicitOps | EmulateOnUD, em_syscall), 3888 II(ImplicitOps | Priv, em_clts, clts), N, 3889 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 3890 N, D(ImplicitOps | ModRM), N, N, 3891 /* 0x10 - 0x1F */ 3892 N, N, N, N, N, N, 
N, N, 3893 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), 3894 /* 0x20 - 0x2F */ 3895 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), 3896 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), 3897 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, 3898 check_cr_write), 3899 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, 3900 check_dr_write), 3901 N, N, N, N, 3902 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), 3903 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), 3904 N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx), 3905 N, N, N, N, 3906 /* 0x30 - 0x3F */ 3907 II(ImplicitOps | Priv, em_wrmsr, wrmsr), 3908 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), 3909 II(ImplicitOps | Priv, em_rdmsr, rdmsr), 3910 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), 3911 I(ImplicitOps | EmulateOnUD, em_sysenter), 3912 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), 3913 N, N, 3914 N, N, N, N, N, N, N, N, 3915 /* 0x40 - 0x4F */ 3916 X16(D(DstReg | SrcMem | ModRM)), 3917 /* 0x50 - 0x5F */ 3918 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 3919 /* 0x60 - 0x6F */ 3920 N, N, N, N, 3921 N, N, N, N, 3922 N, N, N, N, 3923 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), 3924 /* 0x70 - 0x7F */ 3925 N, N, N, N, 3926 N, N, N, N, 3927 N, N, N, N, 3928 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), 3929 /* 0x80 - 0x8F */ 3930 X16(D(SrcImm)), 3931 /* 0x90 - 0x9F */ 3932 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), 3933 /* 0xA0 - 0xA7 */ 3934 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), 3935 II(ImplicitOps, em_cpuid, cpuid), 3936 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), 3937 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), 3938 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, 3939 /* 0xA8 - 0xAF */ 3940 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), 3941 DI(ImplicitOps, rsm), 3942 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), 3943 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), 3944 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), 3945 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul), 3946 /* 0xB0 - 0xB7 */ 3947 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), 3948 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), 3949 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), 3950 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), 3951 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), 3952 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), 3953 /* 0xB8 - 0xBF */ 3954 N, N, 3955 G(BitOp, group8), 3956 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), 3957 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), 3958 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), 3959 /* 0xC0 - 0xC7 */ 3960 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), 3961 N, D(DstMem | SrcReg | ModRM | Mov), 3962 N, N, N, GD(0, &group9), 3963 /* 0xC8 - 0xCF */ 3964 X8(I(DstReg, em_bswap)), 3965 /* 0xD0 - 0xDF */ 3966 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 3967 /* 0xE0 - 0xEF */ 3968 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, 3969 /* 0xF0 - 0xFF */ 3970 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N 3971 }; 3972 3973 static const struct gprefix three_byte_0f_38_f0 = { 3974 I(DstReg | SrcMem | Mov, em_movbe), N, N, N 3975 }; 3976 3977 static const struct gprefix three_byte_0f_38_f1 = { 3978 I(DstMem | SrcReg | Mov, 
static const struct gprefix three_byte_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};

/*
 * The instructions below are selected by the mandatory prefix, which is
 * used to index the entry for the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
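/*
 * Worked example: in 64-bit mode with REX.W, op_bytes is 8, so imm_size()
 * yields 4 and decode_imm() fetches a 32-bit immediate that the s32 cast
 * sign-extends to 64 bits -- matching hardware, where most instructions
 * take at most an imm32.  OpImm64 (used only by the 0xB8-0xBF mov
 * reg, imm forms) is the exception: it passes op_bytes through and
 * fetches the full 8 bytes.
 */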
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
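/*
 * Example decode: XLAT (0xD7) declares its source as OpXLat, so the case
 * above makes src.addr seg:(rBX + AL) and em_mov then moves the table
 * byte into AL.  Likewise STOS decodes its destination as OpDI (always
 * ES:rDI, never overridable) and LODS its source as OpSI (DS:rSI unless
 * a segment override prefix was seen).
 */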
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
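	/*
	 * The XOR toggles above work because the two legal sizes differ in
	 * exactly those bits: a 0x66 prefix in 32-bit mode gives
	 * op_bytes = 4 ^ 6 = 2, while in 16-bit mode it gives 2 ^ 6 = 4;
	 * a 0x67 prefix in 64-bit mode gives ad_bytes = 8 ^ 12 = 4.
	 * REX.W then forces the operand size to 8 regardless of any 0x66
	 * prefix, as on hardware.
	 */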
	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 ||
	     (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}
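	/*
	 * Group-dispatch example: for "neg r/m32" (0xF7 /3) the table entry
	 * is G(0, group3), so the loop above takes the Group case, extracts
	 * goffset = (modrm >> 3) & 7 = 3 and replaces opcode with the
	 * fourth entry of the group3 table defined earlier -- i.e. the reg
	 * field of ModRM acts as an opcode extension, exactly as in the
	 * SDM's opcode-group tables.
	 */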
	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
			ctxt->op_bytes = 8;

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE and REPNE:
	 * if the repeat prefix is REPE/REPZ or REPNE/REPNZ, test the
	 * corresponding condition:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
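/*
 * How fastop() dispatches, as a sketch of the FASTOP machinery defined
 * earlier in this file: each FASTOP table is believed to pack the byte,
 * word, long and (on 64-bit) quad variants of an ALU primitive at fixed
 * FASTOP_SIZE strides, so for a 4-byte destination __ffs(4) = 2 skips
 * two slots past the byte variant to reach the 32-bit one.  The inline
 * asm round-trips the guest's arithmetic flags through the host: guest
 * EFLAGS (masked by EFLAGS_MASK, with IF forced on so the host keeps
 * taking interrupts) are loaded with popf, the primitive runs on
 * dst.val/src.val/src2.val in RAX/RDX/RCX, and pushf captures the
 * result flags.  A primitive that needs to raise #DE signals this by
 * returning a NULL continuation in the fop register (RSI).
 */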
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) ||
	    ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
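	/*
	 * Example of the early exit above: "rep movsb" entered with
	 * RCX == 0 (masked to the current address size) performs no
	 * iterations, so emulation just advances RIP and clears RF --
	 * exactly how the zero-count case behaves on hardware.
	 */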
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f:	/* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d:		/* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98:		/* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9:		/* jmp rel */
	case 0xeb:		/* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
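	/*
	 * Writeback phase: the common exit below stores the computed
	 * destination unless the entry was marked NoWrite (cmp, test, bt),
	 * and additionally stores the *source* operand for SrcWrite
	 * entries -- xadd (0F C0/C1) is the one user in the tables above,
	 * since it must update both its register and its memory operand.
	 */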
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
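	/*
	 * Opcodes that reach the labels below have no ->execute handler:
	 * they are the residual two-byte instructions still emulated by
	 * the switch that follows (cmov, setcc, movzx, ...).  The
	 * threebyte_insn label simply rejoins the common writeback path.
	 */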
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20:		/* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21:		/* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
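/*
 * Typical call sequence, as a rough sketch only (the real callers live
 * in x86.c and layer exception injection, retry and restart policy on
 * top of this file):
 *
 *	init_decode_cache(ctxt);
 *	r = x86_decode_insn(ctxt, insn, insn_len);
 *	if (r == EMULATION_OK)
 *		r = x86_emulate_insn(ctxt);
 *
 * EMULATION_RESTART asks the caller to re-enter the guest and resume
 * the same (string) instruction on the next exit; EMULATION_FAILED
 * means the caller must fall back to some other recovery path.
 */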