/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
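/*
 * Illustration (a hypothetical entry, not taken from the opcode tables later
 * in this file): an opcode described as
 *
 *	ByteOp | DstMem | SrcReg | ModRM | Lock
 *
 * decodes a ModRM byte, takes its 8-bit destination from memory and its
 * source from the ModRM reg field, and accepts a LOCK prefix.
 */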
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
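/*
 * How the group structures above are consumed during decode (a sketch; the
 * decode loop itself is outside this excerpt): a Group opcode indexes an
 * opcode[8] table by the ModRM reg field; GroupDual additionally selects
 * mod3[] instead of mod012[] when ModRM.mod == 3; a gprefix picks one of
 * pfx_no/pfx_66/pfx_f2/pfx_f3 by mandatory prefix; an escape uses
 * op[ModRM.reg] when mod != 3 and high[ModRM byte - 0xc0] otherwise.
 */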
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
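/*
 * For instance, FASTOP2(add) emits an em_add symbol laid out as four
 * FASTOP_SIZE-byte slots:
 *
 *	em_add:      addb %dl,  %al;  ret	(byte)
 *	em_add+0x08: addw %dx,  %ax;  ret	(word)
 *	em_add+0x10: addl %edx, %eax; ret	(long)
 *	em_add+0x18: addq %rdx, %rax; ret	(quad, 64-bit kernels only)
 *
 * so a dispatcher can reach the right size variant by adding a multiple of
 * FASTOP_SIZE to the base address instead of consulting a jump table.
 */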
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
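/*
 * Worked example: with a 16-bit stack segment (ss.d == 0), stack_mask()
 * returns 0xffff, so __fls() yields 15 and stack_size() is (15 + 1) >> 3 = 2
 * bytes; a 32-bit stack gives 4, and 64-bit mode gives sizeof(ulong) = 8.
 */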
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
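/*
 * Typical use of the helpers above (an illustrative sketch, not a call site
 * from this file):
 *
 *	struct segmented_address addr = { .seg = VCPU_SREG_DS, .ea = ea };
 *	rc = segmented_read_std(ctxt, addr, &buf, sizeof(buf));
 *
 * Segmentation, limit and privilege checks all happen in __linearize();
 * callers see either a linear address or a propagated #GP/#SS fault.
 */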
/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})
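/*
 * Note the implicit contract of insn_fetch()/insn_fetch_arr(): the caller
 * must declare a local 'int rc' and provide a 'done:' label, e.g.
 *
 *	sib = insn_fetch(u8, ctxt);	/- jumps to done on fetch failure -/
 *
 * as decode_modrm() and decode_abs() below do.
 */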
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; 921 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; 922 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break; 923 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; 924 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; 925 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; 926 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; 927 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; 928 #ifdef CONFIG_X86_64 929 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; 930 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break; 931 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; 932 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; 933 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; 934 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; 935 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; 936 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; 937 #endif 938 default: BUG(); 939 } 940 ctxt->ops->put_fpu(ctxt); 941 } 942 943 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 944 { 945 ctxt->ops->get_fpu(ctxt); 946 switch (reg) { 947 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; 948 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; 949 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; 950 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; 951 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; 952 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; 953 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; 954 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; 955 default: BUG(); 956 } 957 ctxt->ops->put_fpu(ctxt); 958 } 959 960 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 961 { 962 ctxt->ops->get_fpu(ctxt); 963 switch (reg) { 964 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; 965 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; 966 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; 967 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; 968 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; 969 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; 970 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; 971 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; 972 default: BUG(); 973 } 974 ctxt->ops->put_fpu(ctxt); 975 } 976 977 static int em_fninit(struct x86_emulate_ctxt *ctxt) 978 { 979 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 980 return emulate_nm(ctxt); 981 982 ctxt->ops->get_fpu(ctxt); 983 asm volatile("fninit"); 984 ctxt->ops->put_fpu(ctxt); 985 return X86EMUL_CONTINUE; 986 } 987 988 static int em_fnstcw(struct x86_emulate_ctxt *ctxt) 989 { 990 u16 fcw; 991 992 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 993 return emulate_nm(ctxt); 994 995 ctxt->ops->get_fpu(ctxt); 996 asm volatile("fnstcw %0": "+m"(fcw)); 997 ctxt->ops->put_fpu(ctxt); 998 999 /* force 2 byte destination */ 1000 ctxt->dst.bytes = 2; 1001 ctxt->dst.val = fcw; 1002 1003 return X86EMUL_CONTINUE; 1004 } 1005 1006 static int em_fnstsw(struct x86_emulate_ctxt *ctxt) 1007 { 1008 u16 fsw; 1009 1010 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1011 return emulate_nm(ctxt); 1012 1013 ctxt->ops->get_fpu(ctxt); 1014 asm volatile("fnstsw %0": "+m"(fsw)); 1015 ctxt->ops->put_fpu(ctxt); 1016 1017 /* force 2 byte destination */ 1018 ctxt->dst.bytes = 2; 1019 ctxt->dst.val = fsw; 1020 1021 return X86EMUL_CONTINUE; 1022 } 1023 1024 static void decode_register_operand(struct 
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2;	/* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}
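/*
 * Like segmented_write(), but performs an atomic compare-and-exchange against
 * the old value; writeback() below takes this path when the instruction
 * carried a LOCK prefix.
 */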
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
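/*
 * A selector with bit 2 (TI) set refers to the current LDT; otherwise the
 * descriptor is looked up in the GDT.
 */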
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;
	cpl = ctxt->ops->cpl(ctxt);

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}
	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment, or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
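/*
 * Worked example: loading DS with selector 0x2b (index 5, TI = 0, RPL = 3)
 * at CPL 3 reads GDT entry 5; the load succeeds only if that entry is a
 * present, readable user (DPL 3) segment, and any failure above reports
 * error code 0x28 (the selector with its RPL bits cleared).
 */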
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	int rc;

	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       op->addr.mem,
					       &op->orig_val,
					       &op->val,
					       op->bytes);
		else
			rc = segmented_write(ctxt,
					     op->addr.mem,
					     &op->val,
					     op->bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_MEM_STR:
		rc = segmented_write(ctxt,
				     op->addr.mem,
				     op->data,
				     op->bytes * op->count);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
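/*
 * In other words, the two handlers above implement the usual frame idiom:
 * ENTER size,0 behaves like "push rbp; mov rbp, rsp; sub rsp, size" (nonzero
 * nesting levels are punted to the caller as unhandleable), and LEAVE is
 * "mov rsp, rbp; pop rbp", with all register updates masked to the current
 * stack width.
 */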
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
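/*
 * For example, INT 0x10 emulated in real mode with the IVT at its usual base
 * of 0 pushes FLAGS, CS and IP, then fetches the new IP from linear address
 * 0x40 and the new CS from 0x42 (vector * 4 and vector * 4 + 2 above).
 */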
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;


	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	return em_pop(ctxt);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->op_bytes == 4)
		ctxt->_eip = (u32)ctxt->_eip;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
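/*
 * CMPXCHG r/m, reg: the accumulator is compared with the destination; on a
 * match ZF is set and the source is stored to the destination, otherwise ZF
 * is cleared and the destination value lands in the accumulator. The handler
 * below reuses the fastop em_cmp to compute the flags.
 */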
{
2035 /* Save real source value, then compare EAX against destination. */
2036 ctxt->src.orig_val = ctxt->src.val;
2037 ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
2038 fastop(ctxt, em_cmp);
2039
2040 if (ctxt->eflags & EFLG_ZF) {
2041 /* Success: write back to memory. */
2042 ctxt->dst.val = ctxt->src.orig_val;
2043 } else {
2044 /* Failure: write the value we saw to EAX. */
2045 ctxt->dst.type = OP_REG;
2046 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2047 }
2048 return X86EMUL_CONTINUE;
2049 }
2050
2051 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2052 {
2053 int seg = ctxt->src2.val;
2054 unsigned short sel;
2055 int rc;
2056
2057 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2058
2059 rc = load_segment_descriptor(ctxt, sel, seg);
2060 if (rc != X86EMUL_CONTINUE)
2061 return rc;
2062
2063 ctxt->dst.val = ctxt->src.val;
2064 return rc;
2065 }
2066
2067 static void
2068 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2069 struct desc_struct *cs, struct desc_struct *ss)
2070 {
2071 cs->l = 0; /* will be adjusted later */
2072 set_desc_base(cs, 0); /* flat segment */
2073 cs->g = 1; /* 4KB granularity */
2074 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2075 cs->type = 0x0b; /* Read, Execute, Accessed */
2076 cs->s = 1;
2077 cs->dpl = 0; /* will be adjusted later */
2078 cs->p = 1;
2079 cs->d = 1;
2080 cs->avl = 0;
2081
2082 set_desc_base(ss, 0); /* flat segment */
2083 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2084 ss->g = 1; /* 4KB granularity */
2085 ss->s = 1;
2086 ss->type = 0x03; /* Read/Write, Accessed */
2087 ss->d = 1; /* 32-bit stack segment */
2088 ss->dpl = 0;
2089 ss->p = 1;
2090 ss->l = 0;
2091 ss->avl = 0;
2092 }
2093
2094 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2095 {
2096 u32 eax, ebx, ecx, edx;
2097
2098 eax = ecx = 0;
2099 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2100 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2101 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2102 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2103 }
2104
2105 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2106 {
2107 const struct x86_emulate_ops *ops = ctxt->ops;
2108 u32 eax, ebx, ecx, edx;
2109
2110 /*
2111 * SYSCALL is always available in 64-bit mode, so the check only
2112 * becomes vendor-specific (via CPUID) in the other modes.
2113 */
2114 if (ctxt->mode == X86EMUL_MODE_PROT64)
2115 return true;
2116
2117 eax = 0x00000000;
2118 ecx = 0x00000000;
2119 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2120 /*
2121 * Intel ("GenuineIntel")
2122 * Intel CPUs recognize SYSCALL in 64-bit long mode only, so a
2123 * 32-bit compat application in a 64-bit guest raises #UD. The
2124 * emulator could mask this by providing the AMD behaviour, but
2125 * AMD CPUs cannot be made to raise #UD the way Intel ones do,
2126 * so the reported vendor decides.
2127 */
2128 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2129 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2130 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2131 return false;
2132
2133 /* AMD ("AuthenticAMD") */
2134 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2135 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2136 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2137 return true;
2138
2139 /* AMD ("AMDisbetter!") */
2140 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2141 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2142 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2143 return true;
2144
2145 /* default: (neither Intel nor AMD), apply Intel's stricter rules...
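(so an unrecognized vendor gets the Intel treatment, i.e. #UD outside 64-bit mode)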
*/ 2146 return false; 2147 } 2148 2149 static int em_syscall(struct x86_emulate_ctxt *ctxt) 2150 { 2151 const struct x86_emulate_ops *ops = ctxt->ops; 2152 struct desc_struct cs, ss; 2153 u64 msr_data; 2154 u16 cs_sel, ss_sel; 2155 u64 efer = 0; 2156 2157 /* syscall is not available in real mode */ 2158 if (ctxt->mode == X86EMUL_MODE_REAL || 2159 ctxt->mode == X86EMUL_MODE_VM86) 2160 return emulate_ud(ctxt); 2161 2162 if (!(em_syscall_is_enabled(ctxt))) 2163 return emulate_ud(ctxt); 2164 2165 ops->get_msr(ctxt, MSR_EFER, &efer); 2166 setup_syscalls_segments(ctxt, &cs, &ss); 2167 2168 if (!(efer & EFER_SCE)) 2169 return emulate_ud(ctxt); 2170 2171 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2172 msr_data >>= 32; 2173 cs_sel = (u16)(msr_data & 0xfffc); 2174 ss_sel = (u16)(msr_data + 8); 2175 2176 if (efer & EFER_LMA) { 2177 cs.d = 0; 2178 cs.l = 1; 2179 } 2180 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2181 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2182 2183 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; 2184 if (efer & EFER_LMA) { 2185 #ifdef CONFIG_X86_64 2186 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF; 2187 2188 ops->get_msr(ctxt, 2189 ctxt->mode == X86EMUL_MODE_PROT64 ? 2190 MSR_LSTAR : MSR_CSTAR, &msr_data); 2191 ctxt->_eip = msr_data; 2192 2193 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); 2194 ctxt->eflags &= ~(msr_data | EFLG_RF); 2195 #endif 2196 } else { 2197 /* legacy mode */ 2198 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2199 ctxt->_eip = (u32)msr_data; 2200 2201 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); 2202 } 2203 2204 return X86EMUL_CONTINUE; 2205 } 2206 2207 static int em_sysenter(struct x86_emulate_ctxt *ctxt) 2208 { 2209 const struct x86_emulate_ops *ops = ctxt->ops; 2210 struct desc_struct cs, ss; 2211 u64 msr_data; 2212 u16 cs_sel, ss_sel; 2213 u64 efer = 0; 2214 2215 ops->get_msr(ctxt, MSR_EFER, &efer); 2216 /* inject #GP if in real mode */ 2217 if (ctxt->mode == X86EMUL_MODE_REAL) 2218 return emulate_gp(ctxt, 0); 2219 2220 /* 2221 * Not recognized on AMD in compat mode (but is recognized in legacy 2222 * mode). 2223 */ 2224 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) 2225 && !vendor_intel(ctxt)) 2226 return emulate_ud(ctxt); 2227 2228 /* XXX sysenter/sysexit have not been tested in 64bit mode. 2229 * Therefore, we inject an #UD. 
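 * Note that real Intel hardware does accept SYSENTER in 64-bit mode,
 * so this is a conservative emulator limitation rather than an
 * architectural rule.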
2230 */ 2231 if (ctxt->mode == X86EMUL_MODE_PROT64) 2232 return emulate_ud(ctxt); 2233 2234 setup_syscalls_segments(ctxt, &cs, &ss); 2235 2236 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2237 switch (ctxt->mode) { 2238 case X86EMUL_MODE_PROT32: 2239 if ((msr_data & 0xfffc) == 0x0) 2240 return emulate_gp(ctxt, 0); 2241 break; 2242 case X86EMUL_MODE_PROT64: 2243 if (msr_data == 0x0) 2244 return emulate_gp(ctxt, 0); 2245 break; 2246 default: 2247 break; 2248 } 2249 2250 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); 2251 cs_sel = (u16)msr_data; 2252 cs_sel &= ~SELECTOR_RPL_MASK; 2253 ss_sel = cs_sel + 8; 2254 ss_sel &= ~SELECTOR_RPL_MASK; 2255 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { 2256 cs.d = 0; 2257 cs.l = 1; 2258 } 2259 2260 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2261 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2262 2263 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2264 ctxt->_eip = msr_data; 2265 2266 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2267 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; 2268 2269 return X86EMUL_CONTINUE; 2270 } 2271 2272 static int em_sysexit(struct x86_emulate_ctxt *ctxt) 2273 { 2274 const struct x86_emulate_ops *ops = ctxt->ops; 2275 struct desc_struct cs, ss; 2276 u64 msr_data; 2277 int usermode; 2278 u16 cs_sel = 0, ss_sel = 0; 2279 2280 /* inject #GP if in real mode or Virtual 8086 mode */ 2281 if (ctxt->mode == X86EMUL_MODE_REAL || 2282 ctxt->mode == X86EMUL_MODE_VM86) 2283 return emulate_gp(ctxt, 0); 2284 2285 setup_syscalls_segments(ctxt, &cs, &ss); 2286 2287 if ((ctxt->rex_prefix & 0x8) != 0x0) 2288 usermode = X86EMUL_MODE_PROT64; 2289 else 2290 usermode = X86EMUL_MODE_PROT32; 2291 2292 cs.dpl = 3; 2293 ss.dpl = 3; 2294 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2295 switch (usermode) { 2296 case X86EMUL_MODE_PROT32: 2297 cs_sel = (u16)(msr_data + 16); 2298 if ((msr_data & 0xfffc) == 0x0) 2299 return emulate_gp(ctxt, 0); 2300 ss_sel = (u16)(msr_data + 24); 2301 break; 2302 case X86EMUL_MODE_PROT64: 2303 cs_sel = (u16)(msr_data + 32); 2304 if (msr_data == 0x0) 2305 return emulate_gp(ctxt, 0); 2306 ss_sel = cs_sel + 8; 2307 cs.d = 0; 2308 cs.l = 1; 2309 break; 2310 } 2311 cs_sel |= SELECTOR_RPL_MASK; 2312 ss_sel |= SELECTOR_RPL_MASK; 2313 2314 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2315 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2316 2317 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); 2318 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); 2319 2320 return X86EMUL_CONTINUE; 2321 } 2322 2323 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) 2324 { 2325 int iopl; 2326 if (ctxt->mode == X86EMUL_MODE_REAL) 2327 return false; 2328 if (ctxt->mode == X86EMUL_MODE_VM86) 2329 return true; 2330 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; 2331 return ctxt->ops->cpl(ctxt) > iopl; 2332 } 2333 2334 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, 2335 u16 port, u16 len) 2336 { 2337 const struct x86_emulate_ops *ops = ctxt->ops; 2338 struct desc_struct tr_seg; 2339 u32 base3; 2340 int r; 2341 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; 2342 unsigned mask = (1 << len) - 1; 2343 unsigned long base; 2344 2345 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); 2346 if (!tr_seg.p) 2347 return false; 2348 if (desc_limit_scaled(&tr_seg) < 103) 2349 return false; 2350 base = get_desc_base(&tr_seg); 2351 #ifdef CONFIG_X86_64 2352 base |= ((u64)base3) << 32; 2353 #endif 2354 r = ops->read_std(ctxt, 
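/* 102 (0x66) is the offset of the I/O map base field in the 32-bit TSS */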
base + 102, &io_bitmap_ptr, 2, NULL); 2355 if (r != X86EMUL_CONTINUE) 2356 return false; 2357 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2358 return false; 2359 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); 2360 if (r != X86EMUL_CONTINUE) 2361 return false; 2362 if ((perm >> bit_idx) & mask) 2363 return false; 2364 return true; 2365 } 2366 2367 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, 2368 u16 port, u16 len) 2369 { 2370 if (ctxt->perm_ok) 2371 return true; 2372 2373 if (emulator_bad_iopl(ctxt)) 2374 if (!emulator_io_port_access_allowed(ctxt, port, len)) 2375 return false; 2376 2377 ctxt->perm_ok = true; 2378 2379 return true; 2380 } 2381 2382 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, 2383 struct tss_segment_16 *tss) 2384 { 2385 tss->ip = ctxt->_eip; 2386 tss->flag = ctxt->eflags; 2387 tss->ax = reg_read(ctxt, VCPU_REGS_RAX); 2388 tss->cx = reg_read(ctxt, VCPU_REGS_RCX); 2389 tss->dx = reg_read(ctxt, VCPU_REGS_RDX); 2390 tss->bx = reg_read(ctxt, VCPU_REGS_RBX); 2391 tss->sp = reg_read(ctxt, VCPU_REGS_RSP); 2392 tss->bp = reg_read(ctxt, VCPU_REGS_RBP); 2393 tss->si = reg_read(ctxt, VCPU_REGS_RSI); 2394 tss->di = reg_read(ctxt, VCPU_REGS_RDI); 2395 2396 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2397 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2398 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2399 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2400 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); 2401 } 2402 2403 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, 2404 struct tss_segment_16 *tss) 2405 { 2406 int ret; 2407 2408 ctxt->_eip = tss->ip; 2409 ctxt->eflags = tss->flag | 2; 2410 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; 2411 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; 2412 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; 2413 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; 2414 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; 2415 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; 2416 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; 2417 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; 2418 2419 /* 2420 * SDM says that segment selectors are loaded before segment 2421 * descriptors 2422 */ 2423 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); 2424 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 2425 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 2426 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 2427 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 2428 2429 /* 2430 * Now load segment descriptors. 
If fault happens at this stage 2431 * it is handled in a context of new task 2432 */ 2433 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR); 2434 if (ret != X86EMUL_CONTINUE) 2435 return ret; 2436 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); 2437 if (ret != X86EMUL_CONTINUE) 2438 return ret; 2439 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); 2440 if (ret != X86EMUL_CONTINUE) 2441 return ret; 2442 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); 2443 if (ret != X86EMUL_CONTINUE) 2444 return ret; 2445 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); 2446 if (ret != X86EMUL_CONTINUE) 2447 return ret; 2448 2449 return X86EMUL_CONTINUE; 2450 } 2451 2452 static int task_switch_16(struct x86_emulate_ctxt *ctxt, 2453 u16 tss_selector, u16 old_tss_sel, 2454 ulong old_tss_base, struct desc_struct *new_desc) 2455 { 2456 const struct x86_emulate_ops *ops = ctxt->ops; 2457 struct tss_segment_16 tss_seg; 2458 int ret; 2459 u32 new_tss_base = get_desc_base(new_desc); 2460 2461 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2462 &ctxt->exception); 2463 if (ret != X86EMUL_CONTINUE) 2464 /* FIXME: need to provide precise fault address */ 2465 return ret; 2466 2467 save_state_to_tss16(ctxt, &tss_seg); 2468 2469 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 2470 &ctxt->exception); 2471 if (ret != X86EMUL_CONTINUE) 2472 /* FIXME: need to provide precise fault address */ 2473 return ret; 2474 2475 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 2476 &ctxt->exception); 2477 if (ret != X86EMUL_CONTINUE) 2478 /* FIXME: need to provide precise fault address */ 2479 return ret; 2480 2481 if (old_tss_sel != 0xffff) { 2482 tss_seg.prev_task_link = old_tss_sel; 2483 2484 ret = ops->write_std(ctxt, new_tss_base, 2485 &tss_seg.prev_task_link, 2486 sizeof tss_seg.prev_task_link, 2487 &ctxt->exception); 2488 if (ret != X86EMUL_CONTINUE) 2489 /* FIXME: need to provide precise fault address */ 2490 return ret; 2491 } 2492 2493 return load_state_from_tss16(ctxt, &tss_seg); 2494 } 2495 2496 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, 2497 struct tss_segment_32 *tss) 2498 { 2499 tss->cr3 = ctxt->ops->get_cr(ctxt, 3); 2500 tss->eip = ctxt->_eip; 2501 tss->eflags = ctxt->eflags; 2502 tss->eax = reg_read(ctxt, VCPU_REGS_RAX); 2503 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); 2504 tss->edx = reg_read(ctxt, VCPU_REGS_RDX); 2505 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); 2506 tss->esp = reg_read(ctxt, VCPU_REGS_RSP); 2507 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); 2508 tss->esi = reg_read(ctxt, VCPU_REGS_RSI); 2509 tss->edi = reg_read(ctxt, VCPU_REGS_RDI); 2510 2511 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2512 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2513 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2514 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2515 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); 2516 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); 2517 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR); 2518 } 2519 2520 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, 2521 struct tss_segment_32 *tss) 2522 { 2523 int ret; 2524 2525 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) 2526 return emulate_gp(ctxt, 0); 2527 ctxt->_eip = tss->eip; 2528 ctxt->eflags = tss->eflags | 2; 2529 2530 /* General purpose registers */ 2531 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; 2532 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; 2533 
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2534 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2535 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2536 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2537 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2538 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2539
2540 /*
2541 * SDM says that segment selectors are loaded before segment
2542 * descriptors
2543 */
2544 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2545 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2546 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2547 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2548 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2549 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2550 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2551
2552 /*
2553 * If we're switching between Protected Mode and VM86, we need to make
2554 * sure to update the mode before loading the segment descriptors so
2555 * that the selectors are interpreted correctly.
2556 *
2557 * Need to get rflags to the vcpu struct immediately because it
2558 * influences the CPL which is checked at least when loading the segment
2559 * descriptors and when pushing an error code to the new kernel stack.
2560 *
2561 * TODO Introduce a separate ctxt->ops->set_cpl callback
2562 */
2563 if (ctxt->eflags & X86_EFLAGS_VM)
2564 ctxt->mode = X86EMUL_MODE_VM86;
2565 else
2566 ctxt->mode = X86EMUL_MODE_PROT32;
2567
2568 ctxt->ops->set_rflags(ctxt, ctxt->eflags);
2569
2570 /*
2571 * Now load segment descriptors. If a fault happens at this stage,
2572 * it is handled in the context of the new task.
2573 */
2574 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2575 if (ret != X86EMUL_CONTINUE)
2576 return ret;
2577 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2578 if (ret != X86EMUL_CONTINUE)
2579 return ret;
2580 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2581 if (ret != X86EMUL_CONTINUE)
2582 return ret;
2583 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2584 if (ret != X86EMUL_CONTINUE)
2585 return ret;
2586 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2587 if (ret != X86EMUL_CONTINUE)
2588 return ret;
2589 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2590 if (ret != X86EMUL_CONTINUE)
2591 return ret;
2592 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2593 if (ret != X86EMUL_CONTINUE)
2594 return ret;
2595
2596 return X86EMUL_CONTINUE;
2597 }
2598
2599 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2600 u16 tss_selector, u16 old_tss_sel,
2601 ulong old_tss_base, struct desc_struct *new_desc)
2602 {
2603 const struct x86_emulate_ops *ops = ctxt->ops;
2604 struct tss_segment_32 tss_seg;
2605 int ret;
2606 u32 new_tss_base = get_desc_base(new_desc);
2607
2608 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2609 &ctxt->exception);
2610 if (ret != X86EMUL_CONTINUE)
2611 /* FIXME: need to provide precise fault address */
2612 return ret;
2613
2614 save_state_to_tss32(ctxt, &tss_seg);
2615
2616 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2617 &ctxt->exception);
2618 if (ret != X86EMUL_CONTINUE)
2619 /* FIXME: need to provide precise fault address */
2620 return ret;
2621
2622 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2623 &ctxt->exception);
2624 if (ret != X86EMUL_CONTINUE)
2625 /* FIXME: need to provide precise fault address */
2626 return ret;
2627
2628 if (old_tss_sel != 0xffff) {
2629 tss_seg.prev_task_link =
old_tss_sel; 2630 2631 ret = ops->write_std(ctxt, new_tss_base, 2632 &tss_seg.prev_task_link, 2633 sizeof tss_seg.prev_task_link, 2634 &ctxt->exception); 2635 if (ret != X86EMUL_CONTINUE) 2636 /* FIXME: need to provide precise fault address */ 2637 return ret; 2638 } 2639 2640 return load_state_from_tss32(ctxt, &tss_seg); 2641 } 2642 2643 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, 2644 u16 tss_selector, int idt_index, int reason, 2645 bool has_error_code, u32 error_code) 2646 { 2647 const struct x86_emulate_ops *ops = ctxt->ops; 2648 struct desc_struct curr_tss_desc, next_tss_desc; 2649 int ret; 2650 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); 2651 ulong old_tss_base = 2652 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); 2653 u32 desc_limit; 2654 ulong desc_addr; 2655 2656 /* FIXME: old_tss_base == ~0 ? */ 2657 2658 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); 2659 if (ret != X86EMUL_CONTINUE) 2660 return ret; 2661 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); 2662 if (ret != X86EMUL_CONTINUE) 2663 return ret; 2664 2665 /* FIXME: check that next_tss_desc is tss */ 2666 2667 /* 2668 * Check privileges. The three cases are task switch caused by... 2669 * 2670 * 1. jmp/call/int to task gate: Check against DPL of the task gate 2671 * 2. Exception/IRQ/iret: No check is performed 2672 * 3. jmp/call to TSS: Check against DPL of the TSS 2673 */ 2674 if (reason == TASK_SWITCH_GATE) { 2675 if (idt_index != -1) { 2676 /* Software interrupts */ 2677 struct desc_struct task_gate_desc; 2678 int dpl; 2679 2680 ret = read_interrupt_descriptor(ctxt, idt_index, 2681 &task_gate_desc); 2682 if (ret != X86EMUL_CONTINUE) 2683 return ret; 2684 2685 dpl = task_gate_desc.dpl; 2686 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 2687 return emulate_gp(ctxt, (idt_index << 3) | 0x2); 2688 } 2689 } else if (reason != TASK_SWITCH_IRET) { 2690 int dpl = next_tss_desc.dpl; 2691 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 2692 return emulate_gp(ctxt, tss_selector); 2693 } 2694 2695 2696 desc_limit = desc_limit_scaled(&next_tss_desc); 2697 if (!next_tss_desc.p || 2698 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || 2699 desc_limit < 0x2b)) { 2700 emulate_ts(ctxt, tss_selector & 0xfffc); 2701 return X86EMUL_PROPAGATE_FAULT; 2702 } 2703 2704 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 2705 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ 2706 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); 2707 } 2708 2709 if (reason == TASK_SWITCH_IRET) 2710 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; 2711 2712 /* set back link to prev task only if NT bit is set in eflags 2713 note that old_tss_sel is not used after this point */ 2714 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) 2715 old_tss_sel = 0xffff; 2716 2717 if (next_tss_desc.type & 8) 2718 ret = task_switch_32(ctxt, tss_selector, old_tss_sel, 2719 old_tss_base, &next_tss_desc); 2720 else 2721 ret = task_switch_16(ctxt, tss_selector, old_tss_sel, 2722 old_tss_base, &next_tss_desc); 2723 if (ret != X86EMUL_CONTINUE) 2724 return ret; 2725 2726 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) 2727 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; 2728 2729 if (reason != TASK_SWITCH_IRET) { 2730 next_tss_desc.type |= (1 << 1); /* set busy flag */ 2731 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); 2732 } 2733 2734 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); 2735 
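/*
 * Make the incoming TSS current. CR0.TS was set above, as the
 * architecture requires on any task switch.
 */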
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); 2736 2737 if (has_error_code) { 2738 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; 2739 ctxt->lock_prefix = 0; 2740 ctxt->src.val = (unsigned long) error_code; 2741 ret = em_push(ctxt); 2742 } 2743 2744 return ret; 2745 } 2746 2747 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 2748 u16 tss_selector, int idt_index, int reason, 2749 bool has_error_code, u32 error_code) 2750 { 2751 int rc; 2752 2753 invalidate_registers(ctxt); 2754 ctxt->_eip = ctxt->eip; 2755 ctxt->dst.type = OP_NONE; 2756 2757 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, 2758 has_error_code, error_code); 2759 2760 if (rc == X86EMUL_CONTINUE) { 2761 ctxt->eip = ctxt->_eip; 2762 writeback_registers(ctxt); 2763 } 2764 2765 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 2766 } 2767 2768 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, 2769 struct operand *op) 2770 { 2771 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count; 2772 2773 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes); 2774 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg)); 2775 } 2776 2777 static int em_das(struct x86_emulate_ctxt *ctxt) 2778 { 2779 u8 al, old_al; 2780 bool af, cf, old_cf; 2781 2782 cf = ctxt->eflags & X86_EFLAGS_CF; 2783 al = ctxt->dst.val; 2784 2785 old_al = al; 2786 old_cf = cf; 2787 cf = false; 2788 af = ctxt->eflags & X86_EFLAGS_AF; 2789 if ((al & 0x0f) > 9 || af) { 2790 al -= 6; 2791 cf = old_cf | (al >= 250); 2792 af = true; 2793 } else { 2794 af = false; 2795 } 2796 if (old_al > 0x99 || old_cf) { 2797 al -= 0x60; 2798 cf = true; 2799 } 2800 2801 ctxt->dst.val = al; 2802 /* Set PF, ZF, SF */ 2803 ctxt->src.type = OP_IMM; 2804 ctxt->src.val = 0; 2805 ctxt->src.bytes = 1; 2806 fastop(ctxt, em_or); 2807 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); 2808 if (cf) 2809 ctxt->eflags |= X86_EFLAGS_CF; 2810 if (af) 2811 ctxt->eflags |= X86_EFLAGS_AF; 2812 return X86EMUL_CONTINUE; 2813 } 2814 2815 static int em_aam(struct x86_emulate_ctxt *ctxt) 2816 { 2817 u8 al, ah; 2818 2819 if (ctxt->src.val == 0) 2820 return emulate_de(ctxt); 2821 2822 al = ctxt->dst.val & 0xff; 2823 ah = al / ctxt->src.val; 2824 al %= ctxt->src.val; 2825 2826 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); 2827 2828 /* Set PF, ZF, SF */ 2829 ctxt->src.type = OP_IMM; 2830 ctxt->src.val = 0; 2831 ctxt->src.bytes = 1; 2832 fastop(ctxt, em_or); 2833 2834 return X86EMUL_CONTINUE; 2835 } 2836 2837 static int em_aad(struct x86_emulate_ctxt *ctxt) 2838 { 2839 u8 al = ctxt->dst.val & 0xff; 2840 u8 ah = (ctxt->dst.val >> 8) & 0xff; 2841 2842 al = (al + (ah * ctxt->src.val)) & 0xff; 2843 2844 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; 2845 2846 /* Set PF, ZF, SF */ 2847 ctxt->src.type = OP_IMM; 2848 ctxt->src.val = 0; 2849 ctxt->src.bytes = 1; 2850 fastop(ctxt, em_or); 2851 2852 return X86EMUL_CONTINUE; 2853 } 2854 2855 static int em_call(struct x86_emulate_ctxt *ctxt) 2856 { 2857 long rel = ctxt->src.val; 2858 2859 ctxt->src.val = (unsigned long)ctxt->_eip; 2860 jmp_rel(ctxt, rel); 2861 return em_push(ctxt); 2862 } 2863 2864 static int em_call_far(struct x86_emulate_ctxt *ctxt) 2865 { 2866 u16 sel, old_cs; 2867 ulong old_eip; 2868 int rc; 2869 2870 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2871 old_eip = ctxt->_eip; 2872 2873 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2874 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) 2875 return 
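/* XXX: if the CS load faults, the fault is dropped here and the
   far call is reported as having succeeded */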
X86EMUL_CONTINUE; 2876 2877 ctxt->_eip = 0; 2878 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); 2879 2880 ctxt->src.val = old_cs; 2881 rc = em_push(ctxt); 2882 if (rc != X86EMUL_CONTINUE) 2883 return rc; 2884 2885 ctxt->src.val = old_eip; 2886 return em_push(ctxt); 2887 } 2888 2889 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) 2890 { 2891 int rc; 2892 2893 ctxt->dst.type = OP_REG; 2894 ctxt->dst.addr.reg = &ctxt->_eip; 2895 ctxt->dst.bytes = ctxt->op_bytes; 2896 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 2897 if (rc != X86EMUL_CONTINUE) 2898 return rc; 2899 rsp_increment(ctxt, ctxt->src.val); 2900 return X86EMUL_CONTINUE; 2901 } 2902 2903 static int em_xchg(struct x86_emulate_ctxt *ctxt) 2904 { 2905 /* Write back the register source. */ 2906 ctxt->src.val = ctxt->dst.val; 2907 write_register_operand(&ctxt->src); 2908 2909 /* Write back the memory destination with implicit LOCK prefix. */ 2910 ctxt->dst.val = ctxt->src.orig_val; 2911 ctxt->lock_prefix = 1; 2912 return X86EMUL_CONTINUE; 2913 } 2914 2915 static int em_imul_3op(struct x86_emulate_ctxt *ctxt) 2916 { 2917 ctxt->dst.val = ctxt->src2.val; 2918 return fastop(ctxt, em_imul); 2919 } 2920 2921 static int em_cwd(struct x86_emulate_ctxt *ctxt) 2922 { 2923 ctxt->dst.type = OP_REG; 2924 ctxt->dst.bytes = ctxt->src.bytes; 2925 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 2926 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); 2927 2928 return X86EMUL_CONTINUE; 2929 } 2930 2931 static int em_rdtsc(struct x86_emulate_ctxt *ctxt) 2932 { 2933 u64 tsc = 0; 2934 2935 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); 2936 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; 2937 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; 2938 return X86EMUL_CONTINUE; 2939 } 2940 2941 static int em_rdpmc(struct x86_emulate_ctxt *ctxt) 2942 { 2943 u64 pmc; 2944 2945 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) 2946 return emulate_gp(ctxt, 0); 2947 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; 2948 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; 2949 return X86EMUL_CONTINUE; 2950 } 2951 2952 static int em_mov(struct x86_emulate_ctxt *ctxt) 2953 { 2954 memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes); 2955 return X86EMUL_CONTINUE; 2956 } 2957 2958 #define FFL(x) bit(X86_FEATURE_##x) 2959 2960 static int em_movbe(struct x86_emulate_ctxt *ctxt) 2961 { 2962 u32 ebx, ecx, edx, eax = 1; 2963 u16 tmp; 2964 2965 /* 2966 * Check MOVBE is set in the guest-visible CPUID leaf. 2967 */ 2968 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 2969 if (!(ecx & FFL(MOVBE))) 2970 return emulate_ud(ctxt); 2971 2972 switch (ctxt->op_bytes) { 2973 case 2: 2974 /* 2975 * From MOVBE definition: "...When the operand size is 16 bits, 2976 * the upper word of the destination register remains unchanged 2977 * ..." 2978 * 2979 * Both casting ->valptr and ->val to u16 breaks strict aliasing 2980 * rules so we have to do the operation almost per hand. 2981 */ 2982 tmp = (u16)ctxt->src.val; 2983 ctxt->dst.val &= ~0xffffUL; 2984 ctxt->dst.val |= (unsigned long)swab16(tmp); 2985 break; 2986 case 4: 2987 ctxt->dst.val = swab32((u32)ctxt->src.val); 2988 break; 2989 case 8: 2990 ctxt->dst.val = swab64(ctxt->src.val); 2991 break; 2992 default: 2993 return X86EMUL_PROPAGATE_FAULT; 2994 } 2995 return X86EMUL_CONTINUE; 2996 } 2997 2998 static int em_cr_write(struct x86_emulate_ctxt *ctxt) 2999 { 3000 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) 3001 return emulate_gp(ctxt, 0); 3002 3003 /* Disable writeback. 
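The control register was already updated through ->set_cr above, so there is nothing left to write back.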
*/ 3004 ctxt->dst.type = OP_NONE; 3005 return X86EMUL_CONTINUE; 3006 } 3007 3008 static int em_dr_write(struct x86_emulate_ctxt *ctxt) 3009 { 3010 unsigned long val; 3011 3012 if (ctxt->mode == X86EMUL_MODE_PROT64) 3013 val = ctxt->src.val & ~0ULL; 3014 else 3015 val = ctxt->src.val & ~0U; 3016 3017 /* #UD condition is already handled. */ 3018 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) 3019 return emulate_gp(ctxt, 0); 3020 3021 /* Disable writeback. */ 3022 ctxt->dst.type = OP_NONE; 3023 return X86EMUL_CONTINUE; 3024 } 3025 3026 static int em_wrmsr(struct x86_emulate_ctxt *ctxt) 3027 { 3028 u64 msr_data; 3029 3030 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) 3031 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); 3032 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) 3033 return emulate_gp(ctxt, 0); 3034 3035 return X86EMUL_CONTINUE; 3036 } 3037 3038 static int em_rdmsr(struct x86_emulate_ctxt *ctxt) 3039 { 3040 u64 msr_data; 3041 3042 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) 3043 return emulate_gp(ctxt, 0); 3044 3045 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; 3046 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; 3047 return X86EMUL_CONTINUE; 3048 } 3049 3050 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) 3051 { 3052 if (ctxt->modrm_reg > VCPU_SREG_GS) 3053 return emulate_ud(ctxt); 3054 3055 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); 3056 return X86EMUL_CONTINUE; 3057 } 3058 3059 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) 3060 { 3061 u16 sel = ctxt->src.val; 3062 3063 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) 3064 return emulate_ud(ctxt); 3065 3066 if (ctxt->modrm_reg == VCPU_SREG_SS) 3067 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 3068 3069 /* Disable writeback. */ 3070 ctxt->dst.type = OP_NONE; 3071 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); 3072 } 3073 3074 static int em_lldt(struct x86_emulate_ctxt *ctxt) 3075 { 3076 u16 sel = ctxt->src.val; 3077 3078 /* Disable writeback. */ 3079 ctxt->dst.type = OP_NONE; 3080 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); 3081 } 3082 3083 static int em_ltr(struct x86_emulate_ctxt *ctxt) 3084 { 3085 u16 sel = ctxt->src.val; 3086 3087 /* Disable writeback. */ 3088 ctxt->dst.type = OP_NONE; 3089 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); 3090 } 3091 3092 static int em_invlpg(struct x86_emulate_ctxt *ctxt) 3093 { 3094 int rc; 3095 ulong linear; 3096 3097 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); 3098 if (rc == X86EMUL_CONTINUE) 3099 ctxt->ops->invlpg(ctxt, linear); 3100 /* Disable writeback. */ 3101 ctxt->dst.type = OP_NONE; 3102 return X86EMUL_CONTINUE; 3103 } 3104 3105 static int em_clts(struct x86_emulate_ctxt *ctxt) 3106 { 3107 ulong cr0; 3108 3109 cr0 = ctxt->ops->get_cr(ctxt, 0); 3110 cr0 &= ~X86_CR0_TS; 3111 ctxt->ops->set_cr(ctxt, 0, cr0); 3112 return X86EMUL_CONTINUE; 3113 } 3114 3115 static int em_vmcall(struct x86_emulate_ctxt *ctxt) 3116 { 3117 int rc; 3118 3119 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1) 3120 return X86EMUL_UNHANDLEABLE; 3121 3122 rc = ctxt->ops->fix_hypercall(ctxt); 3123 if (rc != X86EMUL_CONTINUE) 3124 return rc; 3125 3126 /* Let the processor re-execute the fixed hypercall */ 3127 ctxt->_eip = ctxt->eip; 3128 /* Disable writeback. 
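Nothing was computed here; the guest re-executes the patched hypercall itself.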
*/ 3129 ctxt->dst.type = OP_NONE; 3130 return X86EMUL_CONTINUE; 3131 } 3132 3133 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, 3134 void (*get)(struct x86_emulate_ctxt *ctxt, 3135 struct desc_ptr *ptr)) 3136 { 3137 struct desc_ptr desc_ptr; 3138 3139 if (ctxt->mode == X86EMUL_MODE_PROT64) 3140 ctxt->op_bytes = 8; 3141 get(ctxt, &desc_ptr); 3142 if (ctxt->op_bytes == 2) { 3143 ctxt->op_bytes = 4; 3144 desc_ptr.address &= 0x00ffffff; 3145 } 3146 /* Disable writeback. */ 3147 ctxt->dst.type = OP_NONE; 3148 return segmented_write(ctxt, ctxt->dst.addr.mem, 3149 &desc_ptr, 2 + ctxt->op_bytes); 3150 } 3151 3152 static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3153 { 3154 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); 3155 } 3156 3157 static int em_sidt(struct x86_emulate_ctxt *ctxt) 3158 { 3159 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); 3160 } 3161 3162 static int em_lgdt(struct x86_emulate_ctxt *ctxt) 3163 { 3164 struct desc_ptr desc_ptr; 3165 int rc; 3166 3167 if (ctxt->mode == X86EMUL_MODE_PROT64) 3168 ctxt->op_bytes = 8; 3169 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3170 &desc_ptr.size, &desc_ptr.address, 3171 ctxt->op_bytes); 3172 if (rc != X86EMUL_CONTINUE) 3173 return rc; 3174 ctxt->ops->set_gdt(ctxt, &desc_ptr); 3175 /* Disable writeback. */ 3176 ctxt->dst.type = OP_NONE; 3177 return X86EMUL_CONTINUE; 3178 } 3179 3180 static int em_vmmcall(struct x86_emulate_ctxt *ctxt) 3181 { 3182 int rc; 3183 3184 rc = ctxt->ops->fix_hypercall(ctxt); 3185 3186 /* Disable writeback. */ 3187 ctxt->dst.type = OP_NONE; 3188 return rc; 3189 } 3190 3191 static int em_lidt(struct x86_emulate_ctxt *ctxt) 3192 { 3193 struct desc_ptr desc_ptr; 3194 int rc; 3195 3196 if (ctxt->mode == X86EMUL_MODE_PROT64) 3197 ctxt->op_bytes = 8; 3198 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3199 &desc_ptr.size, &desc_ptr.address, 3200 ctxt->op_bytes); 3201 if (rc != X86EMUL_CONTINUE) 3202 return rc; 3203 ctxt->ops->set_idt(ctxt, &desc_ptr); 3204 /* Disable writeback. */ 3205 ctxt->dst.type = OP_NONE; 3206 return X86EMUL_CONTINUE; 3207 } 3208 3209 static int em_smsw(struct x86_emulate_ctxt *ctxt) 3210 { 3211 ctxt->dst.bytes = 2; 3212 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); 3213 return X86EMUL_CONTINUE; 3214 } 3215 3216 static int em_lmsw(struct x86_emulate_ctxt *ctxt) 3217 { 3218 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) 3219 | (ctxt->src.val & 0x0f)); 3220 ctxt->dst.type = OP_NONE; 3221 return X86EMUL_CONTINUE; 3222 } 3223 3224 static int em_loop(struct x86_emulate_ctxt *ctxt) 3225 { 3226 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); 3227 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3228 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3229 jmp_rel(ctxt, ctxt->src.val); 3230 3231 return X86EMUL_CONTINUE; 3232 } 3233 3234 static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3235 { 3236 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3237 jmp_rel(ctxt, ctxt->src.val); 3238 3239 return X86EMUL_CONTINUE; 3240 } 3241 3242 static int em_in(struct x86_emulate_ctxt *ctxt) 3243 { 3244 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, 3245 &ctxt->dst.val)) 3246 return X86EMUL_IO_NEEDED; 3247 3248 return X86EMUL_CONTINUE; 3249 } 3250 3251 static int em_out(struct x86_emulate_ctxt *ctxt) 3252 { 3253 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, 3254 &ctxt->src.val, 1); 3255 /* Disable writeback. 
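The data has already been sent to the port by ->pio_out_emulated.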
*/ 3256 ctxt->dst.type = OP_NONE; 3257 return X86EMUL_CONTINUE; 3258 } 3259 3260 static int em_cli(struct x86_emulate_ctxt *ctxt) 3261 { 3262 if (emulator_bad_iopl(ctxt)) 3263 return emulate_gp(ctxt, 0); 3264 3265 ctxt->eflags &= ~X86_EFLAGS_IF; 3266 return X86EMUL_CONTINUE; 3267 } 3268 3269 static int em_sti(struct x86_emulate_ctxt *ctxt) 3270 { 3271 if (emulator_bad_iopl(ctxt)) 3272 return emulate_gp(ctxt, 0); 3273 3274 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3275 ctxt->eflags |= X86_EFLAGS_IF; 3276 return X86EMUL_CONTINUE; 3277 } 3278 3279 static int em_cpuid(struct x86_emulate_ctxt *ctxt) 3280 { 3281 u32 eax, ebx, ecx, edx; 3282 3283 eax = reg_read(ctxt, VCPU_REGS_RAX); 3284 ecx = reg_read(ctxt, VCPU_REGS_RCX); 3285 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 3286 *reg_write(ctxt, VCPU_REGS_RAX) = eax; 3287 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; 3288 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; 3289 *reg_write(ctxt, VCPU_REGS_RDX) = edx; 3290 return X86EMUL_CONTINUE; 3291 } 3292 3293 static int em_sahf(struct x86_emulate_ctxt *ctxt) 3294 { 3295 u32 flags; 3296 3297 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; 3298 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; 3299 3300 ctxt->eflags &= ~0xffUL; 3301 ctxt->eflags |= flags | X86_EFLAGS_FIXED; 3302 return X86EMUL_CONTINUE; 3303 } 3304 3305 static int em_lahf(struct x86_emulate_ctxt *ctxt) 3306 { 3307 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; 3308 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; 3309 return X86EMUL_CONTINUE; 3310 } 3311 3312 static int em_bswap(struct x86_emulate_ctxt *ctxt) 3313 { 3314 switch (ctxt->op_bytes) { 3315 #ifdef CONFIG_X86_64 3316 case 8: 3317 asm("bswap %0" : "+r"(ctxt->dst.val)); 3318 break; 3319 #endif 3320 default: 3321 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); 3322 break; 3323 } 3324 return X86EMUL_CONTINUE; 3325 } 3326 3327 static bool valid_cr(int nr) 3328 { 3329 switch (nr) { 3330 case 0: 3331 case 2 ... 
4: 3332 case 8: 3333 return true; 3334 default: 3335 return false; 3336 } 3337 } 3338 3339 static int check_cr_read(struct x86_emulate_ctxt *ctxt) 3340 { 3341 if (!valid_cr(ctxt->modrm_reg)) 3342 return emulate_ud(ctxt); 3343 3344 return X86EMUL_CONTINUE; 3345 } 3346 3347 static int check_cr_write(struct x86_emulate_ctxt *ctxt) 3348 { 3349 u64 new_val = ctxt->src.val64; 3350 int cr = ctxt->modrm_reg; 3351 u64 efer = 0; 3352 3353 static u64 cr_reserved_bits[] = { 3354 0xffffffff00000000ULL, 3355 0, 0, 0, /* CR3 checked later */ 3356 CR4_RESERVED_BITS, 3357 0, 0, 0, 3358 CR8_RESERVED_BITS, 3359 }; 3360 3361 if (!valid_cr(cr)) 3362 return emulate_ud(ctxt); 3363 3364 if (new_val & cr_reserved_bits[cr]) 3365 return emulate_gp(ctxt, 0); 3366 3367 switch (cr) { 3368 case 0: { 3369 u64 cr4; 3370 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || 3371 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) 3372 return emulate_gp(ctxt, 0); 3373 3374 cr4 = ctxt->ops->get_cr(ctxt, 4); 3375 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3376 3377 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && 3378 !(cr4 & X86_CR4_PAE)) 3379 return emulate_gp(ctxt, 0); 3380 3381 break; 3382 } 3383 case 3: { 3384 u64 rsvd = 0; 3385 3386 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3387 if (efer & EFER_LMA) 3388 rsvd = CR3_L_MODE_RESERVED_BITS; 3389 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE) 3390 rsvd = CR3_PAE_RESERVED_BITS; 3391 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG) 3392 rsvd = CR3_NONPAE_RESERVED_BITS; 3393 3394 if (new_val & rsvd) 3395 return emulate_gp(ctxt, 0); 3396 3397 break; 3398 } 3399 case 4: { 3400 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3401 3402 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) 3403 return emulate_gp(ctxt, 0); 3404 3405 break; 3406 } 3407 } 3408 3409 return X86EMUL_CONTINUE; 3410 } 3411 3412 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) 3413 { 3414 unsigned long dr7; 3415 3416 ctxt->ops->get_dr(ctxt, 7, &dr7); 3417 3418 /* Check if DR7.Global_Enable is set */ 3419 return dr7 & (1 << 13); 3420 } 3421 3422 static int check_dr_read(struct x86_emulate_ctxt *ctxt) 3423 { 3424 int dr = ctxt->modrm_reg; 3425 u64 cr4; 3426 3427 if (dr > 7) 3428 return emulate_ud(ctxt); 3429 3430 cr4 = ctxt->ops->get_cr(ctxt, 4); 3431 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) 3432 return emulate_ud(ctxt); 3433 3434 if (check_dr7_gd(ctxt)) 3435 return emulate_db(ctxt); 3436 3437 return X86EMUL_CONTINUE; 3438 } 3439 3440 static int check_dr_write(struct x86_emulate_ctxt *ctxt) 3441 { 3442 u64 new_val = ctxt->src.val64; 3443 int dr = ctxt->modrm_reg; 3444 3445 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) 3446 return emulate_gp(ctxt, 0); 3447 3448 return check_dr_read(ctxt); 3449 } 3450 3451 static int check_svme(struct x86_emulate_ctxt *ctxt) 3452 { 3453 u64 efer; 3454 3455 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 3456 3457 if (!(efer & EFER_SVME)) 3458 return emulate_ud(ctxt); 3459 3460 return X86EMUL_CONTINUE; 3461 } 3462 3463 static int check_svme_pa(struct x86_emulate_ctxt *ctxt) 3464 { 3465 u64 rax = reg_read(ctxt, VCPU_REGS_RAX); 3466 3467 /* Valid physical address? 
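The SVM instructions take a physical address in RAX; a value with any of bits 63:48 set cannot be a valid physical address and is rejected with #GP.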
*/ 3468 if (rax & 0xffff000000000000ULL) 3469 return emulate_gp(ctxt, 0); 3470 3471 return check_svme(ctxt); 3472 } 3473 3474 static int check_rdtsc(struct x86_emulate_ctxt *ctxt) 3475 { 3476 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3477 3478 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) 3479 return emulate_ud(ctxt); 3480 3481 return X86EMUL_CONTINUE; 3482 } 3483 3484 static int check_rdpmc(struct x86_emulate_ctxt *ctxt) 3485 { 3486 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3487 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); 3488 3489 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || 3490 (rcx > 3)) 3491 return emulate_gp(ctxt, 0); 3492 3493 return X86EMUL_CONTINUE; 3494 } 3495 3496 static int check_perm_in(struct x86_emulate_ctxt *ctxt) 3497 { 3498 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); 3499 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) 3500 return emulate_gp(ctxt, 0); 3501 3502 return X86EMUL_CONTINUE; 3503 } 3504 3505 static int check_perm_out(struct x86_emulate_ctxt *ctxt) 3506 { 3507 ctxt->src.bytes = min(ctxt->src.bytes, 4u); 3508 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) 3509 return emulate_gp(ctxt, 0); 3510 3511 return X86EMUL_CONTINUE; 3512 } 3513 3514 #define D(_y) { .flags = (_y) } 3515 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i } 3516 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \ 3517 .check_perm = (_p) } 3518 #define N D(NotImpl) 3519 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } 3520 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } 3521 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } 3522 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } 3523 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } 3524 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } 3525 #define II(_f, _e, _i) \ 3526 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } 3527 #define IIP(_f, _e, _i, _p) \ 3528 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \ 3529 .check_perm = (_p) } 3530 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } 3531 3532 #define D2bv(_f) D((_f) | ByteOp), D(_f) 3533 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) 3534 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) 3535 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) 3536 #define I2bvIP(_f, _e, _i, _p) \ 3537 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) 3538 3539 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ 3540 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ 3541 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) 3542 3543 static const struct opcode group7_rm1[] = { 3544 DI(SrcNone | Priv, monitor), 3545 DI(SrcNone | Priv, mwait), 3546 N, N, N, N, N, N, 3547 }; 3548 3549 static const struct opcode group7_rm3[] = { 3550 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), 3551 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), 3552 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), 3553 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), 3554 DIP(SrcNone | Prot | Priv, stgi, check_svme), 3555 DIP(SrcNone | Prot | Priv, clgi, check_svme), 3556 DIP(SrcNone | Prot | Priv, skinit, check_svme), 3557 DIP(SrcNone | Prot | Priv, invlpga, check_svme), 3558 }; 3559 3560 static const struct opcode group7_rm7[] = { 3561 N, 3562 DIP(SrcNone, rdtscp, check_rdtsc), 3563 N, N, N, N, N, N, 3564 }; 3565 3566 static const struct opcode group1[] = 
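/* 0x80 - 0x83 /reg: ADD, OR, ADC, SBB, AND, SUB, XOR, CMP */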
{
3567 F(Lock, em_add),
3568 F(Lock | PageTable, em_or),
3569 F(Lock, em_adc),
3570 F(Lock, em_sbb),
3571 F(Lock | PageTable, em_and),
3572 F(Lock, em_sub),
3573 F(Lock, em_xor),
3574 F(NoWrite, em_cmp),
3575 };
3576
3577 static const struct opcode group1A[] = {
3578 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3579 };
3580
3581 static const struct opcode group2[] = {
3582 F(DstMem | ModRM, em_rol),
3583 F(DstMem | ModRM, em_ror),
3584 F(DstMem | ModRM, em_rcl),
3585 F(DstMem | ModRM, em_rcr),
3586 F(DstMem | ModRM, em_shl),
3587 F(DstMem | ModRM, em_shr),
3588 F(DstMem | ModRM, em_shl),
3589 F(DstMem | ModRM, em_sar),
3590 };
3591
3592 static const struct opcode group3[] = {
3593 F(DstMem | SrcImm | NoWrite, em_test),
3594 F(DstMem | SrcImm | NoWrite, em_test),
3595 F(DstMem | SrcNone | Lock, em_not),
3596 F(DstMem | SrcNone | Lock, em_neg),
3597 F(DstXacc | Src2Mem, em_mul_ex),
3598 F(DstXacc | Src2Mem, em_imul_ex),
3599 F(DstXacc | Src2Mem, em_div_ex),
3600 F(DstXacc | Src2Mem, em_idiv_ex),
3601 };
3602
3603 static const struct opcode group4[] = {
3604 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3605 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3606 N, N, N, N, N, N,
3607 };
3608
3609 static const struct opcode group5[] = {
3610 F(DstMem | SrcNone | Lock, em_inc),
3611 F(DstMem | SrcNone | Lock, em_dec),
3612 I(SrcMem | Stack, em_grp45),
3613 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3614 I(SrcMem | Stack, em_grp45),
3615 I(SrcMemFAddr | ImplicitOps, em_grp45),
3616 I(SrcMem | Stack, em_grp45), D(Undefined),
3617 };
3618
3619 static const struct opcode group6[] = {
3620 DI(Prot, sldt),
3621 DI(Prot, str),
3622 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3623 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3624 N, N, N, N,
3625 };
3626
3627 static const struct group_dual group7 = { {
3628 II(Mov | DstMem | Priv, em_sgdt, sgdt),
3629 II(Mov | DstMem | Priv, em_sidt, sidt),
3630 II(SrcMem | Priv, em_lgdt, lgdt),
3631 II(SrcMem | Priv, em_lidt, lidt),
3632 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3633 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3634 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3635 }, {
3636 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3637 EXT(0, group7_rm1),
3638 N, EXT(0, group7_rm3),
3639 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3640 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3641 EXT(0, group7_rm7),
3642 } };
3643
3644 static const struct opcode group8[] = {
3645 N, N, N, N,
3646 F(DstMem | SrcImmByte | NoWrite, em_bt),
3647 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3648 F(DstMem | SrcImmByte | Lock, em_btr),
3649 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3650 };
3651
3652 static const struct group_dual group9 = { {
3653 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3654 }, {
3655 N, N, N, N, N, N, N, N,
3656 } };
3657
3658 static const struct opcode group11[] = {
3659 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3660 X7(D(Undefined)),
3661 };
3662
3663 static const struct gprefix pfx_0f_6f_0f_7f = {
3664 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3665 };
3666
3667 static const struct gprefix pfx_vmovntpx = {
3668 I(0, em_mov), N, N, N,
3669 };
3670
3671 static const struct escape escape_d9 = { {
3672 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3673 }, {
3674 /* 0xC0 - 0xC7 */
3675 N, N, N, N, N, N, N, N,
3676 /* 0xC8 - 0xCF */
3677 N, N, N, N, N, N, N, N,
3678 /* 0xD0 - 0xD7 */
3679 N, N, N, N, N, N, N, N,
3680 /* 0xD8 - 0xDF */
3681 N, N, N, N, N, N, N, N,
3682 /* 0xE0 - 0xE7 */
3683 N, N, N, N, N, N, N, N,
3684 /* 0xE8 - 0xEF */
3685 N, N, N, N, N, N, N, N,
3686 /* 0xF0 - 0xF7 */
3687 N, N, N, N, N, N, N, N,
3688 /* 0xF8 - 0xFF */
3689 N, N, N, N, N, N, N, N,
3690 } };
3691
3692 static const struct escape escape_db = { {
3693 N, N, N, N, N, N, N, N,
3694 }, {
3695 /* 0xC0 - 0xC7 */
3696 N, N, N, N, N, N, N, N,
3697 /* 0xC8 - 0xCF */
3698 N, N, N, N, N, N, N, N,
3699 /* 0xD0 - 0xD7 */
3700 N, N, N, N, N, N, N, N,
3701 /* 0xD8 - 0xDF */
3702 N, N, N, N, N, N, N, N,
3703 /* 0xE0 - 0xE7 */
3704 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3705 /* 0xE8 - 0xEF */
3706 N, N, N, N, N, N, N, N,
3707 /* 0xF0 - 0xF7 */
3708 N, N, N, N, N, N, N, N,
3709 /* 0xF8 - 0xFF */
3710 N, N, N, N, N, N, N, N,
3711 } };
3712
3713 static const struct escape escape_dd = { {
3714 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3715 }, {
3716 /* 0xC0 - 0xC7 */
3717 N, N, N, N, N, N, N, N,
3718 /* 0xC8 - 0xCF */
3719 N, N, N, N, N, N, N, N,
3720 /* 0xD0 - 0xD7 */
3721 N, N, N, N, N, N, N, N,
3722 /* 0xD8 - 0xDF */
3723 N, N, N, N, N, N, N, N,
3724 /* 0xE0 - 0xE7 */
3725 N, N, N, N, N, N, N, N,
3726 /* 0xE8 - 0xEF */
3727 N, N, N, N, N, N, N, N,
3728 /* 0xF0 - 0xF7 */
3729 N, N, N, N, N, N, N, N,
3730 /* 0xF8 - 0xFF */
3731 N, N, N, N, N, N, N, N,
3732 } };
3733
3734 static const struct opcode opcode_table[256] = {
3735 /* 0x00 - 0x07 */
3736 F6ALU(Lock, em_add),
3737 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3738 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3739 /* 0x08 - 0x0F */
3740 F6ALU(Lock | PageTable, em_or),
3741 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3742 N,
3743 /* 0x10 - 0x17 */
3744 F6ALU(Lock, em_adc),
3745 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3746 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3747 /* 0x18 - 0x1F */
3748 F6ALU(Lock, em_sbb),
3749 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3750 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3751 /* 0x20 - 0x27 */
3752 F6ALU(Lock | PageTable, em_and), N, N,
3753 /* 0x28 - 0x2F */
3754 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3755 /* 0x30 - 0x37 */
3756 F6ALU(Lock, em_xor), N, N,
3757 /* 0x38 - 0x3F */
3758 F6ALU(NoWrite, em_cmp), N, N,
3759 /* 0x40 - 0x4F */
3760 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3761 /* 0x50 - 0x57 */
3762 X8(I(SrcReg | Stack, em_push)),
3763 /* 0x58 - 0x5F */
3764 X8(I(DstReg | Stack, em_pop)),
3765 /* 0x60 - 0x67 */
3766 I(ImplicitOps | Stack | No64, em_pusha),
3767 I(ImplicitOps | Stack | No64, em_popa),
3768 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3769 N, N, N, N,
3770 /* 0x68 - 0x6F */
3771 I(SrcImm | Mov | Stack, em_push),
3772 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3773 I(SrcImmByte | Mov | Stack, em_push),
3774 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3775 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3776 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3777 /* 0x70 - 0x7F */
3778 X16(D(SrcImmByte)),
3779 /* 0x80 - 0x87 */
3780 G(ByteOp | DstMem | SrcImm, group1),
3781 G(DstMem | SrcImm, group1),
3782 G(ByteOp | DstMem | SrcImm | No64, group1),
3783 G(DstMem | SrcImmByte, group1),
3784 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3785 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3786 /* 0x88 - 0x8F */
3787 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3788 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3789
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), 3790 D(ModRM | SrcMem | NoAccess | DstReg), 3791 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), 3792 G(0, group1A), 3793 /* 0x90 - 0x97 */ 3794 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), 3795 /* 0x98 - 0x9F */ 3796 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), 3797 I(SrcImmFAddr | No64, em_call_far), N, 3798 II(ImplicitOps | Stack, em_pushf, pushf), 3799 II(ImplicitOps | Stack, em_popf, popf), 3800 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), 3801 /* 0xA0 - 0xA7 */ 3802 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), 3803 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), 3804 I2bv(SrcSI | DstDI | Mov | String, em_mov), 3805 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp), 3806 /* 0xA8 - 0xAF */ 3807 F2bv(DstAcc | SrcImm | NoWrite, em_test), 3808 I2bv(SrcAcc | DstDI | Mov | String, em_mov), 3809 I2bv(SrcSI | DstAcc | Mov | String, em_mov), 3810 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), 3811 /* 0xB0 - 0xB7 */ 3812 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), 3813 /* 0xB8 - 0xBF */ 3814 X8(I(DstReg | SrcImm64 | Mov, em_mov)), 3815 /* 0xC0 - 0xC7 */ 3816 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), 3817 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), 3818 I(ImplicitOps | Stack, em_ret), 3819 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), 3820 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), 3821 G(ByteOp, group11), G(0, group11), 3822 /* 0xC8 - 0xCF */ 3823 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), 3824 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), 3825 I(ImplicitOps | Stack, em_ret_far), 3826 D(ImplicitOps), DI(SrcImmByte, intn), 3827 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), 3828 /* 0xD0 - 0xD7 */ 3829 G(Src2One | ByteOp, group2), G(Src2One, group2), 3830 G(Src2CL | ByteOp, group2), G(Src2CL, group2), 3831 I(DstAcc | SrcImmUByte | No64, em_aam), 3832 I(DstAcc | SrcImmUByte | No64, em_aad), 3833 F(DstAcc | ByteOp | No64, em_salc), 3834 I(DstAcc | SrcXLat | ByteOp, em_mov), 3835 /* 0xD8 - 0xDF */ 3836 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, 3837 /* 0xE0 - 0xE7 */ 3838 X3(I(SrcImmByte, em_loop)), 3839 I(SrcImmByte, em_jcxz), 3840 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), 3841 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), 3842 /* 0xE8 - 0xEF */ 3843 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps), 3844 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps), 3845 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), 3846 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), 3847 /* 0xF0 - 0xF7 */ 3848 N, DI(ImplicitOps, icebp), N, N, 3849 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 3850 G(ByteOp, group3), G(0, group3), 3851 /* 0xF8 - 0xFF */ 3852 D(ImplicitOps), D(ImplicitOps), 3853 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), 3854 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), 3855 }; 3856 3857 static const struct opcode twobyte_table[256] = { 3858 /* 0x00 - 0x0F */ 3859 G(0, group6), GD(0, &group7), N, N, 3860 N, I(ImplicitOps | EmulateOnUD, em_syscall), 3861 II(ImplicitOps | Priv, em_clts, clts), N, 3862 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 3863 N, D(ImplicitOps | ModRM), N, N, 3864 /* 0x10 - 0x1F */ 3865 N, N, N, N, N, N, N, N, 3866 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), 3867 /* 0x20 - 0x2F */ 3868 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), 3869 DIP(ModRM | 
DstMem | Priv | Op3264, dr_read, check_dr_read),
3870 IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
3871 IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
3872 N, N, N, N,
3873 N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
3874 N, N, N, N,
3875 /* 0x30 - 0x3F */
3876 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
3877 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3878 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
3879 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
3880 I(ImplicitOps | EmulateOnUD, em_sysenter),
3881 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
3882 N, N,
3883 N, N, N, N, N, N, N, N,
3884 /* 0x40 - 0x4F */
3885 X16(D(DstReg | SrcMem | ModRM | Mov)),
3886 /* 0x50 - 0x5F */
3887 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3888 /* 0x60 - 0x6F */
3889 N, N, N, N,
3890 N, N, N, N,
3891 N, N, N, N,
3892 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3893 /* 0x70 - 0x7F */
3894 N, N, N, N,
3895 N, N, N, N,
3896 N, N, N, N,
3897 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3898 /* 0x80 - 0x8F */
3899 X16(D(SrcImm)),
3900 /* 0x90 - 0x9F */
3901 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
3902 /* 0xA0 - 0xA7 */
3903 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3904 II(ImplicitOps, em_cpuid, cpuid),
3905 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
3906 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
3907 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
3908 /* 0xA8 - 0xAF */
3909 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3910 DI(ImplicitOps, rsm),
3911 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
3912 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
3913 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
3914 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
3915 /* 0xB0 - 0xB7 */
3916 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
3917 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3918 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
3919 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3920 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3921 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3922 /* 0xB8 - 0xBF */
3923 N, N,
3924 G(BitOp, group8),
3925 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3926 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
3927 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3928 /* 0xC0 - 0xC7 */
3929 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
3930 N, D(DstMem | SrcReg | ModRM | Mov),
3931 N, N, N, GD(0, &group9),
3932 /* 0xC8 - 0xCF */
3933 X8(I(DstReg, em_bswap)),
3934 /* 0xD0 - 0xDF */
3935 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3936 /* 0xE0 - 0xEF */
3937 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3938 /* 0xF0 - 0xFF */
3939 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3940 };
3941
3942 static const struct gprefix three_byte_0f_38_f0 = {
3943 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
3944 };
3945
3946 static const struct gprefix three_byte_0f_38_f1 = {
3947 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
3948 };
3949
3950 /*
3951 * The insns below are selected by the prefix; the table itself is
3952 * indexed by the third opcode byte.
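 * Each gprefix entry holds the no-prefix, 66, F2 and F3 forms, in
 * that order.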
/*
 * Insns below are selected by the prefix, which is indexed by the
 * third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
        /* 0x00 - 0x7f */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0x80 - 0xef */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0xf0 - 0xf1 */
        GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
        GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
        /* 0xf2 - 0xff */
        N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
        unsigned size;

        size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        if (size == 8)
                size = 4;
        return size;
}

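/*
 * Even with a 64-bit operand size, most immediates are encoded in at
 * most four bytes and sign-extended, which is why imm_size() clamps
 * 8 to 4. OpImm64, used by the 0xB8-0xBF MOV immediates, instead
 * passes ctxt->op_bytes straight to decode_imm(), so a REX.W MOV can
 * carry a full 8-byte immediate.
 */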
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
                      unsigned size, bool sign_extension)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_IMM;
        op->bytes = size;
        op->addr.mem.ea = ctxt->_eip;
        /* NB. Immediates are sign-extended as necessary. */
        switch (op->bytes) {
        case 1:
                op->val = insn_fetch(s8, ctxt);
                break;
        case 2:
                op->val = insn_fetch(s16, ctxt);
                break;
        case 4:
                op->val = insn_fetch(s32, ctxt);
                break;
        case 8:
                op->val = insn_fetch(s64, ctxt);
                break;
        }
        if (!sign_extension) {
                switch (op->bytes) {
                case 1:
                        op->val &= 0xff;
                        break;
                case 2:
                        op->val &= 0xffff;
                        break;
                case 4:
                        op->val &= 0xffffffff;
                        break;
                }
        }
done:
        return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                          unsigned d)
{
        int rc = X86EMUL_CONTINUE;

        switch (d) {
        case OpReg:
                decode_register_operand(ctxt, op);
                break;
        case OpImmUByte:
                rc = decode_imm(ctxt, op, 1, false);
                break;
        case OpMem:
                ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        mem_common:
                *op = ctxt->memop;
                ctxt->memopp = op;
                if ((ctxt->d & BitOp) && op == &ctxt->dst)
                        fetch_bit_operand(ctxt);
                op->orig_val = op->val;
                break;
        case OpMem64:
                ctxt->memop.bytes = 8;
                goto mem_common;
        case OpAcc:
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpAccLo:
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpAccHi:
                if (ctxt->d & ByteOp) {
                        op->type = OP_NONE;
                        break;
                }
                op->type = OP_REG;
                op->bytes = ctxt->op_bytes;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                fetch_register_operand(op);
                op->orig_val = op->val;
                break;
        case OpDI:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
                op->addr.mem.seg = VCPU_SREG_ES;
                op->val = 0;
                op->count = 1;
                break;
        case OpDX:
                op->type = OP_REG;
                op->bytes = 2;
                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                fetch_register_operand(op);
                break;
        case OpCL:
                op->bytes = 1;
                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                break;
        case OpImmByte:
                rc = decode_imm(ctxt, op, 1, true);
                break;
        case OpOne:
                op->bytes = 1;
                op->val = 1;
                break;
        case OpImm:
                rc = decode_imm(ctxt, op, imm_size(ctxt), true);
                break;
        case OpImm64:
                rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
                break;
        case OpMem8:
                ctxt->memop.bytes = 1;
                if (ctxt->memop.type == OP_REG) {
                        ctxt->memop.addr.reg = decode_register(ctxt,
                                        ctxt->modrm_rm, true);
                        fetch_register_operand(&ctxt->memop);
                }
                goto mem_common;
        case OpMem16:
                ctxt->memop.bytes = 2;
                goto mem_common;
        case OpMem32:
                ctxt->memop.bytes = 4;
                goto mem_common;
        case OpImmU16:
                rc = decode_imm(ctxt, op, 2, false);
                break;
        case OpImmU:
                rc = decode_imm(ctxt, op, imm_size(ctxt), false);
                break;
        case OpSI:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
                op->addr.mem.seg = seg_override(ctxt);
                op->val = 0;
                op->count = 1;
                break;
        case OpXLat:
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt,
                                reg_read(ctxt, VCPU_REGS_RBX) +
                                (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
                op->addr.mem.seg = seg_override(ctxt);
                op->val = 0;
                break;
        case OpImmFAddr:
                op->type = OP_IMM;
                op->addr.mem.ea = ctxt->_eip;
                op->bytes = ctxt->op_bytes + 2;
                insn_fetch_arr(op->valptr, op->bytes, ctxt);
                break;
        case OpMemFAddr:
                ctxt->memop.bytes = ctxt->op_bytes + 2;
                goto mem_common;
        case OpES:
                op->val = VCPU_SREG_ES;
                break;
        case OpCS:
                op->val = VCPU_SREG_CS;
                break;
        case OpSS:
                op->val = VCPU_SREG_SS;
                break;
        case OpDS:
                op->val = VCPU_SREG_DS;
                break;
        case OpFS:
                op->val = VCPU_SREG_FS;
                break;
        case OpGS:
                op->val = VCPU_SREG_GS;
                break;
        case OpImplicit:
                /* Special instructions do their own operand decoding. */
        default:
                op->type = OP_NONE; /* Disable writeback. */
                break;
        }

done:
        return rc;
}

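/*
 * x86_decode_insn() drives the whole decode front end: consume legacy
 * and REX prefixes, fetch up to three opcode bytes, resolve the
 * group/dual/prefix/escape indirections into a final flags word, then
 * decode the ModRM, SIB and displacement bytes and finally the src,
 * src2 and dst operands. It returns EMULATION_OK or EMULATION_FAILED.
 */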
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
        int rc = X86EMUL_CONTINUE;
        int mode = ctxt->mode;
        int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
        bool op_prefix = false;
        struct opcode opcode;

        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
        ctxt->_eip = ctxt->eip;
        ctxt->fetch.start = ctxt->_eip;
        ctxt->fetch.end = ctxt->fetch.start + insn_len;
        ctxt->opcode_len = 1;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);

        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
        case X86EMUL_MODE_PROT32:
                def_op_bytes = def_ad_bytes = 4;
                break;
#ifdef CONFIG_X86_64
        case X86EMUL_MODE_PROT64:
                def_op_bytes = 4;
                def_ad_bytes = 8;
                break;
#endif
        default:
                return EMULATION_FAILED;
        }

        ctxt->op_bytes = def_op_bytes;
        ctxt->ad_bytes = def_ad_bytes;

        /* Legacy prefixes. */
        for (;;) {
                switch (ctxt->b = insn_fetch(u8, ctxt)) {
                case 0x66:      /* operand-size override */
                        op_prefix = true;
                        /* switch between 2/4 bytes */
                        ctxt->op_bytes = def_op_bytes ^ 6;
                        break;
                case 0x67:      /* address-size override */
                        if (mode == X86EMUL_MODE_PROT64)
                                /* switch between 4/8 bytes */
                                ctxt->ad_bytes = def_ad_bytes ^ 12;
                        else
                                /* switch between 2/4 bytes */
                                ctxt->ad_bytes = def_ad_bytes ^ 6;
                        break;
                case 0x26:      /* ES override */
                case 0x2e:      /* CS override */
                case 0x36:      /* SS override */
                case 0x3e:      /* DS override */
                        set_seg_override(ctxt, (ctxt->b >> 3) & 3);
                        break;
                case 0x64:      /* FS override */
                case 0x65:      /* GS override */
                        set_seg_override(ctxt, ctxt->b & 7);
                        break;
                case 0x40 ... 0x4f: /* REX */
                        if (mode != X86EMUL_MODE_PROT64)
                                goto done_prefixes;
                        ctxt->rex_prefix = ctxt->b;
                        continue;
                case 0xf0:      /* LOCK */
                        ctxt->lock_prefix = 1;
                        break;
                case 0xf2:      /* REPNE/REPNZ */
                case 0xf3:      /* REP/REPE/REPZ */
                        ctxt->rep_prefix = ctxt->b;
                        break;
                default:
                        goto done_prefixes;
                }

                /* Any legacy prefix after a REX prefix nullifies its effect. */

                ctxt->rex_prefix = 0;
        }
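
        /*
         * The XOR trick above toggles between the two legal sizes
         * without branching: with a 32-bit default, 4 ^ 6 == 2 (so 0x66
         * selects 16-bit operands) and with a 16-bit default, 2 ^ 6 == 4.
         * Likewise for addresses in long mode, 8 ^ 12 == 4.
         */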

done_prefixes:

        /* REX prefix. */
        if (ctxt->rex_prefix & 8)
                ctxt->op_bytes = 8;     /* REX.W */

        /* Opcode byte(s). */
        opcode = opcode_table[ctxt->b];
        /* Two-byte opcode? */
        if (ctxt->b == 0x0f) {
                ctxt->opcode_len = 2;
                ctxt->b = insn_fetch(u8, ctxt);
                opcode = twobyte_table[ctxt->b];

                /* 0F_38 opcode map */
                if (ctxt->b == 0x38) {
                        ctxt->opcode_len = 3;
                        ctxt->b = insn_fetch(u8, ctxt);
                        opcode = opcode_map_0f_38[ctxt->b];
                }
        }
        ctxt->d = opcode.flags;

        if (ctxt->d & ModRM)
                ctxt->modrm = insn_fetch(u8, ctxt);

        while (ctxt->d & GroupMask) {
                switch (ctxt->d & GroupMask) {
                case Group:
                        goffset = (ctxt->modrm >> 3) & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case GroupDual:
                        goffset = (ctxt->modrm >> 3) & 7;
                        if ((ctxt->modrm >> 6) == 3)
                                opcode = opcode.u.gdual->mod3[goffset];
                        else
                                opcode = opcode.u.gdual->mod012[goffset];
                        break;
                case RMExt:
                        goffset = ctxt->modrm & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case Prefix:
                        if (ctxt->rep_prefix && op_prefix)
                                return EMULATION_FAILED;
                        simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
                        switch (simd_prefix) {
                        case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
                        case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
                        case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
                        case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
                        }
                        break;
                case Escape:
                        if (ctxt->modrm > 0xbf)
                                opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
                        else
                                opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
                        break;
                default:
                        return EMULATION_FAILED;
                }

                ctxt->d &= ~(u64)GroupMask;
                ctxt->d |= opcode.flags;
        }
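
        /*
         * The group resolution above keys off the standard ModRM byte
         * layout: mod in bits 7:6, reg/opcode-extension in bits 5:3 and
         * rm in bits 2:0. Group entries index by the reg field, RMExt
         * entries by the rm field, and GroupDual additionally splits on
         * mod == 3 (register form) versus a memory form.
         */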

        ctxt->execute = opcode.u.execute;
        ctxt->check_perm = opcode.check_perm;
        ctxt->intercept = opcode.intercept;

        /* Unrecognised? */
        if (ctxt->d == 0 || (ctxt->d & NotImpl))
                return EMULATION_FAILED;

        if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
                return EMULATION_FAILED;

        if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
                ctxt->op_bytes = 8;

        if (ctxt->d & Op3264) {
                if (mode == X86EMUL_MODE_PROT64)
                        ctxt->op_bytes = 8;
                else
                        ctxt->op_bytes = 4;
        }

        if (ctxt->d & Sse)
                ctxt->op_bytes = 16;
        else if (ctxt->d & Mmx)
                ctxt->op_bytes = 8;

        /* ModRM and SIB bytes. */
        if (ctxt->d & ModRM) {
                rc = decode_modrm(ctxt, &ctxt->memop);
                if (!ctxt->has_seg_override)
                        set_seg_override(ctxt, ctxt->modrm_seg);
        } else if (ctxt->d & MemAbs)
                rc = decode_abs(ctxt, &ctxt->memop);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        if (!ctxt->has_seg_override)
                set_seg_override(ctxt, VCPU_SREG_DS);

        ctxt->memop.addr.mem.seg = seg_override(ctxt);

        if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

        /*
         * Decode and fetch the source operand: register, memory
         * or immediate.
         */
        rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        /*
         * Decode and fetch the second source operand: register, memory
         * or immediate.
         */
        rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
        if (rc != X86EMUL_CONTINUE)
                goto done;

        /* Decode and fetch the destination operand: register or memory. */
        rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
        if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
                ctxt->memopp->addr.mem.ea += ctxt->_eip;

        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
        return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
        /*
         * The second termination condition only applies to REPE and
         * REPNE prefixes: if the prefix is REPE/REPZ or REPNE/REPNZ,
         * test the corresponding condition:
         * - if REPE/REPZ and ZF = 0 then done
         * - if REPNE/REPNZ and ZF = 1 then done
         */
        if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
             (ctxt->b == 0xae) || (ctxt->b == 0xaf))
            && (((ctxt->rep_prefix == REPE_PREFIX) &&
                 ((ctxt->eflags & EFLG_ZF) == 0))
                || ((ctxt->rep_prefix == REPNE_PREFIX) &&
                    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
                return true;

        return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
        bool fault = false;

        ctxt->ops->get_fpu(ctxt);
        asm volatile("1: fwait \n\t"
                     "2: \n\t"
                     ".pushsection .fixup,\"ax\" \n\t"
                     "3: \n\t"
                     "movb $1, %[fault] \n\t"
                     "jmp 2b \n\t"
                     ".popsection \n\t"
                     _ASM_EXTABLE(1b, 3b)
                     : [fault]"+qm"(fault));
        ctxt->ops->put_fpu(ctxt);

        if (unlikely(fault))
                return emulate_exception(ctxt, MF_VECTOR, 0, false);

        return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
                                       struct operand *op)
{
        if (op->type == OP_MM)
                read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
        ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
        if (!(ctxt->d & ByteOp))
                fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
        asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
            : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
              [fastop]"+S"(fop)
            : "c"(ctxt->src2.val));
        ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
        if (!fop) /* exception is returned in fop variable */
                return emulate_de(ctxt);
        return X86EMUL_CONTINUE;
}
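
/*
 * fastop() hands dst/src/src2 to a small assembly stub in EAX/EDX/ECX
 * and runs it under the guest's arithmetic flags: EFLAGS is loaded via
 * push/popf before the call and captured via pushf/pop afterwards.
 * The non-byte width variants of a stub are laid out FASTOP_SIZE bytes
 * apart from the byte variant, so indexing by __ffs(dst.bytes) (1, 2
 * or 3 for 2-, 4- or 8-byte operands) selects the right width. A stub
 * that takes a fault, e.g. a division, signals it by returning a NULL
 * fop pointer, which is converted to #DE here.
 */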

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        int rc = X86EMUL_CONTINUE;
        int saved_dst_type = ctxt->dst.type;

        ctxt->mem_read.pos = 0;

        if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
            (ctxt->d & Undefined)) {
                rc = emulate_ud(ctxt);
                goto done;
        }

        /* LOCK prefix is allowed only with some instructions */
        if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
                rc = emulate_ud(ctxt);
                goto done;
        }

        if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
                rc = emulate_ud(ctxt);
                goto done;
        }

        if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
            || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
                rc = emulate_ud(ctxt);
                goto done;
        }

        if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
                rc = emulate_nm(ctxt);
                goto done;
        }

        if (ctxt->d & Mmx) {
                rc = flush_pending_x87_faults(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                /*
                 * Now that we know the fpu is exception safe, we can fetch
                 * operands from it.
                 */
                fetch_possible_mmx_operand(ctxt, &ctxt->src);
                fetch_possible_mmx_operand(ctxt, &ctxt->src2);
                if (!(ctxt->d & Mov))
                        fetch_possible_mmx_operand(ctxt, &ctxt->dst);
        }

        if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_PRE_EXCEPT);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        /* Privileged instructions can be executed only at CPL 0 */
        if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
                rc = emulate_gp(ctxt, 0);
                goto done;
        }

        /* Instruction can only be executed in protected mode */
        if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
                rc = emulate_ud(ctxt);
                goto done;
        }

        /* Do instruction specific permission checks */
        if (ctxt->check_perm) {
                rc = ctxt->check_perm(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_POST_EXCEPT);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if (ctxt->rep_prefix && (ctxt->d & String)) {
                /* All REP prefixes have the same first termination condition */
                if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
                        ctxt->eip = ctxt->_eip;
                        goto done;
                }
        }

        if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
                rc = segmented_read(ctxt, ctxt->src.addr.mem,
                                    ctxt->src.valptr, ctxt->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                ctxt->src.orig_val64 = ctxt->src.val64;
        }

        if (ctxt->src2.type == OP_MEM) {
                rc = segmented_read(ctxt, ctxt->src2.addr.mem,
                                    &ctxt->src2.val, ctxt->src2.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if ((ctxt->d & DstMask) == ImplicitOps)
                goto special_insn;

        if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
                /* optimisation - avoid slow emulated read if Mov */
                rc = segmented_read(ctxt, ctxt->dst.addr.mem,
                                    &ctxt->dst.val, ctxt->dst.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
        ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

        if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_POST_MEMACCESS);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        if (ctxt->execute) {
                if (ctxt->d & Fastop) {
                        void (*fop)(struct fastop *) = (void *)ctxt->execute;
                        rc = fastop(ctxt, fop);
                        if (rc != X86EMUL_CONTINUE)
                                goto done;
                        goto writeback;
                }
                rc = ctxt->execute(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
                goto writeback;
        }

        if (ctxt->opcode_len == 2)
                goto twobyte_insn;
        else if (ctxt->opcode_len == 3)
                goto threebyte_insn;
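
        /*
         * Only opcodes without an ->execute callback reach this point;
         * the switch below and its twin at twobyte_insn handle the
         * remaining cases inline before sharing the common writeback
         * tail.
         */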

        switch (ctxt->b) {
        case 0x63:              /* movsxd */
                if (ctxt->mode != X86EMUL_MODE_PROT64)
                        goto cannot_emulate;
                ctxt->dst.val = (s32) ctxt->src.val;
                break;
        case 0x70 ... 0x7f: /* jcc (short) */
                if (test_cc(ctxt->b, ctxt->eflags))
                        jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x8d: /* lea r16/r32, m */
                ctxt->dst.val = ctxt->src.addr.mem.ea;
                break;
        case 0x90 ... 0x97: /* nop / xchg reg, rax */
                if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
                        break;
                rc = em_xchg(ctxt);
                break;
        case 0x98: /* cbw/cwde/cdqe */
                switch (ctxt->op_bytes) {
                case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
                case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
                case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
                }
                break;
        case 0xcc:              /* int3 */
                rc = emulate_int(ctxt, 3);
                break;
        case 0xcd:              /* int n */
                rc = emulate_int(ctxt, ctxt->src.val);
                break;
        case 0xce:              /* into */
                if (ctxt->eflags & EFLG_OF)
                        rc = emulate_int(ctxt, 4);
                break;
        case 0xe9: /* jmp rel */
        case 0xeb: /* jmp rel short */
                jmp_rel(ctxt, ctxt->src.val);
                ctxt->dst.type = OP_NONE; /* Disable writeback. */
                break;
        case 0xf4:              /* hlt */
                ctxt->ops->halt(ctxt);
                break;
        case 0xf5:      /* cmc */
                /* complement carry flag from eflags reg */
                ctxt->eflags ^= EFLG_CF;
                break;
        case 0xf8: /* clc */
                ctxt->eflags &= ~EFLG_CF;
                break;
        case 0xf9: /* stc */
                ctxt->eflags |= EFLG_CF;
                break;
        case 0xfc: /* cld */
                ctxt->eflags &= ~EFLG_DF;
                break;
        case 0xfd: /* std */
                ctxt->eflags |= EFLG_DF;
                break;
        default:
                goto cannot_emulate;
        }

        if (rc != X86EMUL_CONTINUE)
                goto done;

writeback:
        if (!(ctxt->d & NoWrite)) {
                rc = writeback(ctxt, &ctxt->dst);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
        if (ctxt->d & SrcWrite) {
                BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
                rc = writeback(ctxt, &ctxt->src);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }

        /*
         * restore dst type in case the decoding will be reused
         * (happens for string instructions)
         */
        ctxt->dst.type = saved_dst_type;

        if ((ctxt->d & SrcMask) == SrcSI)
                string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

        if ((ctxt->d & DstMask) == DstDI)
                string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

        if (ctxt->rep_prefix && (ctxt->d & String)) {
                unsigned int count;
                struct read_cache *r = &ctxt->io_read;
                if ((ctxt->d & SrcMask) == SrcSI)
                        count = ctxt->src.count;
                else
                        count = ctxt->dst.count;
                register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
                                           -count);

                if (!string_insn_completed(ctxt)) {
                        /*
                         * Re-enter guest when pio read ahead buffer is empty
                         * or, if it is not used, after every 1024 iterations.
                         */
                        if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
                            (r->end == 0 || r->end != r->pos)) {
                                /*
                                 * Reset read cache. Usually happens before
                                 * decode, but since instruction is restarted
                                 * we have to do it here.
                                 */
                                ctxt->mem_read.end = 0;
                                writeback_registers(ctxt);
                                return EMULATION_RESTART;
                        }
                        goto done; /* skip rip writeback */
                }
        }

        ctxt->eip = ctxt->_eip;

done:
        if (rc == X86EMUL_PROPAGATE_FAULT)
                ctxt->have_exception = true;
        if (rc == X86EMUL_INTERCEPTED)
                return EMULATION_INTERCEPTED;

        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);

        return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
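
        /*
         * Two-byte (0x0F xx) opcodes that lack an ->execute callback
         * are handled below; they rejoin the common writeback path
         * above.
         */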

twobyte_insn:
        switch (ctxt->b) {
        case 0x09:              /* wbinvd */
                (ctxt->ops->wbinvd)(ctxt);
                break;
        case 0x08:              /* invd */
        case 0x0d:              /* GrpP (prefetch) */
        case 0x18:              /* Grp16 (prefetch/nop) */
        case 0x1f:              /* nop */
                break;
        case 0x20: /* mov cr, reg */
                ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
                break;
        case 0x21: /* mov from dr to reg */
                ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
                break;
        case 0x40 ... 0x4f:     /* cmov */
                ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
                if (!test_cc(ctxt->b, ctxt->eflags))
                        ctxt->dst.type = OP_NONE; /* no writeback */
                break;
        case 0x80 ... 0x8f: /* jcc rel, etc. */
                if (test_cc(ctxt->b, ctxt->eflags))
                        jmp_rel(ctxt, ctxt->src.val);
                break;
        case 0x90 ... 0x9f:     /* setcc r/m8 */
                ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
                break;
        case 0xae:              /* clflush */
                break;
        case 0xb6 ... 0xb7:     /* movzx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
                                                       : (u16) ctxt->src.val;
                break;
        case 0xbe ... 0xbf:     /* movsx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
                                                        (s16) ctxt->src.val;
                break;
        case 0xc3:              /* movnti */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
                                                        (u64) ctxt->src.val;
                break;
        default:
                goto cannot_emulate;
        }

threebyte_insn:

        if (rc != X86EMUL_CONTINUE)
                goto done;

        goto writeback;

cannot_emulate:
        return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
        invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
        writeback_registers(ctxt);
}