// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#include <asm/ibt.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
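
/*
 * Illustrative decode of a flags word (a sketch, not used by the emulator
 * itself): a table entry built as, say, DstMem | SrcReg | ModRM unpacks as
 *
 *	(flags >> DstShift) & OpMask == OpMem	destination is memory
 *	(flags >> SrcShift) & OpMask == OpReg	source is a register
 *	flags & ModRM				a ModRM byte follows the opcode
 *
 * The X-macros above simply repeat an argument, so X16(x) expands to
 * sixteen copies of x; they are used to write runs of identical table
 * entries (e.g. all sixteen Jcc opcodes) compactly.
 */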

struct opcode {
	u64 flags;
	u8 intercept;
	u8 pad[7];
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 * body of the function.  Currently none is larger than 4.
 */
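
/*
 * Worked example of "reached by calculation" (a sketch; the actual dispatch
 * helper lives further down in this file): FASTOP2(add) lays out four entry
 * points at consecutive FASTOP_SIZE offsets,
 *
 *	em_add + 0*FASTOP_SIZE:	addb %dl, %al	(1-byte operands)
 *	em_add + 1*FASTOP_SIZE:	addw %dx, %ax	(2-byte operands)
 *	em_add + 2*FASTOP_SIZE:	addl %edx, %eax	(4-byte operands)
 *	em_add + 3*FASTOP_SIZE:	addq %rdx, %rax	(8-byte operands)
 *
 * so an operand size of n bytes can select em_add + ilog2(n) * FASTOP_SIZE
 * instead of going through a jump table.
 */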

static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

#define FASTOP_SIZE	16

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t" \
	ASM_ENDBR

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"11: " ASM_RET \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define __FOP_START(op, align) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(align) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op,  dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */

/*
 * Depending on .config the SETcc functions look like:
 *
 * ENDBR			[4 bytes; CONFIG_X86_KERNEL_IBT]
 * SETcc %al			[3 bytes]
 * RET | JMP __x86_return_thunk	[1,5 bytes; CONFIG_RETHUNK]
 * INT3				[1 byte; CONFIG_SLS]
 */
#define SETCC_ALIGN	16

#define FOP_SETCC(op) \
	".align " __stringify(SETCC_ALIGN) " \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	ASM_ENDBR \
	#op " %al \n\t" \
	__FOP_RET(#op) \
	".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"

__FOP_START(setcc, SETCC_ALIGN)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
		     : [_fault] "+r"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
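
/*
 * Illustrative values for the two helpers above: with a 16-bit stack
 * segment (ss.d == 0) stack_mask() returns 0xffff and stack_size() is 2;
 * with a 32-bit stack segment they are 0xffffffff and 4; in 64-bit mode
 * the mask is ~0UL and the size is 8.
 */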

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}
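
/*
 * Canonicality example: with 48 virtual-address bits, an address is
 * canonical iff bits 63:47 all equal bit 47, so 0xffff800000000000 is
 * canonical while 0x0000800000000000 is not.  With CR4.LA57 set, the
 * sign-extension boundary moves up to bit 56.
 */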

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
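
/*
 * Example outcomes: a 16-byte MOVDQA access (Aligned) must sit on a
 * 16-byte boundary, while MOVDQU (Unaligned) and the AVX encodings accept
 * any address; FXSAVE's 512-byte area only needs 16-byte alignment
 * (Aligned16) rather than full "size" alignment.  Accesses smaller than
 * 16 bytes are never checked here.
 */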

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (!__is_canonical_address(la, va_bits))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* cur_size is at most 15, so 15 ^ cur_size equals 15 - cur_size */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So if we still do not have
	 * enough bytes, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
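
/*
 * Example: for a byte operation with no REX prefix, modrm_reg values 4-7
 * name the legacy high-byte registers, so modrm_reg == 4 yields AH, i.e. a
 * pointer one byte into the RAX slot (little-endian: byte 0 is AL, byte 1
 * is AH).  Any REX prefix disables this and makes 4-7 address
 * SPL/BPL/SIL/DIL instead.
 */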

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
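
/*
 * Example: test_cc(0x4, flags) computes em_setcc + 4 * SETCC_ALIGN, i.e.
 * the setz stub defined above, so it returns 1 exactly when ZF is set in
 * the supplied flags.  The decoder can therefore feed the low opcode
 * nibble of Jcc/SETcc/CMOVcc straight in as the condition.
 */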

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fninit");
	kvm_fpu_put();
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstcw %0": "+m"(fcw));
	kvm_fpu_put();

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstsw %0": "+m"(fsw));
	kvm_fpu_put();

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		kvm_read_sse_reg(reg, &op->vec_val);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
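
/*
 * Worked example for the 32/64-bit path above: ModRM byte 0x44 is mod=01,
 * reg=000, rm=100; rm=4 means a SIB byte follows, and mod=01 adds an
 * 8-bit displacement.  With SIB 0x24 (scale=0, index=4 i.e. none, base=4)
 * the effective address comes out as RSP/ESP + disp8, with modrm_seg
 * switched to SS by adjust_modrm_seg().
 */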

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}
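
/*
 * Read-ahead example for pio_in_emulated() above: for REP INSW (size 2)
 * with a large RCX and EFLAGS.DF clear, a single ->pio_in_emulated() call
 * can fill the cache with a batch bounded by the REP count, the size of
 * rc->data, and the room left in the destination page; later iterations
 * are then served from rc->data without touching the device model again.
 */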

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (transfer == X86_TRANSFER_RET) {
			/* RET can never return to an inner privilege level. */
			if (rpl < cpl)
				goto exception;
			/* Outer-privilege level return is not implemented */
			if (rpl > cpl)
				return X86EMUL_UNHANDLEABLE;
		}
		if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
			if (seg_desc.type & 4) {
				/* conforming */
				if (dpl > rpl)
					goto exception;
			} else {
				/* nonconforming */
				if (dpl != rpl)
					goto exception;
			}
		} else { /* X86_TRANSFER_CALL_JMP */
			if (seg_desc.type & 4) {
				/* conforming */
				if (dpl > cpl)
					goto exception;
			} else {
				/* nonconforming */
				if (rpl > cpl || dpl != cpl)
					goto exception;
			}
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		if (!seg_desc.p) {
			err_vec = NP_VECTOR;
			goto exception;
		}
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}
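
	/*
	 * Reminder on the type-field bits tested above, for S=1 descriptors:
	 * bit 3 (0x8) selects code vs. data, bit 2 (0x4) is conforming
	 * (code) or expand-down (data), bit 1 (0x2) is readable (code) or
	 * writable (data), bit 0 (0x1) is accessed.  Hence
	 * "(type & 0xa) == 0x8" catches unreadable code segments and
	 * "(type & 0xa) != 0x2" in the SS case rejects anything that is not
	 * a writable data segment.
	 */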

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
		break;
	case OP_MM:
		kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}
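
/*
 * Note the asymmetry above: a 32-bit PUSH of a segment register still
 * moves the stack pointer by 4 bytes, but only the low 2 bytes of the
 * slot are written, matching contemporary CPUs.  em_pop_sreg() below
 * mirrors this by popping 2 bytes and then skipping the remainder of
 * the slot.
 */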

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
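
/*
 * Real-mode IVT layout used by __emulate_int_real() above: vector n
 * occupies 4 bytes at dt.address + n * 4, the low word holding the new IP
 * and the high word the new CS, so e.g. INT 0x10 with the IDT base at 0
 * reads IP from 0x40 and CS from 0x42.
 */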
X86_EFLAGS_TF | 2097 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | 2098 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | 2099 X86_EFLAGS_AC | X86_EFLAGS_ID | 2100 X86_EFLAGS_FIXED; 2101 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | 2102 X86_EFLAGS_VIP; 2103 2104 /* TODO: Add stack limit check */ 2105 2106 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); 2107 2108 if (rc != X86EMUL_CONTINUE) 2109 return rc; 2110 2111 if (temp_eip & ~0xffff) 2112 return emulate_gp(ctxt, 0); 2113 2114 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2115 2116 if (rc != X86EMUL_CONTINUE) 2117 return rc; 2118 2119 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); 2120 2121 if (rc != X86EMUL_CONTINUE) 2122 return rc; 2123 2124 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); 2125 2126 if (rc != X86EMUL_CONTINUE) 2127 return rc; 2128 2129 ctxt->_eip = temp_eip; 2130 2131 if (ctxt->op_bytes == 4) 2132 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); 2133 else if (ctxt->op_bytes == 2) { 2134 ctxt->eflags &= ~0xffff; 2135 ctxt->eflags |= temp_eflags; 2136 } 2137 2138 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ 2139 ctxt->eflags |= X86_EFLAGS_FIXED; 2140 ctxt->ops->set_nmi_mask(ctxt, false); 2141 2142 return rc; 2143 } 2144 2145 static int em_iret(struct x86_emulate_ctxt *ctxt) 2146 { 2147 switch(ctxt->mode) { 2148 case X86EMUL_MODE_REAL: 2149 return emulate_iret_real(ctxt); 2150 case X86EMUL_MODE_VM86: 2151 case X86EMUL_MODE_PROT16: 2152 case X86EMUL_MODE_PROT32: 2153 case X86EMUL_MODE_PROT64: 2154 default: 2155 /* iret from protected mode unimplemented yet */ 2156 return X86EMUL_UNHANDLEABLE; 2157 } 2158 } 2159 2160 static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 2161 { 2162 int rc; 2163 unsigned short sel; 2164 struct desc_struct new_desc; 2165 u8 cpl = ctxt->ops->cpl(ctxt); 2166 2167 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2168 2169 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, 2170 X86_TRANSFER_CALL_JMP, 2171 &new_desc); 2172 if (rc != X86EMUL_CONTINUE) 2173 return rc; 2174 2175 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 2176 /* Error handling is not implemented. 
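 * A proper recovery path would have to restore the original CS before
 * bailing out, mirroring the fail: path of em_call_far() further below.
 * A hedged sketch (old_cs/old_desc would be new locals here, captured
 * via ops->get_segment() before the descriptor load):
 *
 *	if (rc != X86EMUL_CONTINUE) {
 *		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
 *		return rc;
 *	}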
*/ 2177 if (rc != X86EMUL_CONTINUE) 2178 return X86EMUL_UNHANDLEABLE; 2179 2180 return rc; 2181 } 2182 2183 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) 2184 { 2185 return assign_eip_near(ctxt, ctxt->src.val); 2186 } 2187 2188 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) 2189 { 2190 int rc; 2191 long int old_eip; 2192 2193 old_eip = ctxt->_eip; 2194 rc = assign_eip_near(ctxt, ctxt->src.val); 2195 if (rc != X86EMUL_CONTINUE) 2196 return rc; 2197 ctxt->src.val = old_eip; 2198 rc = em_push(ctxt); 2199 return rc; 2200 } 2201 2202 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) 2203 { 2204 u64 old = ctxt->dst.orig_val64; 2205 2206 if (ctxt->dst.bytes == 16) 2207 return X86EMUL_UNHANDLEABLE; 2208 2209 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || 2210 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { 2211 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); 2212 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); 2213 ctxt->eflags &= ~X86_EFLAGS_ZF; 2214 } else { 2215 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | 2216 (u32) reg_read(ctxt, VCPU_REGS_RBX); 2217 2218 ctxt->eflags |= X86_EFLAGS_ZF; 2219 } 2220 return X86EMUL_CONTINUE; 2221 } 2222 2223 static int em_ret(struct x86_emulate_ctxt *ctxt) 2224 { 2225 int rc; 2226 unsigned long eip; 2227 2228 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2229 if (rc != X86EMUL_CONTINUE) 2230 return rc; 2231 2232 return assign_eip_near(ctxt, eip); 2233 } 2234 2235 static int em_ret_far(struct x86_emulate_ctxt *ctxt) 2236 { 2237 int rc; 2238 unsigned long eip, cs; 2239 int cpl = ctxt->ops->cpl(ctxt); 2240 struct desc_struct new_desc; 2241 2242 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2243 if (rc != X86EMUL_CONTINUE) 2244 return rc; 2245 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2246 if (rc != X86EMUL_CONTINUE) 2247 return rc; 2248 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, 2249 X86_TRANSFER_RET, 2250 &new_desc); 2251 if (rc != X86EMUL_CONTINUE) 2252 return rc; 2253 rc = assign_eip_far(ctxt, eip, &new_desc); 2254 /* Error handling is not implemented. */ 2255 if (rc != X86EMUL_CONTINUE) 2256 return X86EMUL_UNHANDLEABLE; 2257 2258 return rc; 2259 } 2260 2261 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) 2262 { 2263 int rc; 2264 2265 rc = em_ret_far(ctxt); 2266 if (rc != X86EMUL_CONTINUE) 2267 return rc; 2268 rsp_increment(ctxt, ctxt->src.val); 2269 return X86EMUL_CONTINUE; 2270 } 2271 2272 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2273 { 2274 /* Save real source value, then compare EAX against destination. */ 2275 ctxt->dst.orig_val = ctxt->dst.val; 2276 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); 2277 ctxt->src.orig_val = ctxt->src.val; 2278 ctxt->src.val = ctxt->dst.orig_val; 2279 fastop(ctxt, em_cmp); 2280 2281 if (ctxt->eflags & X86_EFLAGS_ZF) { 2282 /* Success: write back to memory; no update of EAX */ 2283 ctxt->src.type = OP_NONE; 2284 ctxt->dst.val = ctxt->src.orig_val; 2285 } else { 2286 /* Failure: write the value we saw to EAX. 
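 *
 * For reference, the CMPXCHG semantics being emulated here reduce to the
 * following simplified sketch (real hardware also updates the other
 * arithmetic flags from the comparison):
 *
 *	if (rAX == *dest) {
 *		ZF = 1;
 *		*dest = src;	(success: memory gets the new value)
 *	} else {
 *		ZF = 0;
 *		rAX = *dest;	(failure: the accumulator gets the old value)
 *	}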
*/ 2287 ctxt->src.type = OP_REG; 2288 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 2289 ctxt->src.val = ctxt->dst.orig_val; 2290 /* Create write-cycle to dest by writing the same value */ 2291 ctxt->dst.val = ctxt->dst.orig_val; 2292 } 2293 return X86EMUL_CONTINUE; 2294 } 2295 2296 static int em_lseg(struct x86_emulate_ctxt *ctxt) 2297 { 2298 int seg = ctxt->src2.val; 2299 unsigned short sel; 2300 int rc; 2301 2302 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2303 2304 rc = load_segment_descriptor(ctxt, sel, seg); 2305 if (rc != X86EMUL_CONTINUE) 2306 return rc; 2307 2308 ctxt->dst.val = ctxt->src.val; 2309 return rc; 2310 } 2311 2312 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) 2313 { 2314 #ifdef CONFIG_X86_64 2315 return ctxt->ops->guest_has_long_mode(ctxt); 2316 #else 2317 return false; 2318 #endif 2319 } 2320 2321 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) 2322 { 2323 desc->g = (flags >> 23) & 1; 2324 desc->d = (flags >> 22) & 1; 2325 desc->l = (flags >> 21) & 1; 2326 desc->avl = (flags >> 20) & 1; 2327 desc->p = (flags >> 15) & 1; 2328 desc->dpl = (flags >> 13) & 3; 2329 desc->s = (flags >> 12) & 1; 2330 desc->type = (flags >> 8) & 15; 2331 } 2332 2333 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, 2334 int n) 2335 { 2336 struct desc_struct desc; 2337 int offset; 2338 u16 selector; 2339 2340 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4); 2341 2342 if (n < 3) 2343 offset = 0x7f84 + n * 12; 2344 else 2345 offset = 0x7f2c + (n - 3) * 12; 2346 2347 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); 2348 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); 2349 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset)); 2350 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); 2351 return X86EMUL_CONTINUE; 2352 } 2353 2354 #ifdef CONFIG_X86_64 2355 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, 2356 int n) 2357 { 2358 struct desc_struct desc; 2359 int offset; 2360 u16 selector; 2361 u32 base3; 2362 2363 offset = 0x7e00 + n * 16; 2364 2365 selector = GET_SMSTATE(u16, smstate, offset); 2366 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8); 2367 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); 2368 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); 2369 base3 = GET_SMSTATE(u32, smstate, offset + 12); 2370 2371 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); 2372 return X86EMUL_CONTINUE; 2373 } 2374 #endif 2375 2376 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, 2377 u64 cr0, u64 cr3, u64 cr4) 2378 { 2379 int bad; 2380 u64 pcid; 2381 2382 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ 2383 pcid = 0; 2384 if (cr4 & X86_CR4_PCIDE) { 2385 pcid = cr3 & 0xfff; 2386 cr3 &= ~0xfff; 2387 } 2388 2389 bad = ctxt->ops->set_cr(ctxt, 3, cr3); 2390 if (bad) 2391 return X86EMUL_UNHANDLEABLE; 2392 2393 /* 2394 * First enable PAE, long mode needs it before CR0.PG = 1 is set. 2395 * Then enable protected mode. However, PCID cannot be enabled 2396 * if EFER.LMA=0, so set it separately. 
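 *
 * A worked example with hypothetical values: for cr3 = 0x12345007 and
 * CR4.PCIDE = 1, the PCID is 0x007, so CR3 is first loaded as
 * 0x12345000 and the PCID bits are re-applied only after CR4.PCIDE has
 * been set below.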
2397 */ 2398 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); 2399 if (bad) 2400 return X86EMUL_UNHANDLEABLE; 2401 2402 bad = ctxt->ops->set_cr(ctxt, 0, cr0); 2403 if (bad) 2404 return X86EMUL_UNHANDLEABLE; 2405 2406 if (cr4 & X86_CR4_PCIDE) { 2407 bad = ctxt->ops->set_cr(ctxt, 4, cr4); 2408 if (bad) 2409 return X86EMUL_UNHANDLEABLE; 2410 if (pcid) { 2411 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); 2412 if (bad) 2413 return X86EMUL_UNHANDLEABLE; 2414 } 2415 2416 } 2417 2418 return X86EMUL_CONTINUE; 2419 } 2420 2421 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, 2422 const char *smstate) 2423 { 2424 struct desc_struct desc; 2425 struct desc_ptr dt; 2426 u16 selector; 2427 u32 val, cr0, cr3, cr4; 2428 int i; 2429 2430 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc); 2431 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8); 2432 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED; 2433 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0); 2434 2435 for (i = 0; i < 8; i++) 2436 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4); 2437 2438 val = GET_SMSTATE(u32, smstate, 0x7fcc); 2439 2440 if (ctxt->ops->set_dr(ctxt, 6, val)) 2441 return X86EMUL_UNHANDLEABLE; 2442 2443 val = GET_SMSTATE(u32, smstate, 0x7fc8); 2444 2445 if (ctxt->ops->set_dr(ctxt, 7, val)) 2446 return X86EMUL_UNHANDLEABLE; 2447 2448 selector = GET_SMSTATE(u32, smstate, 0x7fc4); 2449 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64)); 2450 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60)); 2451 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c)); 2452 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); 2453 2454 selector = GET_SMSTATE(u32, smstate, 0x7fc0); 2455 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80)); 2456 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c)); 2457 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78)); 2458 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); 2459 2460 dt.address = GET_SMSTATE(u32, smstate, 0x7f74); 2461 dt.size = GET_SMSTATE(u32, smstate, 0x7f70); 2462 ctxt->ops->set_gdt(ctxt, &dt); 2463 2464 dt.address = GET_SMSTATE(u32, smstate, 0x7f58); 2465 dt.size = GET_SMSTATE(u32, smstate, 0x7f54); 2466 ctxt->ops->set_idt(ctxt, &dt); 2467 2468 for (i = 0; i < 6; i++) { 2469 int r = rsm_load_seg_32(ctxt, smstate, i); 2470 if (r != X86EMUL_CONTINUE) 2471 return r; 2472 } 2473 2474 cr4 = GET_SMSTATE(u32, smstate, 0x7f14); 2475 2476 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8)); 2477 2478 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); 2479 } 2480 2481 #ifdef CONFIG_X86_64 2482 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, 2483 const char *smstate) 2484 { 2485 struct desc_struct desc; 2486 struct desc_ptr dt; 2487 u64 val, cr0, cr3, cr4; 2488 u32 base3; 2489 u16 selector; 2490 int i, r; 2491 2492 for (i = 0; i < 16; i++) 2493 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8); 2494 2495 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); 2496 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; 2497 2498 val = GET_SMSTATE(u64, smstate, 0x7f68); 2499 2500 if (ctxt->ops->set_dr(ctxt, 6, val)) 2501 return X86EMUL_UNHANDLEABLE; 2502 2503 val = GET_SMSTATE(u64, smstate, 0x7f60); 2504 2505 if (ctxt->ops->set_dr(ctxt, 7, val)) 2506 return X86EMUL_UNHANDLEABLE; 2507 2508 cr0 = GET_SMSTATE(u64, smstate, 0x7f58); 2509 cr3 = GET_SMSTATE(u64, smstate, 0x7f50); 2510 cr4 = GET_SMSTATE(u64, smstate, 0x7f48); 2511 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 
0x7f00)); 2512 val = GET_SMSTATE(u64, smstate, 0x7ed0); 2513 2514 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA)) 2515 return X86EMUL_UNHANDLEABLE; 2516 2517 selector = GET_SMSTATE(u32, smstate, 0x7e90); 2518 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8); 2519 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94)); 2520 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98)); 2521 base3 = GET_SMSTATE(u32, smstate, 0x7e9c); 2522 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); 2523 2524 dt.size = GET_SMSTATE(u32, smstate, 0x7e84); 2525 dt.address = GET_SMSTATE(u64, smstate, 0x7e88); 2526 ctxt->ops->set_idt(ctxt, &dt); 2527 2528 selector = GET_SMSTATE(u32, smstate, 0x7e70); 2529 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8); 2530 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74)); 2531 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78)); 2532 base3 = GET_SMSTATE(u32, smstate, 0x7e7c); 2533 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); 2534 2535 dt.size = GET_SMSTATE(u32, smstate, 0x7e64); 2536 dt.address = GET_SMSTATE(u64, smstate, 0x7e68); 2537 ctxt->ops->set_gdt(ctxt, &dt); 2538 2539 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); 2540 if (r != X86EMUL_CONTINUE) 2541 return r; 2542 2543 for (i = 0; i < 6; i++) { 2544 r = rsm_load_seg_64(ctxt, smstate, i); 2545 if (r != X86EMUL_CONTINUE) 2546 return r; 2547 } 2548 2549 return X86EMUL_CONTINUE; 2550 } 2551 #endif 2552 2553 static int em_rsm(struct x86_emulate_ctxt *ctxt) 2554 { 2555 unsigned long cr0, cr4, efer; 2556 char buf[512]; 2557 u64 smbase; 2558 int ret; 2559 2560 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) 2561 return emulate_ud(ctxt); 2562 2563 smbase = ctxt->ops->get_smbase(ctxt); 2564 2565 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); 2566 if (ret != X86EMUL_CONTINUE) 2567 return X86EMUL_UNHANDLEABLE; 2568 2569 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) 2570 ctxt->ops->set_nmi_mask(ctxt, false); 2571 2572 ctxt->ops->exiting_smm(ctxt); 2573 2574 /* 2575 * Get back to real mode, to prepare a safe state in which to load 2576 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU 2577 * supports long mode. 2578 */ 2579 if (emulator_has_longmode(ctxt)) { 2580 struct desc_struct cs_desc; 2581 2582 /* Zero CR4.PCIDE before CR0.PG. */ 2583 cr4 = ctxt->ops->get_cr(ctxt, 4); 2584 if (cr4 & X86_CR4_PCIDE) 2585 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); 2586 2587 /* A 32-bit code segment is required to clear EFER.LMA. */ 2588 memset(&cs_desc, 0, sizeof(cs_desc)); 2589 cs_desc.type = 0xb; 2590 cs_desc.s = cs_desc.g = cs_desc.p = 1; 2591 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS); 2592 } 2593 2594 /* For the 64-bit case, this will clear EFER.LMA. */ 2595 cr0 = ctxt->ops->get_cr(ctxt, 0); 2596 if (cr0 & X86_CR0_PE) 2597 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); 2598 2599 if (emulator_has_longmode(ctxt)) { 2600 /* Clear CR4.PAE before clearing EFER.LME. */ 2601 cr4 = ctxt->ops->get_cr(ctxt, 4); 2602 if (cr4 & X86_CR4_PAE) 2603 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); 2604 2605 /* And finally go back to 32-bit mode. */ 2606 efer = 0; 2607 ctxt->ops->set_msr(ctxt, MSR_EFER, efer); 2608 } 2609 2610 /* 2611 * Give leave_smm() a chance to make ISA-specific changes to the vCPU 2612 * state (e.g. enter guest mode) before loading state from the SMM 2613 * state-save area. 
2614 */ 2615 if (ctxt->ops->leave_smm(ctxt, buf)) 2616 goto emulate_shutdown; 2617 2618 #ifdef CONFIG_X86_64 2619 if (emulator_has_longmode(ctxt)) 2620 ret = rsm_load_state_64(ctxt, buf); 2621 else 2622 #endif 2623 ret = rsm_load_state_32(ctxt, buf); 2624 2625 if (ret != X86EMUL_CONTINUE) 2626 goto emulate_shutdown; 2627 2628 /* 2629 * Note, the ctxt->ops callbacks are responsible for handling side 2630 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID 2631 * runtime updates, etc... If that changes, e.g. this flow is moved 2632 * out of the emulator to make it look more like enter_smm(), then 2633 * those side effects need to be explicitly handled for both success 2634 * and shutdown. 2635 */ 2636 return X86EMUL_CONTINUE; 2637 2638 emulate_shutdown: 2639 ctxt->ops->triple_fault(ctxt); 2640 return X86EMUL_CONTINUE; 2641 } 2642 2643 static void 2644 setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss) 2645 { 2646 cs->l = 0; /* will be adjusted later */ 2647 set_desc_base(cs, 0); /* flat segment */ 2648 cs->g = 1; /* 4kb granularity */ 2649 set_desc_limit(cs, 0xfffff); /* 4GB limit */ 2650 cs->type = 0x0b; /* Read, Execute, Accessed */ 2651 cs->s = 1; 2652 cs->dpl = 0; /* will be adjusted later */ 2653 cs->p = 1; 2654 cs->d = 1; 2655 cs->avl = 0; 2656 2657 set_desc_base(ss, 0); /* flat segment */ 2658 set_desc_limit(ss, 0xfffff); /* 4GB limit */ 2659 ss->g = 1; /* 4kb granularity */ 2660 ss->s = 1; 2661 ss->type = 0x03; /* Read/Write, Accessed */ 2662 ss->d = 1; /* 32bit stack segment */ 2663 ss->dpl = 0; 2664 ss->p = 1; 2665 ss->l = 0; 2666 ss->avl = 0; 2667 } 2668 2669 static bool vendor_intel(struct x86_emulate_ctxt *ctxt) 2670 { 2671 u32 eax, ebx, ecx, edx; 2672 2673 eax = ecx = 0; 2674 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true); 2675 return is_guest_vendor_intel(ebx, ecx, edx); 2676 } 2677 2678 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) 2679 { 2680 const struct x86_emulate_ops *ops = ctxt->ops; 2681 u32 eax, ebx, ecx, edx; 2682 2683 /* 2684 * syscall should always be enabled in longmode - so only become 2685 * vendor specific (cpuid) if other modes are active... 2686 */ 2687 if (ctxt->mode == X86EMUL_MODE_PROT64) 2688 return true; 2689 2690 eax = 0x00000000; 2691 ecx = 0x00000000; 2692 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true); 2693 /* 2694 * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a 2695 * 64bit guest with a 32bit compat-app running will #UD !! While this 2696 * behaviour can be fixed (by emulating) into AMD response - CPUs of 2697 * AMD can't behave like Intel. 2698 */ 2699 if (is_guest_vendor_intel(ebx, ecx, edx)) 2700 return false; 2701 2702 if (is_guest_vendor_amd(ebx, ecx, edx) || 2703 is_guest_vendor_hygon(ebx, ecx, edx)) 2704 return true; 2705 2706 /* 2707 * default: (not Intel, not AMD, not Hygon), apply Intel's 2708 * stricter rules... 
2709 */ 2710 return false; 2711 } 2712 2713 static int em_syscall(struct x86_emulate_ctxt *ctxt) 2714 { 2715 const struct x86_emulate_ops *ops = ctxt->ops; 2716 struct desc_struct cs, ss; 2717 u64 msr_data; 2718 u16 cs_sel, ss_sel; 2719 u64 efer = 0; 2720 2721 /* syscall is not available in real mode */ 2722 if (ctxt->mode == X86EMUL_MODE_REAL || 2723 ctxt->mode == X86EMUL_MODE_VM86) 2724 return emulate_ud(ctxt); 2725 2726 if (!(em_syscall_is_enabled(ctxt))) 2727 return emulate_ud(ctxt); 2728 2729 ops->get_msr(ctxt, MSR_EFER, &efer); 2730 if (!(efer & EFER_SCE)) 2731 return emulate_ud(ctxt); 2732 2733 setup_syscalls_segments(&cs, &ss); 2734 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2735 msr_data >>= 32; 2736 cs_sel = (u16)(msr_data & 0xfffc); 2737 ss_sel = (u16)(msr_data + 8); 2738 2739 if (efer & EFER_LMA) { 2740 cs.d = 0; 2741 cs.l = 1; 2742 } 2743 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2744 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2745 2746 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; 2747 if (efer & EFER_LMA) { 2748 #ifdef CONFIG_X86_64 2749 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; 2750 2751 ops->get_msr(ctxt, 2752 ctxt->mode == X86EMUL_MODE_PROT64 ? 2753 MSR_LSTAR : MSR_CSTAR, &msr_data); 2754 ctxt->_eip = msr_data; 2755 2756 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); 2757 ctxt->eflags &= ~msr_data; 2758 ctxt->eflags |= X86_EFLAGS_FIXED; 2759 #endif 2760 } else { 2761 /* legacy mode */ 2762 ops->get_msr(ctxt, MSR_STAR, &msr_data); 2763 ctxt->_eip = (u32)msr_data; 2764 2765 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2766 } 2767 2768 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 2769 return X86EMUL_CONTINUE; 2770 } 2771 2772 static int em_sysenter(struct x86_emulate_ctxt *ctxt) 2773 { 2774 const struct x86_emulate_ops *ops = ctxt->ops; 2775 struct desc_struct cs, ss; 2776 u64 msr_data; 2777 u16 cs_sel, ss_sel; 2778 u64 efer = 0; 2779 2780 ops->get_msr(ctxt, MSR_EFER, &efer); 2781 /* inject #GP if in real mode */ 2782 if (ctxt->mode == X86EMUL_MODE_REAL) 2783 return emulate_gp(ctxt, 0); 2784 2785 /* 2786 * Not recognized on AMD in compat mode (but is recognized in legacy 2787 * mode). 2788 */ 2789 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) 2790 && !vendor_intel(ctxt)) 2791 return emulate_ud(ctxt); 2792 2793 /* sysenter/sysexit have not been tested in 64bit mode. */ 2794 if (ctxt->mode == X86EMUL_MODE_PROT64) 2795 return X86EMUL_UNHANDLEABLE; 2796 2797 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2798 if ((msr_data & 0xfffc) == 0x0) 2799 return emulate_gp(ctxt, 0); 2800 2801 setup_syscalls_segments(&cs, &ss); 2802 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2803 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; 2804 ss_sel = cs_sel + 8; 2805 if (efer & EFER_LMA) { 2806 cs.d = 0; 2807 cs.l = 1; 2808 } 2809 2810 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2811 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2812 2813 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); 2814 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; 2815 2816 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); 2817 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? 
msr_data : 2818 (u32)msr_data; 2819 if (efer & EFER_LMA) 2820 ctxt->mode = X86EMUL_MODE_PROT64; 2821 2822 return X86EMUL_CONTINUE; 2823 } 2824 2825 static int em_sysexit(struct x86_emulate_ctxt *ctxt) 2826 { 2827 const struct x86_emulate_ops *ops = ctxt->ops; 2828 struct desc_struct cs, ss; 2829 u64 msr_data, rcx, rdx; 2830 int usermode; 2831 u16 cs_sel = 0, ss_sel = 0; 2832 2833 /* inject #GP if in real mode or Virtual 8086 mode */ 2834 if (ctxt->mode == X86EMUL_MODE_REAL || 2835 ctxt->mode == X86EMUL_MODE_VM86) 2836 return emulate_gp(ctxt, 0); 2837 2838 setup_syscalls_segments(&cs, &ss); 2839 2840 if ((ctxt->rex_prefix & 0x8) != 0x0) 2841 usermode = X86EMUL_MODE_PROT64; 2842 else 2843 usermode = X86EMUL_MODE_PROT32; 2844 2845 rcx = reg_read(ctxt, VCPU_REGS_RCX); 2846 rdx = reg_read(ctxt, VCPU_REGS_RDX); 2847 2848 cs.dpl = 3; 2849 ss.dpl = 3; 2850 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2851 switch (usermode) { 2852 case X86EMUL_MODE_PROT32: 2853 cs_sel = (u16)(msr_data + 16); 2854 if ((msr_data & 0xfffc) == 0x0) 2855 return emulate_gp(ctxt, 0); 2856 ss_sel = (u16)(msr_data + 24); 2857 rcx = (u32)rcx; 2858 rdx = (u32)rdx; 2859 break; 2860 case X86EMUL_MODE_PROT64: 2861 cs_sel = (u16)(msr_data + 32); 2862 if (msr_data == 0x0) 2863 return emulate_gp(ctxt, 0); 2864 ss_sel = cs_sel + 8; 2865 cs.d = 0; 2866 cs.l = 1; 2867 if (emul_is_noncanonical_address(rcx, ctxt) || 2868 emul_is_noncanonical_address(rdx, ctxt)) 2869 return emulate_gp(ctxt, 0); 2870 break; 2871 } 2872 cs_sel |= SEGMENT_RPL_MASK; 2873 ss_sel |= SEGMENT_RPL_MASK; 2874 2875 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2876 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2877 2878 ctxt->_eip = rdx; 2879 *reg_write(ctxt, VCPU_REGS_RSP) = rcx; 2880 2881 return X86EMUL_CONTINUE; 2882 } 2883 2884 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) 2885 { 2886 int iopl; 2887 if (ctxt->mode == X86EMUL_MODE_REAL) 2888 return false; 2889 if (ctxt->mode == X86EMUL_MODE_VM86) 2890 return true; 2891 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; 2892 return ctxt->ops->cpl(ctxt) > iopl; 2893 } 2894 2895 #define VMWARE_PORT_VMPORT (0x5658) 2896 #define VMWARE_PORT_VMRPC (0x5659) 2897 2898 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, 2899 u16 port, u16 len) 2900 { 2901 const struct x86_emulate_ops *ops = ctxt->ops; 2902 struct desc_struct tr_seg; 2903 u32 base3; 2904 int r; 2905 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; 2906 unsigned mask = (1 << len) - 1; 2907 unsigned long base; 2908 2909 /* 2910 * VMware allows access to these ports even if denied 2911 * by TSS I/O permission bitmap. Mimic behavior. 
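 *
 * The TSS lookup below implements the SDM's I/O permission bitmap
 * check. A worked example with hypothetical values: for port 0x3f8 and
 * len 2, a 16-bit word is read from base + io_bitmap_ptr + 0x3f8 / 8
 * and the access is allowed only if
 *
 *	((perm >> (0x3f8 & 0x7)) & ((1 << 2) - 1)) == 0
 *
 * i.e. the bits covering both byte-wide ports must be clear.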
2912 */ 2913 if (enable_vmware_backdoor && 2914 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC))) 2915 return true; 2916 2917 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); 2918 if (!tr_seg.p) 2919 return false; 2920 if (desc_limit_scaled(&tr_seg) < 103) 2921 return false; 2922 base = get_desc_base(&tr_seg); 2923 #ifdef CONFIG_X86_64 2924 base |= ((u64)base3) << 32; 2925 #endif 2926 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); 2927 if (r != X86EMUL_CONTINUE) 2928 return false; 2929 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2930 return false; 2931 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); 2932 if (r != X86EMUL_CONTINUE) 2933 return false; 2934 if ((perm >> bit_idx) & mask) 2935 return false; 2936 return true; 2937 } 2938 2939 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, 2940 u16 port, u16 len) 2941 { 2942 if (ctxt->perm_ok) 2943 return true; 2944 2945 if (emulator_bad_iopl(ctxt)) 2946 if (!emulator_io_port_access_allowed(ctxt, port, len)) 2947 return false; 2948 2949 ctxt->perm_ok = true; 2950 2951 return true; 2952 } 2953 2954 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) 2955 { 2956 /* 2957 * Intel CPUs mask the counter and pointers in quite strange 2958 * manner when ECX is zero due to REP-string optimizations. 2959 */ 2960 #ifdef CONFIG_X86_64 2961 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt)) 2962 return; 2963 2964 *reg_write(ctxt, VCPU_REGS_RCX) = 0; 2965 2966 switch (ctxt->b) { 2967 case 0xa4: /* movsb */ 2968 case 0xa5: /* movsd/w */ 2969 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; 2970 fallthrough; 2971 case 0xaa: /* stosb */ 2972 case 0xab: /* stosd/w */ 2973 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; 2974 } 2975 #endif 2976 } 2977 2978 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, 2979 struct tss_segment_16 *tss) 2980 { 2981 tss->ip = ctxt->_eip; 2982 tss->flag = ctxt->eflags; 2983 tss->ax = reg_read(ctxt, VCPU_REGS_RAX); 2984 tss->cx = reg_read(ctxt, VCPU_REGS_RCX); 2985 tss->dx = reg_read(ctxt, VCPU_REGS_RDX); 2986 tss->bx = reg_read(ctxt, VCPU_REGS_RBX); 2987 tss->sp = reg_read(ctxt, VCPU_REGS_RSP); 2988 tss->bp = reg_read(ctxt, VCPU_REGS_RBP); 2989 tss->si = reg_read(ctxt, VCPU_REGS_RSI); 2990 tss->di = reg_read(ctxt, VCPU_REGS_RDI); 2991 2992 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 2993 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 2994 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 2995 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 2996 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); 2997 } 2998 2999 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, 3000 struct tss_segment_16 *tss) 3001 { 3002 int ret; 3003 u8 cpl; 3004 3005 ctxt->_eip = tss->ip; 3006 ctxt->eflags = tss->flag | 2; 3007 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; 3008 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; 3009 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; 3010 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; 3011 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; 3012 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; 3013 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; 3014 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; 3015 3016 /* 3017 * SDM says that segment selectors are loaded before segment 3018 * descriptors 3019 */ 3020 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); 3021 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 3022 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 3023 set_segment_selector(ctxt, tss->ss, 
VCPU_SREG_SS); 3024 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 3025 3026 cpl = tss->cs & 3; 3027 3028 /* 3029 * Now load segment descriptors. If fault happens at this stage 3030 * it is handled in a context of new task 3031 */ 3032 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, 3033 X86_TRANSFER_TASK_SWITCH, NULL); 3034 if (ret != X86EMUL_CONTINUE) 3035 return ret; 3036 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, 3037 X86_TRANSFER_TASK_SWITCH, NULL); 3038 if (ret != X86EMUL_CONTINUE) 3039 return ret; 3040 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, 3041 X86_TRANSFER_TASK_SWITCH, NULL); 3042 if (ret != X86EMUL_CONTINUE) 3043 return ret; 3044 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, 3045 X86_TRANSFER_TASK_SWITCH, NULL); 3046 if (ret != X86EMUL_CONTINUE) 3047 return ret; 3048 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, 3049 X86_TRANSFER_TASK_SWITCH, NULL); 3050 if (ret != X86EMUL_CONTINUE) 3051 return ret; 3052 3053 return X86EMUL_CONTINUE; 3054 } 3055 3056 static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel, 3057 ulong old_tss_base, struct desc_struct *new_desc) 3058 { 3059 struct tss_segment_16 tss_seg; 3060 int ret; 3061 u32 new_tss_base = get_desc_base(new_desc); 3062 3063 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3064 if (ret != X86EMUL_CONTINUE) 3065 return ret; 3066 3067 save_state_to_tss16(ctxt, &tss_seg); 3068 3069 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3070 if (ret != X86EMUL_CONTINUE) 3071 return ret; 3072 3073 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3074 if (ret != X86EMUL_CONTINUE) 3075 return ret; 3076 3077 if (old_tss_sel != 0xffff) { 3078 tss_seg.prev_task_link = old_tss_sel; 3079 3080 ret = linear_write_system(ctxt, new_tss_base, 3081 &tss_seg.prev_task_link, 3082 sizeof(tss_seg.prev_task_link)); 3083 if (ret != X86EMUL_CONTINUE) 3084 return ret; 3085 } 3086 3087 return load_state_from_tss16(ctxt, &tss_seg); 3088 } 3089 3090 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, 3091 struct tss_segment_32 *tss) 3092 { 3093 /* CR3 and ldt selector are not saved intentionally */ 3094 tss->eip = ctxt->_eip; 3095 tss->eflags = ctxt->eflags; 3096 tss->eax = reg_read(ctxt, VCPU_REGS_RAX); 3097 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); 3098 tss->edx = reg_read(ctxt, VCPU_REGS_RDX); 3099 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); 3100 tss->esp = reg_read(ctxt, VCPU_REGS_RSP); 3101 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); 3102 tss->esi = reg_read(ctxt, VCPU_REGS_RSI); 3103 tss->edi = reg_read(ctxt, VCPU_REGS_RDI); 3104 3105 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); 3106 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); 3107 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); 3108 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); 3109 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); 3110 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); 3111 } 3112 3113 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, 3114 struct tss_segment_32 *tss) 3115 { 3116 int ret; 3117 u8 cpl; 3118 3119 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) 3120 return emulate_gp(ctxt, 0); 3121 ctxt->_eip = tss->eip; 3122 ctxt->eflags = tss->eflags | 2; 3123 3124 /* General purpose registers */ 3125 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; 3126 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; 3127 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; 3128 
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; 3129 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; 3130 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; 3131 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; 3132 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; 3133 3134 /* 3135 * SDM says that segment selectors are loaded before segment 3136 * descriptors. This is important because CPL checks will 3137 * use CS.RPL. 3138 */ 3139 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); 3140 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); 3141 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); 3142 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); 3143 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); 3144 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); 3145 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); 3146 3147 /* 3148 * If we're switching between Protected Mode and VM86, we need to make 3149 * sure to update the mode before loading the segment descriptors so 3150 * that the selectors are interpreted correctly. 3151 */ 3152 if (ctxt->eflags & X86_EFLAGS_VM) { 3153 ctxt->mode = X86EMUL_MODE_VM86; 3154 cpl = 3; 3155 } else { 3156 ctxt->mode = X86EMUL_MODE_PROT32; 3157 cpl = tss->cs & 3; 3158 } 3159 3160 /* 3161 * Now load segment descriptors. If fault happens at this stage 3162 * it is handled in a context of new task 3163 */ 3164 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, 3165 cpl, X86_TRANSFER_TASK_SWITCH, NULL); 3166 if (ret != X86EMUL_CONTINUE) 3167 return ret; 3168 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, 3169 X86_TRANSFER_TASK_SWITCH, NULL); 3170 if (ret != X86EMUL_CONTINUE) 3171 return ret; 3172 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, 3173 X86_TRANSFER_TASK_SWITCH, NULL); 3174 if (ret != X86EMUL_CONTINUE) 3175 return ret; 3176 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, 3177 X86_TRANSFER_TASK_SWITCH, NULL); 3178 if (ret != X86EMUL_CONTINUE) 3179 return ret; 3180 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, 3181 X86_TRANSFER_TASK_SWITCH, NULL); 3182 if (ret != X86EMUL_CONTINUE) 3183 return ret; 3184 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, 3185 X86_TRANSFER_TASK_SWITCH, NULL); 3186 if (ret != X86EMUL_CONTINUE) 3187 return ret; 3188 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, 3189 X86_TRANSFER_TASK_SWITCH, NULL); 3190 3191 return ret; 3192 } 3193 3194 static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel, 3195 ulong old_tss_base, struct desc_struct *new_desc) 3196 { 3197 struct tss_segment_32 tss_seg; 3198 int ret; 3199 u32 new_tss_base = get_desc_base(new_desc); 3200 u32 eip_offset = offsetof(struct tss_segment_32, eip); 3201 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 3202 3203 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3204 if (ret != X86EMUL_CONTINUE) 3205 return ret; 3206 3207 save_state_to_tss32(ctxt, &tss_seg); 3208 3209 /* Only GP registers and segment selectors are saved */ 3210 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, 3211 ldt_sel_offset - eip_offset); 3212 if (ret != X86EMUL_CONTINUE) 3213 return ret; 3214 3215 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3216 if (ret != X86EMUL_CONTINUE) 3217 return ret; 3218 3219 if (old_tss_sel != 0xffff) { 3220 tss_seg.prev_task_link = old_tss_sel; 3221 3222 ret = linear_write_system(ctxt, new_tss_base, 3223 &tss_seg.prev_task_link, 3224 
sizeof(tss_seg.prev_task_link)); 3225 if (ret != X86EMUL_CONTINUE) 3226 return ret; 3227 } 3228 3229 return load_state_from_tss32(ctxt, &tss_seg); 3230 } 3231 3232 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, 3233 u16 tss_selector, int idt_index, int reason, 3234 bool has_error_code, u32 error_code) 3235 { 3236 const struct x86_emulate_ops *ops = ctxt->ops; 3237 struct desc_struct curr_tss_desc, next_tss_desc; 3238 int ret; 3239 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); 3240 ulong old_tss_base = 3241 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); 3242 u32 desc_limit; 3243 ulong desc_addr, dr7; 3244 3245 /* FIXME: old_tss_base == ~0 ? */ 3246 3247 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); 3248 if (ret != X86EMUL_CONTINUE) 3249 return ret; 3250 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); 3251 if (ret != X86EMUL_CONTINUE) 3252 return ret; 3253 3254 /* FIXME: check that next_tss_desc is tss */ 3255 3256 /* 3257 * Check privileges. The three cases are task switch caused by... 3258 * 3259 * 1. jmp/call/int to task gate: Check against DPL of the task gate 3260 * 2. Exception/IRQ/iret: No check is performed 3261 * 3. jmp/call to TSS/task-gate: No check is performed since the 3262 * hardware checks it before exiting. 3263 */ 3264 if (reason == TASK_SWITCH_GATE) { 3265 if (idt_index != -1) { 3266 /* Software interrupts */ 3267 struct desc_struct task_gate_desc; 3268 int dpl; 3269 3270 ret = read_interrupt_descriptor(ctxt, idt_index, 3271 &task_gate_desc); 3272 if (ret != X86EMUL_CONTINUE) 3273 return ret; 3274 3275 dpl = task_gate_desc.dpl; 3276 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) 3277 return emulate_gp(ctxt, (idt_index << 3) | 0x2); 3278 } 3279 } 3280 3281 desc_limit = desc_limit_scaled(&next_tss_desc); 3282 if (!next_tss_desc.p || 3283 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || 3284 desc_limit < 0x2b)) { 3285 return emulate_ts(ctxt, tss_selector & 0xfffc); 3286 } 3287 3288 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 3289 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ 3290 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); 3291 } 3292 3293 if (reason == TASK_SWITCH_IRET) 3294 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; 3295 3296 /* set back link to prev task only if NT bit is set in eflags 3297 note that old_tss_sel is not used after this point */ 3298 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) 3299 old_tss_sel = 0xffff; 3300 3301 if (next_tss_desc.type & 8) 3302 ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc); 3303 else 3304 ret = task_switch_16(ctxt, old_tss_sel, 3305 old_tss_base, &next_tss_desc); 3306 if (ret != X86EMUL_CONTINUE) 3307 return ret; 3308 3309 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) 3310 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; 3311 3312 if (reason != TASK_SWITCH_IRET) { 3313 next_tss_desc.type |= (1 << 1); /* set busy flag */ 3314 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); 3315 } 3316 3317 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); 3318 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); 3319 3320 if (has_error_code) { 3321 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 
4 : 2; 3322 ctxt->lock_prefix = 0; 3323 ctxt->src.val = (unsigned long) error_code; 3324 ret = em_push(ctxt); 3325 } 3326 3327 ops->get_dr(ctxt, 7, &dr7); 3328 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); 3329 3330 return ret; 3331 } 3332 3333 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 3334 u16 tss_selector, int idt_index, int reason, 3335 bool has_error_code, u32 error_code) 3336 { 3337 int rc; 3338 3339 invalidate_registers(ctxt); 3340 ctxt->_eip = ctxt->eip; 3341 ctxt->dst.type = OP_NONE; 3342 3343 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, 3344 has_error_code, error_code); 3345 3346 if (rc == X86EMUL_CONTINUE) { 3347 ctxt->eip = ctxt->_eip; 3348 writeback_registers(ctxt); 3349 } 3350 3351 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 3352 } 3353 3354 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, 3355 struct operand *op) 3356 { 3357 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count; 3358 3359 register_address_increment(ctxt, reg, df * op->bytes); 3360 op->addr.mem.ea = register_address(ctxt, reg); 3361 } 3362 3363 static int em_das(struct x86_emulate_ctxt *ctxt) 3364 { 3365 u8 al, old_al; 3366 bool af, cf, old_cf; 3367 3368 cf = ctxt->eflags & X86_EFLAGS_CF; 3369 al = ctxt->dst.val; 3370 3371 old_al = al; 3372 old_cf = cf; 3373 cf = false; 3374 af = ctxt->eflags & X86_EFLAGS_AF; 3375 if ((al & 0x0f) > 9 || af) { 3376 al -= 6; 3377 cf = old_cf | (al >= 250); 3378 af = true; 3379 } else { 3380 af = false; 3381 } 3382 if (old_al > 0x99 || old_cf) { 3383 al -= 0x60; 3384 cf = true; 3385 } 3386 3387 ctxt->dst.val = al; 3388 /* Set PF, ZF, SF */ 3389 ctxt->src.type = OP_IMM; 3390 ctxt->src.val = 0; 3391 ctxt->src.bytes = 1; 3392 fastop(ctxt, em_or); 3393 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); 3394 if (cf) 3395 ctxt->eflags |= X86_EFLAGS_CF; 3396 if (af) 3397 ctxt->eflags |= X86_EFLAGS_AF; 3398 return X86EMUL_CONTINUE; 3399 } 3400 3401 static int em_aam(struct x86_emulate_ctxt *ctxt) 3402 { 3403 u8 al, ah; 3404 3405 if (ctxt->src.val == 0) 3406 return emulate_de(ctxt); 3407 3408 al = ctxt->dst.val & 0xff; 3409 ah = al / ctxt->src.val; 3410 al %= ctxt->src.val; 3411 3412 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); 3413 3414 /* Set PF, ZF, SF */ 3415 ctxt->src.type = OP_IMM; 3416 ctxt->src.val = 0; 3417 ctxt->src.bytes = 1; 3418 fastop(ctxt, em_or); 3419 3420 return X86EMUL_CONTINUE; 3421 } 3422 3423 static int em_aad(struct x86_emulate_ctxt *ctxt) 3424 { 3425 u8 al = ctxt->dst.val & 0xff; 3426 u8 ah = (ctxt->dst.val >> 8) & 0xff; 3427 3428 al = (al + (ah * ctxt->src.val)) & 0xff; 3429 3430 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; 3431 3432 /* Set PF, ZF, SF */ 3433 ctxt->src.type = OP_IMM; 3434 ctxt->src.val = 0; 3435 ctxt->src.bytes = 1; 3436 fastop(ctxt, em_or); 3437 3438 return X86EMUL_CONTINUE; 3439 } 3440 3441 static int em_call(struct x86_emulate_ctxt *ctxt) 3442 { 3443 int rc; 3444 long rel = ctxt->src.val; 3445 3446 ctxt->src.val = (unsigned long)ctxt->_eip; 3447 rc = jmp_rel(ctxt, rel); 3448 if (rc != X86EMUL_CONTINUE) 3449 return rc; 3450 return em_push(ctxt); 3451 } 3452 3453 static int em_call_far(struct x86_emulate_ctxt *ctxt) 3454 { 3455 u16 sel, old_cs; 3456 ulong old_eip; 3457 int rc; 3458 struct desc_struct old_desc, new_desc; 3459 const struct x86_emulate_ops *ops = ctxt->ops; 3460 int cpl = ctxt->ops->cpl(ctxt); 3461 enum x86emul_mode prev_mode = ctxt->mode; 3462 3463 old_eip = ctxt->_eip; 3464 
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); 3465
3466 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 3467 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, 3468 X86_TRANSFER_CALL_JMP, &new_desc); 3469 if (rc != X86EMUL_CONTINUE) 3470 return rc; 3471
3472 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 3473 if (rc != X86EMUL_CONTINUE) 3474 goto fail; 3475
3476 ctxt->src.val = old_cs; 3477 rc = em_push(ctxt); 3478 if (rc != X86EMUL_CONTINUE) 3479 goto fail; 3480
3481 ctxt->src.val = old_eip; 3482 rc = em_push(ctxt); 3483 /* If we failed, we tainted the memory, but at the very least we 3484 should restore CS. */ 3485 if (rc != X86EMUL_CONTINUE) { 3486 pr_warn_once("faulting far call emulation tainted memory\n"); 3487 goto fail; 3488 } 3489 return rc; 3490 fail: 3491 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); 3492 ctxt->mode = prev_mode; 3493 return rc; 3494
3495 } 3496
3497 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) 3498 { 3499 int rc; 3500 unsigned long eip; 3501
3502 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 3503 if (rc != X86EMUL_CONTINUE) 3504 return rc; 3505 rc = assign_eip_near(ctxt, eip); 3506 if (rc != X86EMUL_CONTINUE) 3507 return rc; 3508 rsp_increment(ctxt, ctxt->src.val); 3509 return X86EMUL_CONTINUE; 3510 } 3511
3512 static int em_xchg(struct x86_emulate_ctxt *ctxt) 3513 { 3514 /* Write back the register source. */ 3515 ctxt->src.val = ctxt->dst.val; 3516 write_register_operand(&ctxt->src); 3517
3518 /* Write back the memory destination with implicit LOCK prefix. */ 3519 ctxt->dst.val = ctxt->src.orig_val; 3520 ctxt->lock_prefix = 1; 3521 return X86EMUL_CONTINUE; 3522 } 3523
3524 static int em_imul_3op(struct x86_emulate_ctxt *ctxt) 3525 { 3526 ctxt->dst.val = ctxt->src2.val; 3527 return fastop(ctxt, em_imul); 3528 } 3529
3530 static int em_cwd(struct x86_emulate_ctxt *ctxt) 3531 { 3532 ctxt->dst.type = OP_REG; 3533 ctxt->dst.bytes = ctxt->src.bytes; 3534 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 3535 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); 3536
3537 return X86EMUL_CONTINUE; 3538 } 3539
3540 static int em_rdpid(struct x86_emulate_ctxt *ctxt) 3541 { 3542 u64 tsc_aux = 0; 3543
3544 if (!ctxt->ops->guest_has_rdpid(ctxt)) 3545 return emulate_ud(ctxt); 3546
3547 ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux); 3548 ctxt->dst.val = tsc_aux; 3549 return X86EMUL_CONTINUE; 3550 } 3551
3552 static int em_rdtsc(struct x86_emulate_ctxt *ctxt) 3553 { 3554 u64 tsc = 0; 3555
3556 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); 3557 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; 3558 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; 3559 return X86EMUL_CONTINUE; 3560 } 3561
3562 static int em_rdpmc(struct x86_emulate_ctxt *ctxt) 3563 { 3564 u64 pmc; 3565
3566 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) 3567 return emulate_gp(ctxt, 0); 3568 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; 3569 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; 3570 return X86EMUL_CONTINUE; 3571 } 3572
3573 static int em_mov(struct x86_emulate_ctxt *ctxt) 3574 { 3575 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); 3576 return X86EMUL_CONTINUE; 3577 } 3578
3579 static int em_movbe(struct x86_emulate_ctxt *ctxt) 3580 { 3581 u16 tmp; 3582
3583 if (!ctxt->ops->guest_has_movbe(ctxt)) 3584 return emulate_ud(ctxt); 3585
3586 switch (ctxt->op_bytes) { 3587 case 2: 3588 /* 3589 * From the MOVBE definition: "...When the operand size is 16 bits, 3590 * the upper word of the destination register remains unchanged 3591 * ..." 3592 * 3593 * Both casting ->valptr and ->val to u16 breaks strict aliasing 3594 * rules, so we have to do the operation almost by hand. 3595 */ 3596 tmp = (u16)ctxt->src.val; 3597 ctxt->dst.val &= ~0xffffUL; 3598 ctxt->dst.val |= (unsigned long)swab16(tmp); 3599 break; 3600 case 4: 3601 ctxt->dst.val = swab32((u32)ctxt->src.val); 3602 break; 3603 case 8: 3604 ctxt->dst.val = swab64(ctxt->src.val); 3605 break; 3606 default: 3607 BUG(); 3608 } 3609 return X86EMUL_CONTINUE; 3610 } 3611
3612 static int em_cr_write(struct x86_emulate_ctxt *ctxt) 3613 { 3614 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) 3615 return emulate_gp(ctxt, 0); 3616
3617 /* Disable writeback. */ 3618 ctxt->dst.type = OP_NONE; 3619 return X86EMUL_CONTINUE; 3620 } 3621
3622 static int em_dr_write(struct x86_emulate_ctxt *ctxt) 3623 { 3624 unsigned long val; 3625
3626 if (ctxt->mode == X86EMUL_MODE_PROT64) 3627 val = ctxt->src.val & ~0ULL; 3628 else 3629 val = ctxt->src.val & ~0U; 3630
3631 /* #UD condition is already handled. */ 3632 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) 3633 return emulate_gp(ctxt, 0); 3634
3635 /* Disable writeback. */ 3636 ctxt->dst.type = OP_NONE; 3637 return X86EMUL_CONTINUE; 3638 } 3639
3640 static int em_wrmsr(struct x86_emulate_ctxt *ctxt) 3641 { 3642 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX); 3643 u64 msr_data; 3644 int r; 3645
3646 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) 3647 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); 3648 r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data); 3649
3650 if (r == X86EMUL_IO_NEEDED) 3651 return r; 3652
3653 if (r > 0) 3654 return emulate_gp(ctxt, 0); 3655
3656 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; 3657 } 3658
3659 static int em_rdmsr(struct x86_emulate_ctxt *ctxt) 3660 { 3661 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX); 3662 u64 msr_data; 3663 int r; 3664
3665 r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data); 3666
3667 if (r == X86EMUL_IO_NEEDED) 3668 return r; 3669
3670 if (r) 3671 return emulate_gp(ctxt, 0); 3672
3673 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; 3674 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; 3675 return X86EMUL_CONTINUE; 3676 } 3677
3678 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment) 3679 { 3680 if (segment > VCPU_SREG_GS && 3681 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && 3682 ctxt->ops->cpl(ctxt) > 0) 3683 return emulate_gp(ctxt, 0); 3684
3685 ctxt->dst.val = get_segment_selector(ctxt, segment); 3686 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) 3687 ctxt->dst.bytes = 2; 3688 return X86EMUL_CONTINUE; 3689 } 3690
3691 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) 3692 { 3693 if (ctxt->modrm_reg > VCPU_SREG_GS) 3694 return emulate_ud(ctxt); 3695
3696 return em_store_sreg(ctxt, ctxt->modrm_reg); 3697 } 3698
3699 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) 3700 { 3701 u16 sel = ctxt->src.val; 3702
3703 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) 3704 return emulate_ud(ctxt); 3705
3706 if (ctxt->modrm_reg == VCPU_SREG_SS) 3707 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; 3708
3709 /* Disable writeback. 
*/ 3710 ctxt->dst.type = OP_NONE; 3711 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); 3712 } 3713 3714 static int em_sldt(struct x86_emulate_ctxt *ctxt) 3715 { 3716 return em_store_sreg(ctxt, VCPU_SREG_LDTR); 3717 } 3718 3719 static int em_lldt(struct x86_emulate_ctxt *ctxt) 3720 { 3721 u16 sel = ctxt->src.val; 3722 3723 /* Disable writeback. */ 3724 ctxt->dst.type = OP_NONE; 3725 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); 3726 } 3727 3728 static int em_str(struct x86_emulate_ctxt *ctxt) 3729 { 3730 return em_store_sreg(ctxt, VCPU_SREG_TR); 3731 } 3732 3733 static int em_ltr(struct x86_emulate_ctxt *ctxt) 3734 { 3735 u16 sel = ctxt->src.val; 3736 3737 /* Disable writeback. */ 3738 ctxt->dst.type = OP_NONE; 3739 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); 3740 } 3741 3742 static int em_invlpg(struct x86_emulate_ctxt *ctxt) 3743 { 3744 int rc; 3745 ulong linear; 3746 3747 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); 3748 if (rc == X86EMUL_CONTINUE) 3749 ctxt->ops->invlpg(ctxt, linear); 3750 /* Disable writeback. */ 3751 ctxt->dst.type = OP_NONE; 3752 return X86EMUL_CONTINUE; 3753 } 3754 3755 static int em_clts(struct x86_emulate_ctxt *ctxt) 3756 { 3757 ulong cr0; 3758 3759 cr0 = ctxt->ops->get_cr(ctxt, 0); 3760 cr0 &= ~X86_CR0_TS; 3761 ctxt->ops->set_cr(ctxt, 0, cr0); 3762 return X86EMUL_CONTINUE; 3763 } 3764 3765 static int em_hypercall(struct x86_emulate_ctxt *ctxt) 3766 { 3767 int rc = ctxt->ops->fix_hypercall(ctxt); 3768 3769 if (rc != X86EMUL_CONTINUE) 3770 return rc; 3771 3772 /* Let the processor re-execute the fixed hypercall */ 3773 ctxt->_eip = ctxt->eip; 3774 /* Disable writeback. */ 3775 ctxt->dst.type = OP_NONE; 3776 return X86EMUL_CONTINUE; 3777 } 3778 3779 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, 3780 void (*get)(struct x86_emulate_ctxt *ctxt, 3781 struct desc_ptr *ptr)) 3782 { 3783 struct desc_ptr desc_ptr; 3784 3785 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && 3786 ctxt->ops->cpl(ctxt) > 0) 3787 return emulate_gp(ctxt, 0); 3788 3789 if (ctxt->mode == X86EMUL_MODE_PROT64) 3790 ctxt->op_bytes = 8; 3791 get(ctxt, &desc_ptr); 3792 if (ctxt->op_bytes == 2) { 3793 ctxt->op_bytes = 4; 3794 desc_ptr.address &= 0x00ffffff; 3795 } 3796 /* Disable writeback. */ 3797 ctxt->dst.type = OP_NONE; 3798 return segmented_write_std(ctxt, ctxt->dst.addr.mem, 3799 &desc_ptr, 2 + ctxt->op_bytes); 3800 } 3801 3802 static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3803 { 3804 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); 3805 } 3806 3807 static int em_sidt(struct x86_emulate_ctxt *ctxt) 3808 { 3809 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); 3810 } 3811 3812 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) 3813 { 3814 struct desc_ptr desc_ptr; 3815 int rc; 3816 3817 if (ctxt->mode == X86EMUL_MODE_PROT64) 3818 ctxt->op_bytes = 8; 3819 rc = read_descriptor(ctxt, ctxt->src.addr.mem, 3820 &desc_ptr.size, &desc_ptr.address, 3821 ctxt->op_bytes); 3822 if (rc != X86EMUL_CONTINUE) 3823 return rc; 3824 if (ctxt->mode == X86EMUL_MODE_PROT64 && 3825 emul_is_noncanonical_address(desc_ptr.address, ctxt)) 3826 return emulate_gp(ctxt, 0); 3827 if (lgdt) 3828 ctxt->ops->set_gdt(ctxt, &desc_ptr); 3829 else 3830 ctxt->ops->set_idt(ctxt, &desc_ptr); 3831 /* Disable writeback. 
*/ 3832 ctxt->dst.type = OP_NONE; 3833 return X86EMUL_CONTINUE; 3834 } 3835 3836 static int em_lgdt(struct x86_emulate_ctxt *ctxt) 3837 { 3838 return em_lgdt_lidt(ctxt, true); 3839 } 3840 3841 static int em_lidt(struct x86_emulate_ctxt *ctxt) 3842 { 3843 return em_lgdt_lidt(ctxt, false); 3844 } 3845 3846 static int em_smsw(struct x86_emulate_ctxt *ctxt) 3847 { 3848 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && 3849 ctxt->ops->cpl(ctxt) > 0) 3850 return emulate_gp(ctxt, 0); 3851 3852 if (ctxt->dst.type == OP_MEM) 3853 ctxt->dst.bytes = 2; 3854 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); 3855 return X86EMUL_CONTINUE; 3856 } 3857 3858 static int em_lmsw(struct x86_emulate_ctxt *ctxt) 3859 { 3860 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) 3861 | (ctxt->src.val & 0x0f)); 3862 ctxt->dst.type = OP_NONE; 3863 return X86EMUL_CONTINUE; 3864 } 3865 3866 static int em_loop(struct x86_emulate_ctxt *ctxt) 3867 { 3868 int rc = X86EMUL_CONTINUE; 3869 3870 register_address_increment(ctxt, VCPU_REGS_RCX, -1); 3871 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3872 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3873 rc = jmp_rel(ctxt, ctxt->src.val); 3874 3875 return rc; 3876 } 3877 3878 static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3879 { 3880 int rc = X86EMUL_CONTINUE; 3881 3882 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3883 rc = jmp_rel(ctxt, ctxt->src.val); 3884 3885 return rc; 3886 } 3887 3888 static int em_in(struct x86_emulate_ctxt *ctxt) 3889 { 3890 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, 3891 &ctxt->dst.val)) 3892 return X86EMUL_IO_NEEDED; 3893 3894 return X86EMUL_CONTINUE; 3895 } 3896 3897 static int em_out(struct x86_emulate_ctxt *ctxt) 3898 { 3899 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, 3900 &ctxt->src.val, 1); 3901 /* Disable writeback. 
*/ 3902 ctxt->dst.type = OP_NONE; 3903 return X86EMUL_CONTINUE; 3904 } 3905 3906 static int em_cli(struct x86_emulate_ctxt *ctxt) 3907 { 3908 if (emulator_bad_iopl(ctxt)) 3909 return emulate_gp(ctxt, 0); 3910 3911 ctxt->eflags &= ~X86_EFLAGS_IF; 3912 return X86EMUL_CONTINUE; 3913 } 3914 3915 static int em_sti(struct x86_emulate_ctxt *ctxt) 3916 { 3917 if (emulator_bad_iopl(ctxt)) 3918 return emulate_gp(ctxt, 0); 3919 3920 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; 3921 ctxt->eflags |= X86_EFLAGS_IF; 3922 return X86EMUL_CONTINUE; 3923 } 3924 3925 static int em_cpuid(struct x86_emulate_ctxt *ctxt) 3926 { 3927 u32 eax, ebx, ecx, edx; 3928 u64 msr = 0; 3929 3930 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr); 3931 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3932 ctxt->ops->cpl(ctxt)) { 3933 return emulate_gp(ctxt, 0); 3934 } 3935 3936 eax = reg_read(ctxt, VCPU_REGS_RAX); 3937 ecx = reg_read(ctxt, VCPU_REGS_RCX); 3938 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); 3939 *reg_write(ctxt, VCPU_REGS_RAX) = eax; 3940 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; 3941 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; 3942 *reg_write(ctxt, VCPU_REGS_RDX) = edx; 3943 return X86EMUL_CONTINUE; 3944 } 3945 3946 static int em_sahf(struct x86_emulate_ctxt *ctxt) 3947 { 3948 u32 flags; 3949 3950 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 3951 X86_EFLAGS_SF; 3952 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; 3953 3954 ctxt->eflags &= ~0xffUL; 3955 ctxt->eflags |= flags | X86_EFLAGS_FIXED; 3956 return X86EMUL_CONTINUE; 3957 } 3958 3959 static int em_lahf(struct x86_emulate_ctxt *ctxt) 3960 { 3961 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; 3962 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; 3963 return X86EMUL_CONTINUE; 3964 } 3965 3966 static int em_bswap(struct x86_emulate_ctxt *ctxt) 3967 { 3968 switch (ctxt->op_bytes) { 3969 #ifdef CONFIG_X86_64 3970 case 8: 3971 asm("bswap %0" : "+r"(ctxt->dst.val)); 3972 break; 3973 #endif 3974 default: 3975 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); 3976 break; 3977 } 3978 return X86EMUL_CONTINUE; 3979 } 3980 3981 static int em_clflush(struct x86_emulate_ctxt *ctxt) 3982 { 3983 /* emulating clflush regardless of cpuid */ 3984 return X86EMUL_CONTINUE; 3985 } 3986 3987 static int em_clflushopt(struct x86_emulate_ctxt *ctxt) 3988 { 3989 /* emulating clflushopt regardless of cpuid */ 3990 return X86EMUL_CONTINUE; 3991 } 3992 3993 static int em_movsxd(struct x86_emulate_ctxt *ctxt) 3994 { 3995 ctxt->dst.val = (s32) ctxt->src.val; 3996 return X86EMUL_CONTINUE; 3997 } 3998 3999 static int check_fxsr(struct x86_emulate_ctxt *ctxt) 4000 { 4001 if (!ctxt->ops->guest_has_fxsr(ctxt)) 4002 return emulate_ud(ctxt); 4003 4004 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 4005 return emulate_nm(ctxt); 4006 4007 /* 4008 * Don't emulate a case that should never be hit, instead of working 4009 * around a lack of fxsave64/fxrstor64 on old compilers. 4010 */ 4011 if (ctxt->mode >= X86EMUL_MODE_PROT64) 4012 return X86EMUL_UNHANDLEABLE; 4013 4014 return X86EMUL_CONTINUE; 4015 } 4016 4017 /* 4018 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save 4019 * and restore MXCSR. 
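 *
 * As a sketch of what fxstate_size() below computes from this:
 *
 *	64-bit guest:			offsetof(xmm_space[0]) + 16 * 16
 *	32-bit guest, CR4.OSFXSR=1:	offsetof(xmm_space[0]) +  8 * 16
 *	32-bit guest, CR4.OSFXSR=0:	offsetof(xmm_space[0]) (header only)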
4020 */
4021 static size_t __fxstate_size(int nregs)
4022 {
4023 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4024 }
4025
4026 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4027 {
4028 bool cr4_osfxsr;
4029 if (ctxt->mode == X86EMUL_MODE_PROT64)
4030 return __fxstate_size(16);
4031
4032 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4033 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4034 }
4035
4036 /*
4037 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4038 * 1) 16 bit mode
4039 * 2) 32 bit mode
4040 * - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
4041 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4042 * save and restore
4043 * 3) 64-bit mode without REX.W prefix
4044 * - like (2), but XMM 8-15 are being saved and restored
4045 * 4) 64-bit mode with REX.W prefix (i.e. fxsave64/fxrstor64)
4046 * - like (3), but FIP and FDP are 64 bit
4047 *
4048 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4049 * desired result. (4) is not emulated.
4050 *
4051 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4052 * and FPU DS) should match.
4053 */
4054 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4055 {
4056 struct fxregs_state fx_state;
4057 int rc;
4058
4059 rc = check_fxsr(ctxt);
4060 if (rc != X86EMUL_CONTINUE)
4061 return rc;
4062
4063 kvm_fpu_get();
4064
4065 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4066
4067 kvm_fpu_put();
4068
4069 if (rc != X86EMUL_CONTINUE)
4070 return rc;
4071
4072 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4073 fxstate_size(ctxt));
4074 }
4075
4076 /*
4077 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4078 * in the host registers (via FXSAVE) instead, so they won't be modified.
4079 * (preemption has to stay disabled until FXRSTOR).
4080 *
4081 * Use noinline to keep the stack for other functions called by callers small.
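 *
 * For example, a 32-bit guest with CR4.OSFXSR set supplies used_size ==
 * __fxstate_size(8); the bytes above that offset (XMM8-15) are then filled
 * from the host's own FXSAVE image so the following FXRSTOR leaves them
 * unchanged.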
4082 */ 4083 static noinline int fxregs_fixup(struct fxregs_state *fx_state, 4084 const size_t used_size) 4085 { 4086 struct fxregs_state fx_tmp; 4087 int rc; 4088 4089 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp)); 4090 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size, 4091 __fxstate_size(16) - used_size); 4092 4093 return rc; 4094 } 4095 4096 static int em_fxrstor(struct x86_emulate_ctxt *ctxt) 4097 { 4098 struct fxregs_state fx_state; 4099 int rc; 4100 size_t size; 4101 4102 rc = check_fxsr(ctxt); 4103 if (rc != X86EMUL_CONTINUE) 4104 return rc; 4105 4106 size = fxstate_size(ctxt); 4107 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); 4108 if (rc != X86EMUL_CONTINUE) 4109 return rc; 4110 4111 kvm_fpu_get(); 4112 4113 if (size < __fxstate_size(16)) { 4114 rc = fxregs_fixup(&fx_state, size); 4115 if (rc != X86EMUL_CONTINUE) 4116 goto out; 4117 } 4118 4119 if (fx_state.mxcsr >> 16) { 4120 rc = emulate_gp(ctxt, 0); 4121 goto out; 4122 } 4123 4124 if (rc == X86EMUL_CONTINUE) 4125 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); 4126 4127 out: 4128 kvm_fpu_put(); 4129 4130 return rc; 4131 } 4132 4133 static int em_xsetbv(struct x86_emulate_ctxt *ctxt) 4134 { 4135 u32 eax, ecx, edx; 4136 4137 eax = reg_read(ctxt, VCPU_REGS_RAX); 4138 edx = reg_read(ctxt, VCPU_REGS_RDX); 4139 ecx = reg_read(ctxt, VCPU_REGS_RCX); 4140 4141 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax)) 4142 return emulate_gp(ctxt, 0); 4143 4144 return X86EMUL_CONTINUE; 4145 } 4146 4147 static bool valid_cr(int nr) 4148 { 4149 switch (nr) { 4150 case 0: 4151 case 2 ... 4: 4152 case 8: 4153 return true; 4154 default: 4155 return false; 4156 } 4157 } 4158 4159 static int check_cr_access(struct x86_emulate_ctxt *ctxt) 4160 { 4161 if (!valid_cr(ctxt->modrm_reg)) 4162 return emulate_ud(ctxt); 4163 4164 return X86EMUL_CONTINUE; 4165 } 4166 4167 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) 4168 { 4169 unsigned long dr7; 4170 4171 ctxt->ops->get_dr(ctxt, 7, &dr7); 4172 4173 /* Check if DR7.Global_Enable is set */ 4174 return dr7 & (1 << 13); 4175 } 4176 4177 static int check_dr_read(struct x86_emulate_ctxt *ctxt) 4178 { 4179 int dr = ctxt->modrm_reg; 4180 u64 cr4; 4181 4182 if (dr > 7) 4183 return emulate_ud(ctxt); 4184 4185 cr4 = ctxt->ops->get_cr(ctxt, 4); 4186 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) 4187 return emulate_ud(ctxt); 4188 4189 if (check_dr7_gd(ctxt)) { 4190 ulong dr6; 4191 4192 ctxt->ops->get_dr(ctxt, 6, &dr6); 4193 dr6 &= ~DR_TRAP_BITS; 4194 dr6 |= DR6_BD | DR6_ACTIVE_LOW; 4195 ctxt->ops->set_dr(ctxt, 6, dr6); 4196 return emulate_db(ctxt); 4197 } 4198 4199 return X86EMUL_CONTINUE; 4200 } 4201 4202 static int check_dr_write(struct x86_emulate_ctxt *ctxt) 4203 { 4204 u64 new_val = ctxt->src.val64; 4205 int dr = ctxt->modrm_reg; 4206 4207 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) 4208 return emulate_gp(ctxt, 0); 4209 4210 return check_dr_read(ctxt); 4211 } 4212 4213 static int check_svme(struct x86_emulate_ctxt *ctxt) 4214 { 4215 u64 efer = 0; 4216 4217 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 4218 4219 if (!(efer & EFER_SVME)) 4220 return emulate_ud(ctxt); 4221 4222 return X86EMUL_CONTINUE; 4223 } 4224 4225 static int check_svme_pa(struct x86_emulate_ctxt *ctxt) 4226 { 4227 u64 rax = reg_read(ctxt, VCPU_REGS_RAX); 4228 4229 /* Valid physical address? 
*/ 4230 if (rax & 0xffff000000000000ULL)
4231 return emulate_gp(ctxt, 0);
4232
4233 return check_svme(ctxt);
4234 }
4235
4236 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4237 {
4238 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4239
4240 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4241 return emulate_gp(ctxt, 0);
4242
4243 return X86EMUL_CONTINUE;
4244 }
4245
4246 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4247 {
4248 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4249 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4250
4251 /*
4252 * VMware allows access to these pseudo-PMCs even when read via RDPMC
4253 * in Ring3 when CR4.PCE=0.
4254 */
4255 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4256 return X86EMUL_CONTINUE;
4257
4258 /*
4259 * If CR4.PCE is clear, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
4260 * check however is unnecessary because CPL is always 0 outside
4261 * protected mode.
4262 */
4263 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4264 ctxt->ops->check_pmc(ctxt, rcx))
4265 return emulate_gp(ctxt, 0);
4266
4267 return X86EMUL_CONTINUE;
4268 }
4269
4270 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4271 {
4272 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4273 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4274 return emulate_gp(ctxt, 0);
4275
4276 return X86EMUL_CONTINUE;
4277 }
4278
4279 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4280 {
4281 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4282 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4283 return emulate_gp(ctxt, 0);
4284
4285 return X86EMUL_CONTINUE;
4286 }
4287
4288 #define D(_y) { .flags = (_y) }
4289 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4290 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4291 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4292 #define N D(NotImpl)
4293 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4294 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4295 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4296 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4297 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4298 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4299 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4300 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4301 #define II(_f, _e, _i) \
4302 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4303 #define IIP(_f, _e, _i, _p) \
4304 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4305 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4306 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4307
4308 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4309 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4310 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4311 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4312 #define I2bvIP(_f, _e, _i, _p) \
4313 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4314
4315 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4316 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4317 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4318
4319 static const struct opcode group7_rm0[] = {
4320 N,
4321 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4322 N, N, N, N, N, N,
4323 };
4324
4325 static const struct opcode
group7_rm1[] = {
4326 DI(SrcNone | Priv, monitor),
4327 DI(SrcNone | Priv, mwait),
4328 N, N, N, N, N, N,
4329 };
4330
4331 static const struct opcode group7_rm2[] = {
4332 N,
4333 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4334 N, N, N, N, N, N,
4335 };
4336
4337 static const struct opcode group7_rm3[] = {
4338 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4339 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4340 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4341 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4342 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4343 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4344 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4345 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4346 };
4347
4348 static const struct opcode group7_rm7[] = {
4349 N,
4350 DIP(SrcNone, rdtscp, check_rdtsc),
4351 N, N, N, N, N, N,
4352 };
4353
4354 static const struct opcode group1[] = {
4355 F(Lock, em_add),
4356 F(Lock | PageTable, em_or),
4357 F(Lock, em_adc),
4358 F(Lock, em_sbb),
4359 F(Lock | PageTable, em_and),
4360 F(Lock, em_sub),
4361 F(Lock, em_xor),
4362 F(NoWrite, em_cmp),
4363 };
4364
4365 static const struct opcode group1A[] = {
4366 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4367 };
4368
4369 static const struct opcode group2[] = {
4370 F(DstMem | ModRM, em_rol),
4371 F(DstMem | ModRM, em_ror),
4372 F(DstMem | ModRM, em_rcl),
4373 F(DstMem | ModRM, em_rcr),
4374 F(DstMem | ModRM, em_shl),
4375 F(DstMem | ModRM, em_shr),
4376 F(DstMem | ModRM, em_shl), /* /6 is an undocumented alias of /4 (shl) */
4377 F(DstMem | ModRM, em_sar),
4378 };
4379
4380 static const struct opcode group3[] = {
4381 F(DstMem | SrcImm | NoWrite, em_test),
4382 F(DstMem | SrcImm | NoWrite, em_test), /* /1 is an undocumented alias of /0 (test) */
4383 F(DstMem | SrcNone | Lock, em_not),
4384 F(DstMem | SrcNone | Lock, em_neg),
4385 F(DstXacc | Src2Mem, em_mul_ex),
4386 F(DstXacc | Src2Mem, em_imul_ex),
4387 F(DstXacc | Src2Mem, em_div_ex),
4388 F(DstXacc | Src2Mem, em_idiv_ex),
4389 };
4390
4391 static const struct opcode group4[] = {
4392 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4393 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4394 N, N, N, N, N, N,
4395 };
4396
4397 static const struct opcode group5[] = {
4398 F(DstMem | SrcNone | Lock, em_inc),
4399 F(DstMem | SrcNone | Lock, em_dec),
4400 I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
4401 I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4402 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4403 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4404 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4405 };
4406
4407 static const struct opcode group6[] = {
4408 II(Prot | DstMem, em_sldt, sldt),
4409 II(Prot | DstMem, em_str, str),
4410 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4411 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4412 N, N, N, N,
4413 };
4414
4415 static const struct group_dual group7 = { {
4416 II(Mov | DstMem, em_sgdt, sgdt),
4417 II(Mov | DstMem, em_sidt, sidt),
4418 II(SrcMem | Priv, em_lgdt, lgdt),
4419 II(SrcMem | Priv, em_lidt, lidt),
4420 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4421 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4422 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4423 }, {
4424 EXT(0, group7_rm0),
4425 EXT(0, group7_rm1),
4426 EXT(0, group7_rm2),
4427 EXT(0, group7_rm3),
4428 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4429 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4430 EXT(0, group7_rm7),
4431 } };
4432
4433 static const struct opcode group8[] = {
4434 N, N, N, N,
4435 F(DstMem | SrcImmByte |
NoWrite, em_bt),
4436 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4437 F(DstMem | SrcImmByte | Lock, em_btr),
4438 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4439 };
4440
4441 /*
4442 * The "memory" destination is actually always a register, since we come
4443 * from the register case of group9.
4444 */
4445 static const struct gprefix pfx_0f_c7_7 = {
4446 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4447 };
4448
4449
4450 static const struct group_dual group9 = { {
4451 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4452 }, {
4453 N, N, N, N, N, N, N,
4454 GP(0, &pfx_0f_c7_7),
4455 } };
4456
4457 static const struct opcode group11[] = {
4458 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4459 X7(D(Undefined)),
4460 };
4461
4462 static const struct gprefix pfx_0f_ae_7 = {
4463 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4464 };
4465
4466 static const struct group_dual group15 = { {
4467 I(ModRM | Aligned16, em_fxsave),
4468 I(ModRM | Aligned16, em_fxrstor),
4469 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4470 }, {
4471 N, N, N, N, N, N, N, N,
4472 } };
4473
4474 static const struct gprefix pfx_0f_6f_0f_7f = {
4475 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4476 };
4477
4478 static const struct instr_dual instr_dual_0f_2b = {
4479 I(0, em_mov), N
4480 };
4481
4482 static const struct gprefix pfx_0f_2b = {
4483 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4484 };
4485
4486 static const struct gprefix pfx_0f_10_0f_11 = {
4487 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4488 };
4489
4490 static const struct gprefix pfx_0f_28_0f_29 = {
4491 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4492 };
4493
4494 static const struct gprefix pfx_0f_e7 = {
4495 N, I(Sse, em_mov), N, N,
4496 };
4497
4498 static const struct escape escape_d9 = { {
4499 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4500 }, {
4501 /* 0xC0 - 0xC7 */
4502 N, N, N, N, N, N, N, N,
4503 /* 0xC8 - 0xCF */
4504 N, N, N, N, N, N, N, N,
4505 /* 0xD0 - 0xD7 */
4506 N, N, N, N, N, N, N, N,
4507 /* 0xD8 - 0xDF */
4508 N, N, N, N, N, N, N, N,
4509 /* 0xE0 - 0xE7 */
4510 N, N, N, N, N, N, N, N,
4511 /* 0xE8 - 0xEF */
4512 N, N, N, N, N, N, N, N,
4513 /* 0xF0 - 0xF7 */
4514 N, N, N, N, N, N, N, N,
4515 /* 0xF8 - 0xFF */
4516 N, N, N, N, N, N, N, N,
4517 } };
4518
4519 static const struct escape escape_db = { {
4520 N, N, N, N, N, N, N, N,
4521 }, {
4522 /* 0xC0 - 0xC7 */
4523 N, N, N, N, N, N, N, N,
4524 /* 0xC8 - 0xCF */
4525 N, N, N, N, N, N, N, N,
4526 /* 0xD0 - 0xD7 */
4527 N, N, N, N, N, N, N, N,
4528 /* 0xD8 - 0xDF */
4529 N, N, N, N, N, N, N, N,
4530 /* 0xE0 - 0xE7 */
4531 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4532 /* 0xE8 - 0xEF */
4533 N, N, N, N, N, N, N, N,
4534 /* 0xF0 - 0xF7 */
4535 N, N, N, N, N, N, N, N,
4536 /* 0xF8 - 0xFF */
4537 N, N, N, N, N, N, N, N,
4538 } };
4539
4540 static const struct escape escape_dd = { {
4541 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4542 }, {
4543 /* 0xC0 - 0xC7 */
4544 N, N, N, N, N, N, N, N,
4545 /* 0xC8 - 0xCF */
4546 N, N, N, N, N, N, N, N,
4547 /* 0xD0 - 0xD7 */
4548 N, N, N, N, N, N, N, N,
4549 /* 0xD8 - 0xDF */
4550 N, N, N, N, N, N, N, N,
4551 /* 0xE0 - 0xE7 */
4552 N, N, N, N, N, N, N, N,
4553 /* 0xE8 - 0xEF */
4554 N, N, N, N, N, N, N, N,
4555 /* 0xF0 - 0xF7 */
4556 N, N, N, N, N, N, N, N,
4557 /* 0xF8 - 0xFF */
4558 N, N, N, N, N, N, N, N,
4559 } };
4560
4561 static const struct instr_dual instr_dual_0f_c3 = {
4562 I(DstMem | SrcReg |
ModRM | No16 | Mov, em_mov), N 4563 }; 4564 4565 static const struct mode_dual mode_dual_63 = { 4566 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd) 4567 }; 4568 4569 static const struct opcode opcode_table[256] = { 4570 /* 0x00 - 0x07 */ 4571 F6ALU(Lock, em_add), 4572 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), 4573 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), 4574 /* 0x08 - 0x0F */ 4575 F6ALU(Lock | PageTable, em_or), 4576 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), 4577 N, 4578 /* 0x10 - 0x17 */ 4579 F6ALU(Lock, em_adc), 4580 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), 4581 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), 4582 /* 0x18 - 0x1F */ 4583 F6ALU(Lock, em_sbb), 4584 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), 4585 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), 4586 /* 0x20 - 0x27 */ 4587 F6ALU(Lock | PageTable, em_and), N, N, 4588 /* 0x28 - 0x2F */ 4589 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), 4590 /* 0x30 - 0x37 */ 4591 F6ALU(Lock, em_xor), N, N, 4592 /* 0x38 - 0x3F */ 4593 F6ALU(NoWrite, em_cmp), N, N, 4594 /* 0x40 - 0x4F */ 4595 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), 4596 /* 0x50 - 0x57 */ 4597 X8(I(SrcReg | Stack, em_push)), 4598 /* 0x58 - 0x5F */ 4599 X8(I(DstReg | Stack, em_pop)), 4600 /* 0x60 - 0x67 */ 4601 I(ImplicitOps | Stack | No64, em_pusha), 4602 I(ImplicitOps | Stack | No64, em_popa), 4603 N, MD(ModRM, &mode_dual_63), 4604 N, N, N, N, 4605 /* 0x68 - 0x6F */ 4606 I(SrcImm | Mov | Stack, em_push), 4607 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 4608 I(SrcImmByte | Mov | Stack, em_push), 4609 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 4610 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ 4611 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ 4612 /* 0x70 - 0x7F */ 4613 X16(D(SrcImmByte | NearBranch | IsBranch)), 4614 /* 0x80 - 0x87 */ 4615 G(ByteOp | DstMem | SrcImm, group1), 4616 G(DstMem | SrcImm, group1), 4617 G(ByteOp | DstMem | SrcImm | No64, group1), 4618 G(DstMem | SrcImmByte, group1), 4619 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), 4620 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), 4621 /* 0x88 - 0x8F */ 4622 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), 4623 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), 4624 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), 4625 D(ModRM | SrcMem | NoAccess | DstReg), 4626 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), 4627 G(0, group1A), 4628 /* 0x90 - 0x97 */ 4629 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), 4630 /* 0x98 - 0x9F */ 4631 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), 4632 I(SrcImmFAddr | No64 | IsBranch, em_call_far), N, 4633 II(ImplicitOps | Stack, em_pushf, pushf), 4634 II(ImplicitOps | Stack, em_popf, popf), 4635 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), 4636 /* 0xA0 - 0xA7 */ 4637 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), 4638 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), 4639 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov), 4640 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r), 4641 /* 0xA8 - 0xAF */ 4642 F2bv(DstAcc | SrcImm | NoWrite, em_test), 4643 I2bv(SrcAcc | DstDI | Mov | String, em_mov), 4644 I2bv(SrcSI | DstAcc | Mov | String, em_mov), 4645 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r), 4646 /* 0xB0 - 0xB7 */ 4647 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), 4648 /* 0xB8 - 0xBF */ 4649 X8(I(DstReg | 
SrcImm64 | Mov, em_mov)), 4650 /* 0xC0 - 0xC7 */ 4651 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), 4652 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm), 4653 I(ImplicitOps | NearBranch | IsBranch, em_ret), 4654 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), 4655 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), 4656 G(ByteOp, group11), G(0, group11), 4657 /* 0xC8 - 0xCF */ 4658 I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter), 4659 I(Stack | IsBranch, em_leave), 4660 I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm), 4661 I(ImplicitOps | IsBranch, em_ret_far), 4662 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn), 4663 D(ImplicitOps | No64 | IsBranch), 4664 II(ImplicitOps | IsBranch, em_iret, iret), 4665 /* 0xD0 - 0xD7 */ 4666 G(Src2One | ByteOp, group2), G(Src2One, group2), 4667 G(Src2CL | ByteOp, group2), G(Src2CL, group2), 4668 I(DstAcc | SrcImmUByte | No64, em_aam), 4669 I(DstAcc | SrcImmUByte | No64, em_aad), 4670 F(DstAcc | ByteOp | No64, em_salc), 4671 I(DstAcc | SrcXLat | ByteOp, em_mov), 4672 /* 0xD8 - 0xDF */ 4673 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, 4674 /* 0xE0 - 0xE7 */ 4675 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)), 4676 I(SrcImmByte | NearBranch | IsBranch, em_jcxz), 4677 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), 4678 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), 4679 /* 0xE8 - 0xEF */ 4680 I(SrcImm | NearBranch | IsBranch, em_call), 4681 D(SrcImm | ImplicitOps | NearBranch | IsBranch), 4682 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far), 4683 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch), 4684 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), 4685 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), 4686 /* 0xF0 - 0xF7 */ 4687 N, DI(ImplicitOps, icebp), N, N, 4688 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 4689 G(ByteOp, group3), G(0, group3), 4690 /* 0xF8 - 0xFF */ 4691 D(ImplicitOps), D(ImplicitOps), 4692 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), 4693 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), 4694 }; 4695 4696 static const struct opcode twobyte_table[256] = { 4697 /* 0x00 - 0x0F */ 4698 G(0, group6), GD(0, &group7), N, N, 4699 N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall), 4700 II(ImplicitOps | Priv, em_clts, clts), N, 4701 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 4702 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, 4703 /* 0x10 - 0x1F */ 4704 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11), 4705 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11), 4706 N, N, N, N, N, N, 4707 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */ 4708 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, 4709 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ 4710 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ 4711 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ 4712 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */ 4713 /* 0x20 - 0x2F */ 4714 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access), 4715 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), 4716 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, 4717 check_cr_access), 4718 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, 4719 check_dr_write), 4720 N, N, N, N, 4721 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), 4722 GP(ModRM | 
DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4723 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4724 N, N, N, N,
4725 /* 0x30 - 0x3F */
4726 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4727 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4728 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4729 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4730 I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4731 I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4732 N, N,
4733 N, N, N, N, N, N, N, N,
4734 /* 0x40 - 0x4F */
4735 X16(D(DstReg | SrcMem | ModRM)),
4736 /* 0x50 - 0x5F */
4737 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4738 /* 0x60 - 0x6F */
4739 N, N, N, N,
4740 N, N, N, N,
4741 N, N, N, N,
4742 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4743 /* 0x70 - 0x7F */
4744 N, N, N, N,
4745 N, N, N, N,
4746 N, N, N, N,
4747 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4748 /* 0x80 - 0x8F */
4749 X16(D(SrcImm | NearBranch | IsBranch)),
4750 /* 0x90 - 0x9F */
4751 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4752 /* 0xA0 - 0xA7 */
4753 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4754 II(ImplicitOps, em_cpuid, cpuid),
4755 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4756 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4757 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4758 /* 0xA8 - 0xAF */
4759 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4760 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4761 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4762 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4763 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4764 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4765 /* 0xB0 - 0xB7 */
4766 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4767 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4768 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4769 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4770 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4771 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4772 /* 0xB8 - 0xBF */
4773 N, N,
4774 G(BitOp, group8),
4775 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4776 I(DstReg | SrcMem | ModRM, em_bsf_c),
4777 I(DstReg | SrcMem | ModRM, em_bsr_c),
4778 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4779 /* 0xC0 - 0xC7 */
4780 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4781 N, ID(0, &instr_dual_0f_c3),
4782 N, N, N, GD(0, &group9),
4783 /* 0xC8 - 0xCF */
4784 X8(I(DstReg, em_bswap)),
4785 /* 0xD0 - 0xDF */
4786 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4787 /* 0xE0 - 0xEF */
4788 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4789 N, N, N, N, N, N, N, N,
4790 /* 0xF0 - 0xFF */
4791 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4792 };
4793
4794 static const struct instr_dual instr_dual_0f_38_f0 = {
4795 I(DstReg | SrcMem | Mov, em_movbe), N
4796 };
4797
4798 static const struct instr_dual instr_dual_0f_38_f1 = {
4799 I(DstMem | SrcReg | Mov, em_movbe), N
4800 };
4801
4802 static const struct gprefix three_byte_0f_38_f0 = {
4803 ID(0, &instr_dual_0f_38_f0), N, N, N
4804 };
4805
4806 static const struct gprefix three_byte_0f_38_f1 = {
4807 ID(0, &instr_dual_0f_38_f1), N, N, N
4808 };
4809
4810 /*
4811 * Insns below are selected by the 66/F2/F3 prefix; the table itself is
4812 * indexed by the third opcode byte.
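 * For example, 0f 38 f0 with no SIMD prefix selects the pfx_no slot of
 * three_byte_0f_38_f0 and decodes as movbe; the 66/f2/f3 slots are N, so
 * e.g. f2 0f 38 f0 (crc32) is not emulated.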
4813 */ 4814 static const struct opcode opcode_map_0f_38[256] = { 4815 /* 0x00 - 0x7f */ 4816 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), 4817 /* 0x80 - 0xef */ 4818 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), 4819 /* 0xf0 - 0xf1 */ 4820 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0), 4821 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1), 4822 /* 0xf2 - 0xff */ 4823 N, N, X4(N), X8(N) 4824 }; 4825 4826 #undef D 4827 #undef N 4828 #undef G 4829 #undef GD 4830 #undef I 4831 #undef GP 4832 #undef EXT 4833 #undef MD 4834 #undef ID 4835 4836 #undef D2bv 4837 #undef D2bvIP 4838 #undef I2bv 4839 #undef I2bvIP 4840 #undef I6ALU 4841 4842 static unsigned imm_size(struct x86_emulate_ctxt *ctxt) 4843 { 4844 unsigned size; 4845 4846 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4847 if (size == 8) 4848 size = 4; 4849 return size; 4850 } 4851 4852 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, 4853 unsigned size, bool sign_extension) 4854 { 4855 int rc = X86EMUL_CONTINUE; 4856 4857 op->type = OP_IMM; 4858 op->bytes = size; 4859 op->addr.mem.ea = ctxt->_eip; 4860 /* NB. Immediates are sign-extended as necessary. */ 4861 switch (op->bytes) { 4862 case 1: 4863 op->val = insn_fetch(s8, ctxt); 4864 break; 4865 case 2: 4866 op->val = insn_fetch(s16, ctxt); 4867 break; 4868 case 4: 4869 op->val = insn_fetch(s32, ctxt); 4870 break; 4871 case 8: 4872 op->val = insn_fetch(s64, ctxt); 4873 break; 4874 } 4875 if (!sign_extension) { 4876 switch (op->bytes) { 4877 case 1: 4878 op->val &= 0xff; 4879 break; 4880 case 2: 4881 op->val &= 0xffff; 4882 break; 4883 case 4: 4884 op->val &= 0xffffffff; 4885 break; 4886 } 4887 } 4888 done: 4889 return rc; 4890 } 4891 4892 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, 4893 unsigned d) 4894 { 4895 int rc = X86EMUL_CONTINUE; 4896 4897 switch (d) { 4898 case OpReg: 4899 decode_register_operand(ctxt, op); 4900 break; 4901 case OpImmUByte: 4902 rc = decode_imm(ctxt, op, 1, false); 4903 break; 4904 case OpMem: 4905 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4906 mem_common: 4907 *op = ctxt->memop; 4908 ctxt->memopp = op; 4909 if (ctxt->d & BitOp) 4910 fetch_bit_operand(ctxt); 4911 op->orig_val = op->val; 4912 break; 4913 case OpMem64: 4914 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; 4915 goto mem_common; 4916 case OpAcc: 4917 op->type = OP_REG; 4918 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4919 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 4920 fetch_register_operand(op); 4921 op->orig_val = op->val; 4922 break; 4923 case OpAccLo: 4924 op->type = OP_REG; 4925 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; 4926 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); 4927 fetch_register_operand(op); 4928 op->orig_val = op->val; 4929 break; 4930 case OpAccHi: 4931 if (ctxt->d & ByteOp) { 4932 op->type = OP_NONE; 4933 break; 4934 } 4935 op->type = OP_REG; 4936 op->bytes = ctxt->op_bytes; 4937 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 4938 fetch_register_operand(op); 4939 op->orig_val = op->val; 4940 break; 4941 case OpDI: 4942 op->type = OP_MEM; 4943 op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; 4944 op->addr.mem.ea = 4945 register_address(ctxt, VCPU_REGS_RDI); 4946 op->addr.mem.seg = VCPU_SREG_ES; 4947 op->val = 0; 4948 op->count = 1; 4949 break; 4950 case OpDX: 4951 op->type = OP_REG; 4952 op->bytes = 2; 4953 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); 4954 fetch_register_operand(op); 4955 break; 4956 case OpCL: 4957 op->type = OP_IMM; 4958 op->bytes = 1; 4959 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; 4960 break; 4961 case OpImmByte: 4962 rc = decode_imm(ctxt, op, 1, true); 4963 break; 4964 case OpOne: 4965 op->type = OP_IMM; 4966 op->bytes = 1; 4967 op->val = 1; 4968 break; 4969 case OpImm: 4970 rc = decode_imm(ctxt, op, imm_size(ctxt), true); 4971 break; 4972 case OpImm64: 4973 rc = decode_imm(ctxt, op, ctxt->op_bytes, true); 4974 break; 4975 case OpMem8: 4976 ctxt->memop.bytes = 1; 4977 if (ctxt->memop.type == OP_REG) { 4978 ctxt->memop.addr.reg = decode_register(ctxt, 4979 ctxt->modrm_rm, true); 4980 fetch_register_operand(&ctxt->memop); 4981 } 4982 goto mem_common; 4983 case OpMem16: 4984 ctxt->memop.bytes = 2; 4985 goto mem_common; 4986 case OpMem32: 4987 ctxt->memop.bytes = 4; 4988 goto mem_common; 4989 case OpImmU16: 4990 rc = decode_imm(ctxt, op, 2, false); 4991 break; 4992 case OpImmU: 4993 rc = decode_imm(ctxt, op, imm_size(ctxt), false); 4994 break; 4995 case OpSI: 4996 op->type = OP_MEM; 4997 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 4998 op->addr.mem.ea = 4999 register_address(ctxt, VCPU_REGS_RSI); 5000 op->addr.mem.seg = ctxt->seg_override; 5001 op->val = 0; 5002 op->count = 1; 5003 break; 5004 case OpXLat: 5005 op->type = OP_MEM; 5006 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 5007 op->addr.mem.ea = 5008 address_mask(ctxt, 5009 reg_read(ctxt, VCPU_REGS_RBX) + 5010 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); 5011 op->addr.mem.seg = ctxt->seg_override; 5012 op->val = 0; 5013 break; 5014 case OpImmFAddr: 5015 op->type = OP_IMM; 5016 op->addr.mem.ea = ctxt->_eip; 5017 op->bytes = ctxt->op_bytes + 2; 5018 insn_fetch_arr(op->valptr, op->bytes, ctxt); 5019 break; 5020 case OpMemFAddr: 5021 ctxt->memop.bytes = ctxt->op_bytes + 2; 5022 goto mem_common; 5023 case OpES: 5024 op->type = OP_IMM; 5025 op->val = VCPU_SREG_ES; 5026 break; 5027 case OpCS: 5028 op->type = OP_IMM; 5029 op->val = VCPU_SREG_CS; 5030 break; 5031 case OpSS: 5032 op->type = OP_IMM; 5033 op->val = VCPU_SREG_SS; 5034 break; 5035 case OpDS: 5036 op->type = OP_IMM; 5037 op->val = VCPU_SREG_DS; 5038 break; 5039 case OpFS: 5040 op->type = OP_IMM; 5041 op->val = VCPU_SREG_FS; 5042 break; 5043 case OpGS: 5044 op->type = OP_IMM; 5045 op->val = VCPU_SREG_GS; 5046 break; 5047 case OpImplicit: 5048 /* Special instructions do their own operand decoding. */ 5049 default: 5050 op->type = OP_NONE; /* Disable writeback. 
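 * Operands left as OP_NONE are simply skipped when results are
 * committed by writeback().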
*/ 5051 break; 5052 } 5053 5054 done: 5055 return rc; 5056 } 5057 5058 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type) 5059 { 5060 int rc = X86EMUL_CONTINUE; 5061 int mode = ctxt->mode; 5062 int def_op_bytes, def_ad_bytes, goffset, simd_prefix; 5063 bool op_prefix = false; 5064 bool has_seg_override = false; 5065 struct opcode opcode; 5066 u16 dummy; 5067 struct desc_struct desc; 5068 5069 ctxt->memop.type = OP_NONE; 5070 ctxt->memopp = NULL; 5071 ctxt->_eip = ctxt->eip; 5072 ctxt->fetch.ptr = ctxt->fetch.data; 5073 ctxt->fetch.end = ctxt->fetch.data + insn_len; 5074 ctxt->opcode_len = 1; 5075 ctxt->intercept = x86_intercept_none; 5076 if (insn_len > 0) 5077 memcpy(ctxt->fetch.data, insn, insn_len); 5078 else { 5079 rc = __do_insn_fetch_bytes(ctxt, 1); 5080 if (rc != X86EMUL_CONTINUE) 5081 goto done; 5082 } 5083 5084 switch (mode) { 5085 case X86EMUL_MODE_REAL: 5086 case X86EMUL_MODE_VM86: 5087 def_op_bytes = def_ad_bytes = 2; 5088 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); 5089 if (desc.d) 5090 def_op_bytes = def_ad_bytes = 4; 5091 break; 5092 case X86EMUL_MODE_PROT16: 5093 def_op_bytes = def_ad_bytes = 2; 5094 break; 5095 case X86EMUL_MODE_PROT32: 5096 def_op_bytes = def_ad_bytes = 4; 5097 break; 5098 #ifdef CONFIG_X86_64 5099 case X86EMUL_MODE_PROT64: 5100 def_op_bytes = 4; 5101 def_ad_bytes = 8; 5102 break; 5103 #endif 5104 default: 5105 return EMULATION_FAILED; 5106 } 5107 5108 ctxt->op_bytes = def_op_bytes; 5109 ctxt->ad_bytes = def_ad_bytes; 5110 5111 /* Legacy prefixes. */ 5112 for (;;) { 5113 switch (ctxt->b = insn_fetch(u8, ctxt)) { 5114 case 0x66: /* operand-size override */ 5115 op_prefix = true; 5116 /* switch between 2/4 bytes */ 5117 ctxt->op_bytes = def_op_bytes ^ 6; 5118 break; 5119 case 0x67: /* address-size override */ 5120 if (mode == X86EMUL_MODE_PROT64) 5121 /* switch between 4/8 bytes */ 5122 ctxt->ad_bytes = def_ad_bytes ^ 12; 5123 else 5124 /* switch between 2/4 bytes */ 5125 ctxt->ad_bytes = def_ad_bytes ^ 6; 5126 break; 5127 case 0x26: /* ES override */ 5128 has_seg_override = true; 5129 ctxt->seg_override = VCPU_SREG_ES; 5130 break; 5131 case 0x2e: /* CS override */ 5132 has_seg_override = true; 5133 ctxt->seg_override = VCPU_SREG_CS; 5134 break; 5135 case 0x36: /* SS override */ 5136 has_seg_override = true; 5137 ctxt->seg_override = VCPU_SREG_SS; 5138 break; 5139 case 0x3e: /* DS override */ 5140 has_seg_override = true; 5141 ctxt->seg_override = VCPU_SREG_DS; 5142 break; 5143 case 0x64: /* FS override */ 5144 has_seg_override = true; 5145 ctxt->seg_override = VCPU_SREG_FS; 5146 break; 5147 case 0x65: /* GS override */ 5148 has_seg_override = true; 5149 ctxt->seg_override = VCPU_SREG_GS; 5150 break; 5151 case 0x40 ... 0x4f: /* REX */ 5152 if (mode != X86EMUL_MODE_PROT64) 5153 goto done_prefixes; 5154 ctxt->rex_prefix = ctxt->b; 5155 continue; 5156 case 0xf0: /* LOCK */ 5157 ctxt->lock_prefix = 1; 5158 break; 5159 case 0xf2: /* REPNE/REPNZ */ 5160 case 0xf3: /* REP/REPE/REPZ */ 5161 ctxt->rep_prefix = ctxt->b; 5162 break; 5163 default: 5164 goto done_prefixes; 5165 } 5166 5167 /* Any legacy prefix after a REX prefix nullifies its effect. */ 5168 5169 ctxt->rex_prefix = 0; 5170 } 5171 5172 done_prefixes: 5173 5174 /* REX prefix. */ 5175 if (ctxt->rex_prefix & 8) 5176 ctxt->op_bytes = 8; /* REX.W */ 5177 5178 /* Opcode byte(s). */ 5179 opcode = opcode_table[ctxt->b]; 5180 /* Two-byte opcode? 
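 * All two-byte opcodes begin with the 0x0f escape byte; 0x0f 0x38 escapes
 * further into the three-byte 0f_38 map (e.g. movbe).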
*/ 5181 if (ctxt->b == 0x0f) { 5182 ctxt->opcode_len = 2; 5183 ctxt->b = insn_fetch(u8, ctxt); 5184 opcode = twobyte_table[ctxt->b]; 5185 5186 /* 0F_38 opcode map */ 5187 if (ctxt->b == 0x38) { 5188 ctxt->opcode_len = 3; 5189 ctxt->b = insn_fetch(u8, ctxt); 5190 opcode = opcode_map_0f_38[ctxt->b]; 5191 } 5192 } 5193 ctxt->d = opcode.flags; 5194 5195 if (ctxt->d & ModRM) 5196 ctxt->modrm = insn_fetch(u8, ctxt); 5197 5198 /* vex-prefix instructions are not implemented */ 5199 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && 5200 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { 5201 ctxt->d = NotImpl; 5202 } 5203 5204 while (ctxt->d & GroupMask) { 5205 switch (ctxt->d & GroupMask) { 5206 case Group: 5207 goffset = (ctxt->modrm >> 3) & 7; 5208 opcode = opcode.u.group[goffset]; 5209 break; 5210 case GroupDual: 5211 goffset = (ctxt->modrm >> 3) & 7; 5212 if ((ctxt->modrm >> 6) == 3) 5213 opcode = opcode.u.gdual->mod3[goffset]; 5214 else 5215 opcode = opcode.u.gdual->mod012[goffset]; 5216 break; 5217 case RMExt: 5218 goffset = ctxt->modrm & 7; 5219 opcode = opcode.u.group[goffset]; 5220 break; 5221 case Prefix: 5222 if (ctxt->rep_prefix && op_prefix) 5223 return EMULATION_FAILED; 5224 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; 5225 switch (simd_prefix) { 5226 case 0x00: opcode = opcode.u.gprefix->pfx_no; break; 5227 case 0x66: opcode = opcode.u.gprefix->pfx_66; break; 5228 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; 5229 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; 5230 } 5231 break; 5232 case Escape: 5233 if (ctxt->modrm > 0xbf) { 5234 size_t size = ARRAY_SIZE(opcode.u.esc->high); 5235 u32 index = array_index_nospec( 5236 ctxt->modrm - 0xc0, size); 5237 5238 opcode = opcode.u.esc->high[index]; 5239 } else { 5240 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; 5241 } 5242 break; 5243 case InstrDual: 5244 if ((ctxt->modrm >> 6) == 3) 5245 opcode = opcode.u.idual->mod3; 5246 else 5247 opcode = opcode.u.idual->mod012; 5248 break; 5249 case ModeDual: 5250 if (ctxt->mode == X86EMUL_MODE_PROT64) 5251 opcode = opcode.u.mdual->mode64; 5252 else 5253 opcode = opcode.u.mdual->mode32; 5254 break; 5255 default: 5256 return EMULATION_FAILED; 5257 } 5258 5259 ctxt->d &= ~(u64)GroupMask; 5260 ctxt->d |= opcode.flags; 5261 } 5262 5263 ctxt->is_branch = opcode.flags & IsBranch; 5264 5265 /* Unrecognised? */ 5266 if (ctxt->d == 0) 5267 return EMULATION_FAILED; 5268 5269 ctxt->execute = opcode.u.execute; 5270 5271 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) && 5272 likely(!(ctxt->d & EmulateOnUD))) 5273 return EMULATION_FAILED; 5274 5275 if (unlikely(ctxt->d & 5276 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| 5277 No16))) { 5278 /* 5279 * These are copied unconditionally here, and checked unconditionally 5280 * in x86_emulate_insn. 5281 */ 5282 ctxt->check_perm = opcode.check_perm; 5283 ctxt->intercept = opcode.intercept; 5284 5285 if (ctxt->d & NotImpl) 5286 return EMULATION_FAILED; 5287 5288 if (mode == X86EMUL_MODE_PROT64) { 5289 if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) 5290 ctxt->op_bytes = 8; 5291 else if (ctxt->d & NearBranch) 5292 ctxt->op_bytes = 8; 5293 } 5294 5295 if (ctxt->d & Op3264) { 5296 if (mode == X86EMUL_MODE_PROT64) 5297 ctxt->op_bytes = 8; 5298 else 5299 ctxt->op_bytes = 4; 5300 } 5301 5302 if ((ctxt->d & No16) && ctxt->op_bytes == 2) 5303 ctxt->op_bytes = 4; 5304 5305 if (ctxt->d & Sse) 5306 ctxt->op_bytes = 16; 5307 else if (ctxt->d & Mmx) 5308 ctxt->op_bytes = 8; 5309 } 5310 5311 /* ModRM and SIB bytes. 
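 * ModRM is laid out as mod[7:6] reg[5:3] rm[2:0].  For the group7 escape
 * (0f 01), for instance, the GroupDual case above selected the mod012 table
 * (sgdt/sidt/lgdt/...) when mod != 3 and the mod3 table (monitor/mwait,
 * vmrun, ..., via RMExt) when mod == 3.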
*/ 5312 if (ctxt->d & ModRM) { 5313 rc = decode_modrm(ctxt, &ctxt->memop); 5314 if (!has_seg_override) { 5315 has_seg_override = true; 5316 ctxt->seg_override = ctxt->modrm_seg; 5317 } 5318 } else if (ctxt->d & MemAbs) 5319 rc = decode_abs(ctxt, &ctxt->memop); 5320 if (rc != X86EMUL_CONTINUE) 5321 goto done; 5322 5323 if (!has_seg_override) 5324 ctxt->seg_override = VCPU_SREG_DS; 5325 5326 ctxt->memop.addr.mem.seg = ctxt->seg_override; 5327 5328 /* 5329 * Decode and fetch the source operand: register, memory 5330 * or immediate. 5331 */ 5332 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); 5333 if (rc != X86EMUL_CONTINUE) 5334 goto done; 5335 5336 /* 5337 * Decode and fetch the second source operand: register, memory 5338 * or immediate. 5339 */ 5340 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); 5341 if (rc != X86EMUL_CONTINUE) 5342 goto done; 5343 5344 /* Decode and fetch the destination operand: register or memory. */ 5345 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); 5346 5347 if (ctxt->rip_relative && likely(ctxt->memopp)) 5348 ctxt->memopp->addr.mem.ea = address_mask(ctxt, 5349 ctxt->memopp->addr.mem.ea + ctxt->_eip); 5350 5351 done: 5352 if (rc == X86EMUL_PROPAGATE_FAULT) 5353 ctxt->have_exception = true; 5354 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; 5355 } 5356 5357 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) 5358 { 5359 return ctxt->d & PageTable; 5360 } 5361 5362 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) 5363 { 5364 /* The second termination condition only applies for REPE 5365 * and REPNE. Test if the repeat string operation prefix is 5366 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the 5367 * corresponding termination condition according to: 5368 * - if REPE/REPZ and ZF = 0 then done 5369 * - if REPNE/REPNZ and ZF = 1 then done 5370 */ 5371 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || 5372 (ctxt->b == 0xae) || (ctxt->b == 0xaf)) 5373 && (((ctxt->rep_prefix == REPE_PREFIX) && 5374 ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) 5375 || ((ctxt->rep_prefix == REPNE_PREFIX) && 5376 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) 5377 return true; 5378 5379 return false; 5380 } 5381 5382 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) 5383 { 5384 int rc; 5385 5386 kvm_fpu_get(); 5387 rc = asm_safe("fwait"); 5388 kvm_fpu_put(); 5389 5390 if (unlikely(rc != X86EMUL_CONTINUE)) 5391 return emulate_exception(ctxt, MF_VECTOR, 0, false); 5392 5393 return X86EMUL_CONTINUE; 5394 } 5395 5396 static void fetch_possible_mmx_operand(struct operand *op) 5397 { 5398 if (op->type == OP_MM) 5399 kvm_read_mmx_reg(op->addr.mm, &op->mm_val); 5400 } 5401 5402 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop) 5403 { 5404 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; 5405 5406 if (!(ctxt->d & ByteOp)) 5407 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; 5408 5409 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" 5410 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), 5411 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT 5412 : "c"(ctxt->src2.val)); 5413 5414 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); 5415 if (!fop) /* exception is returned in fop variable */ 5416 return emulate_de(ctxt); 5417 return X86EMUL_CONTINUE; 5418 } 5419 5420 void init_decode_cache(struct x86_emulate_ctxt *ctxt) 5421 { 5422 /* Clear fields that are set conditionally but read without a 
guard. */ 5423 ctxt->rip_relative = false; 5424 ctxt->rex_prefix = 0; 5425 ctxt->lock_prefix = 0; 5426 ctxt->rep_prefix = 0; 5427 ctxt->regs_valid = 0; 5428 ctxt->regs_dirty = 0; 5429 5430 ctxt->io_read.pos = 0; 5431 ctxt->io_read.end = 0; 5432 ctxt->mem_read.end = 0; 5433 } 5434 5435 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) 5436 { 5437 const struct x86_emulate_ops *ops = ctxt->ops; 5438 int rc = X86EMUL_CONTINUE; 5439 int saved_dst_type = ctxt->dst.type; 5440 unsigned emul_flags; 5441 5442 ctxt->mem_read.pos = 0; 5443 5444 /* LOCK prefix is allowed only with some instructions */ 5445 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { 5446 rc = emulate_ud(ctxt); 5447 goto done; 5448 } 5449 5450 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { 5451 rc = emulate_ud(ctxt); 5452 goto done; 5453 } 5454 5455 emul_flags = ctxt->ops->get_hflags(ctxt); 5456 if (unlikely(ctxt->d & 5457 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { 5458 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || 5459 (ctxt->d & Undefined)) { 5460 rc = emulate_ud(ctxt); 5461 goto done; 5462 } 5463 5464 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) 5465 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { 5466 rc = emulate_ud(ctxt); 5467 goto done; 5468 } 5469 5470 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { 5471 rc = emulate_nm(ctxt); 5472 goto done; 5473 } 5474 5475 if (ctxt->d & Mmx) { 5476 rc = flush_pending_x87_faults(ctxt); 5477 if (rc != X86EMUL_CONTINUE) 5478 goto done; 5479 /* 5480 * Now that we know the fpu is exception safe, we can fetch 5481 * operands from it. 5482 */ 5483 fetch_possible_mmx_operand(&ctxt->src); 5484 fetch_possible_mmx_operand(&ctxt->src2); 5485 if (!(ctxt->d & Mov)) 5486 fetch_possible_mmx_operand(&ctxt->dst); 5487 } 5488 5489 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { 5490 rc = emulator_check_intercept(ctxt, ctxt->intercept, 5491 X86_ICPT_PRE_EXCEPT); 5492 if (rc != X86EMUL_CONTINUE) 5493 goto done; 5494 } 5495 5496 /* Instruction can only be executed in protected mode */ 5497 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { 5498 rc = emulate_ud(ctxt); 5499 goto done; 5500 } 5501 5502 /* Privileged instruction can be executed only in CPL=0 */ 5503 if ((ctxt->d & Priv) && ops->cpl(ctxt)) { 5504 if (ctxt->d & PrivUD) 5505 rc = emulate_ud(ctxt); 5506 else 5507 rc = emulate_gp(ctxt, 0); 5508 goto done; 5509 } 5510 5511 /* Do instruction specific permission checks */ 5512 if (ctxt->d & CheckPerm) { 5513 rc = ctxt->check_perm(ctxt); 5514 if (rc != X86EMUL_CONTINUE) 5515 goto done; 5516 } 5517 5518 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { 5519 rc = emulator_check_intercept(ctxt, ctxt->intercept, 5520 X86_ICPT_POST_EXCEPT); 5521 if (rc != X86EMUL_CONTINUE) 5522 goto done; 5523 } 5524 5525 if (ctxt->rep_prefix && (ctxt->d & String)) { 5526 /* All REP prefixes have the same first termination condition */ 5527 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { 5528 string_registers_quirk(ctxt); 5529 ctxt->eip = ctxt->_eip; 5530 ctxt->eflags &= ~X86_EFLAGS_RF; 5531 goto done; 5532 } 5533 } 5534 } 5535 5536 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { 5537 rc = segmented_read(ctxt, ctxt->src.addr.mem, 5538 ctxt->src.valptr, ctxt->src.bytes); 5539 if (rc != X86EMUL_CONTINUE) 5540 goto done; 5541 ctxt->src.orig_val64 = ctxt->src.val64; 5542 } 5543 5544 if 
(ctxt->src2.type == OP_MEM) { 5545 rc = segmented_read(ctxt, ctxt->src2.addr.mem, 5546 &ctxt->src2.val, ctxt->src2.bytes); 5547 if (rc != X86EMUL_CONTINUE) 5548 goto done; 5549 } 5550 5551 if ((ctxt->d & DstMask) == ImplicitOps) 5552 goto special_insn; 5553 5554 5555 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { 5556 /* optimisation - avoid slow emulated read if Mov */ 5557 rc = segmented_read(ctxt, ctxt->dst.addr.mem, 5558 &ctxt->dst.val, ctxt->dst.bytes); 5559 if (rc != X86EMUL_CONTINUE) { 5560 if (!(ctxt->d & NoWrite) && 5561 rc == X86EMUL_PROPAGATE_FAULT && 5562 ctxt->exception.vector == PF_VECTOR) 5563 ctxt->exception.error_code |= PFERR_WRITE_MASK; 5564 goto done; 5565 } 5566 } 5567 /* Copy full 64-bit value for CMPXCHG8B. */ 5568 ctxt->dst.orig_val64 = ctxt->dst.val64; 5569 5570 special_insn: 5571 5572 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { 5573 rc = emulator_check_intercept(ctxt, ctxt->intercept, 5574 X86_ICPT_POST_MEMACCESS); 5575 if (rc != X86EMUL_CONTINUE) 5576 goto done; 5577 } 5578 5579 if (ctxt->rep_prefix && (ctxt->d & String)) 5580 ctxt->eflags |= X86_EFLAGS_RF; 5581 else 5582 ctxt->eflags &= ~X86_EFLAGS_RF; 5583 5584 if (ctxt->execute) { 5585 if (ctxt->d & Fastop) 5586 rc = fastop(ctxt, ctxt->fop); 5587 else 5588 rc = ctxt->execute(ctxt); 5589 if (rc != X86EMUL_CONTINUE) 5590 goto done; 5591 goto writeback; 5592 } 5593 5594 if (ctxt->opcode_len == 2) 5595 goto twobyte_insn; 5596 else if (ctxt->opcode_len == 3) 5597 goto threebyte_insn; 5598 5599 switch (ctxt->b) { 5600 case 0x70 ... 0x7f: /* jcc (short) */ 5601 if (test_cc(ctxt->b, ctxt->eflags)) 5602 rc = jmp_rel(ctxt, ctxt->src.val); 5603 break; 5604 case 0x8d: /* lea r16/r32, m */ 5605 ctxt->dst.val = ctxt->src.addr.mem.ea; 5606 break; 5607 case 0x90 ... 0x97: /* nop / xchg reg, rax */ 5608 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) 5609 ctxt->dst.type = OP_NONE; 5610 else 5611 rc = em_xchg(ctxt); 5612 break; 5613 case 0x98: /* cbw/cwde/cdqe */ 5614 switch (ctxt->op_bytes) { 5615 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; 5616 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; 5617 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; 5618 } 5619 break; 5620 case 0xcc: /* int3 */ 5621 rc = emulate_int(ctxt, 3); 5622 break; 5623 case 0xcd: /* int n */ 5624 rc = emulate_int(ctxt, ctxt->src.val); 5625 break; 5626 case 0xce: /* into */ 5627 if (ctxt->eflags & X86_EFLAGS_OF) 5628 rc = emulate_int(ctxt, 4); 5629 break; 5630 case 0xe9: /* jmp rel */ 5631 case 0xeb: /* jmp rel short */ 5632 rc = jmp_rel(ctxt, ctxt->src.val); 5633 ctxt->dst.type = OP_NONE; /* Disable writeback. 
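 * jmp_rel() already committed the new _eip; a direct jump leaves no
 * register or memory result to write back.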
*/ 5634 break;
5635 case 0xf4: /* hlt */
5636 ctxt->ops->halt(ctxt);
5637 break;
5638 case 0xf5: /* cmc */
5639 /* complement carry flag from eflags reg */
5640 ctxt->eflags ^= X86_EFLAGS_CF;
5641 break;
5642 case 0xf8: /* clc */
5643 ctxt->eflags &= ~X86_EFLAGS_CF;
5644 break;
5645 case 0xf9: /* stc */
5646 ctxt->eflags |= X86_EFLAGS_CF;
5647 break;
5648 case 0xfc: /* cld */
5649 ctxt->eflags &= ~X86_EFLAGS_DF;
5650 break;
5651 case 0xfd: /* std */
5652 ctxt->eflags |= X86_EFLAGS_DF;
5653 break;
5654 default:
5655 goto cannot_emulate;
5656 }
5657
5658 if (rc != X86EMUL_CONTINUE)
5659 goto done;
5660
5661 writeback:
5662 if (ctxt->d & SrcWrite) {
5663 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5664 rc = writeback(ctxt, &ctxt->src);
5665 if (rc != X86EMUL_CONTINUE)
5666 goto done;
5667 }
5668 if (!(ctxt->d & NoWrite)) {
5669 rc = writeback(ctxt, &ctxt->dst);
5670 if (rc != X86EMUL_CONTINUE)
5671 goto done;
5672 }
5673
5674 /*
5675 * restore dst type in case the decoding will be reused
5676 * (happens for string instructions)
5677 */
5678 ctxt->dst.type = saved_dst_type;
5679
5680 if ((ctxt->d & SrcMask) == SrcSI)
5681 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5682
5683 if ((ctxt->d & DstMask) == DstDI)
5684 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5685
5686 if (ctxt->rep_prefix && (ctxt->d & String)) {
5687 unsigned int count;
5688 struct read_cache *r = &ctxt->io_read;
5689 if ((ctxt->d & SrcMask) == SrcSI)
5690 count = ctxt->src.count;
5691 else
5692 count = ctxt->dst.count;
5693 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5694
5695 if (!string_insn_completed(ctxt)) {
5696 /*
5697 * Re-enter the guest when the pio read-ahead buffer is
5698 * empty or, if it is not used, after every 1024 iterations.
5699 */
5700 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5701 (r->end == 0 || r->end != r->pos)) {
5702 /*
5703 * Reset read cache. Usually happens before
5704 * decode, but since instruction is restarted
5705 * we have to do it here.
5706 */
5707 ctxt->mem_read.end = 0;
5708 writeback_registers(ctxt);
5709 return EMULATION_RESTART;
5710 }
5711 goto done; /* skip rip writeback */
5712 }
5713 ctxt->eflags &= ~X86_EFLAGS_RF;
5714 }
5715
5716 ctxt->eip = ctxt->_eip;
5717 if (ctxt->mode != X86EMUL_MODE_PROT64)
5718 ctxt->eip = (u32)ctxt->_eip;
5719
5720 done:
5721 if (rc == X86EMUL_PROPAGATE_FAULT) {
5722 WARN_ON(ctxt->exception.vector > 0x1f);
5723 ctxt->have_exception = true;
5724 }
5725 if (rc == X86EMUL_INTERCEPTED)
5726 return EMULATION_INTERCEPTED;
5727
5728 if (rc == X86EMUL_CONTINUE)
5729 writeback_registers(ctxt);
5730
5731 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5732
5733 twobyte_insn:
5734 switch (ctxt->b) {
5735 case 0x09: /* wbinvd */
5736 (ctxt->ops->wbinvd)(ctxt);
5737 break;
5738 case 0x08: /* invd */
5739 case 0x0d: /* GrpP (prefetch) */
5740 case 0x18: /* Grp16 (prefetch/nop) */
5741 case 0x1f: /* nop */
5742 break;
5743 case 0x20: /* mov cr, reg */
5744 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5745 break;
5746 case 0x21: /* mov from dr to reg */
5747 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5748 break;
5749 case 0x40 ... 0x4f: /* cmov */
5750 if (test_cc(ctxt->b, ctxt->eflags))
5751 ctxt->dst.val = ctxt->src.val;
5752 else if (ctxt->op_bytes != 4)
5753 ctxt->dst.type = OP_NONE; /* no writeback */
5754 break;
5755 case 0x80 ... 0x8f: /* jcc rel, etc. */
5756 if (test_cc(ctxt->b, ctxt->eflags))
5757 rc = jmp_rel(ctxt, ctxt->src.val);
5758 break;
5759 case 0x90 ...
0x9f: /* setcc r/m8 */ 5760 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); 5761 break; 5762 case 0xb6 ... 0xb7: /* movzx */ 5763 ctxt->dst.bytes = ctxt->op_bytes; 5764 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val 5765 : (u16) ctxt->src.val; 5766 break; 5767 case 0xbe ... 0xbf: /* movsx */ 5768 ctxt->dst.bytes = ctxt->op_bytes; 5769 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : 5770 (s16) ctxt->src.val; 5771 break; 5772 default: 5773 goto cannot_emulate; 5774 } 5775 5776 threebyte_insn: 5777 5778 if (rc != X86EMUL_CONTINUE) 5779 goto done; 5780 5781 goto writeback; 5782 5783 cannot_emulate: 5784 return EMULATION_FAILED; 5785 } 5786 5787 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) 5788 { 5789 invalidate_registers(ctxt); 5790 } 5791 5792 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) 5793 { 5794 writeback_registers(ctxt); 5795 } 5796 5797 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt) 5798 { 5799 if (ctxt->rep_prefix && (ctxt->d & String)) 5800 return false; 5801 5802 if (ctxt->d & TwoMemOp) 5803 return false; 5804 5805 return true; 5806 } 5807