/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
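
/*
 * Worked example of how operand types pack into the 64-bit opcode flags
 * (using DstShift and SrcShift defined below): a decode-table entry such
 * as DstReg | SrcMem expands to
 *
 *	(OpReg << DstShift) | (OpMem << SrcShift)
 *	= (2ull << 1) | (3ull << 6) = 0xc4
 *
 * and each operand type is recovered with (flags >> Shift) & OpMask.
 */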

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
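
/*
 * These repeat an arbitrary argument list: e.g. X4(0x58) expands to
 * "0x58, 0x58, 0x58, 0x58".  The opcode tables use them to fill runs
 * of identical entries without writing each one out.
 */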

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
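
/*
 * A sketch of the size-indexed dispatch this enables (the real
 * arithmetic lives in fastop(), defined later in this file): each
 * FASTOP* block below emits the 8/16/32(/64)-bit variants back to
 * back, each aligned to FASTOP_SIZE, so with FASTOP_SIZE == 8 the
 * 16-bit variant of em_add sits at em_add + 1 * 8 and the 32-bit
 * variant at em_add + 2 * 8 -- the caller advances the function
 * pointer by __ffs(operand_bytes) * FASTOP_SIZE instead of going
 * through a jump table.
 */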

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

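/*
 * Guest GPRs are cached lazily: reg_read() pulls a register from the
 * vcpu on first use and marks it valid, reg_write()/reg_rmw() also mark
 * it dirty, writeback_registers() flushes only the dirty ones back, and
 * invalidate_registers() empties the cache.
 */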
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET
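
/*
 * Each SETcc stub below is "setcc %al" (3 bytes) plus "ret" (1 byte),
 * so with ".align 4" every stub is exactly 4 bytes; test_cc() further
 * down relies on this and indexes em_setcc + 4 * (condition & 0xf)
 * directly.
 */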

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
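
/*
 * Illustrative use (a sketch matching how this file wraps x87/SSE
 * instructions elsewhere):
 *
 *	rc = asm_safe("fwait");
 *
 * Any fault raised by the wrapped instruction is caught by the fixup
 * and reported as X86EMUL_UNHANDLEABLE instead of faulting the host.
 */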

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

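	/*
	 * Note: cur_size is at most 15 here, and 15 is 0b1111, so
	 * "15UL ^ cur_size" is just a branch-free way of writing
	 * "15 - cur_size" (e.g. cur_size = 6: 15 ^ 6 = 9 = 15 - 6).
	 */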
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So if there still are not
	 * enough bytes, we must have hit the 15-byte
	 * instruction-length limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
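
/*
 * Both fetch macros above deliberately use a local "int rc" and a
 * "done:" label from the *calling* function's scope; they can only be
 * used inside decode routines that provide both.
 */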

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies whether the operand is a byte; AH, CH, DH and BH are
 * decoded (for encodings 4-7) only for byte operands without a REX prefix.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but do update the flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but do update the flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

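/*
 * For bit ops (BT/BTS/BTR/BTC) with a register bit offset and a memory
 * destination, the offset may exceed the operand width; the effective
 * address is moved by whole operand-widths first.  Worked example for a
 * 16-bit BT with src.val == 35: mask = ~15, sv = 35 & ~15 = 32, the
 * address advances by sv >> 3 = 4 bytes, and src.val is reduced to
 * 35 & 15 = 3, i.e. bit 3 of the word 4 bytes further on.
 */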
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

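/*
 * mem_read caches data read while emulating one instruction; if
 * emulation has to be restarted (e.g. after X86EMUL_IO_NEEDED), the
 * replayed reads are satisfied from the cache so the instruction sees
 * consistent values.
 */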
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

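/*
 * IN/INS: for a rep-prefixed INS this reads ahead, batching up to a
 * page (or the rc->data buffer, or the remaining RCX iterations,
 * whichever is smaller) into a single ->pio_in_emulated() call, then
 * hands the buffered data out piecemeal.
 */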
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment descriptor's DPL != CPL
		 */
1673 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1674 			goto exception;
1675 		break;
1676 	case VCPU_SREG_CS:
1677 		if (!(seg_desc.type & 8))
1678 			goto exception;
1679 
1680 		if (seg_desc.type & 4) {
1681 			/* conforming */
1682 			if (dpl > cpl)
1683 				goto exception;
1684 		} else {
1685 			/* nonconforming */
1686 			if (rpl > cpl || dpl != cpl)
1687 				goto exception;
1688 		}
1689 		/* in long-mode d/b must be clear if l is set */
1690 		if (seg_desc.d && seg_desc.l) {
1691 			u64 efer = 0;
1692 
1693 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1694 			if (efer & EFER_LMA)
1695 				goto exception;
1696 		}
1697 
1698 		/* CS(RPL) <- CPL */
1699 		selector = (selector & 0xfffc) | cpl;
1700 		break;
1701 	case VCPU_SREG_TR:
1702 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1703 			goto exception;
1704 		old_desc = seg_desc;
1705 		seg_desc.type |= 2; /* busy */
1706 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1707 						  sizeof(seg_desc), &ctxt->exception);
1708 		if (ret != X86EMUL_CONTINUE)
1709 			return ret;
1710 		break;
1711 	case VCPU_SREG_LDTR:
1712 		if (seg_desc.s || seg_desc.type != 2)
1713 			goto exception;
1714 		break;
1715 	default: /*  DS, ES, FS, or GS */
1716 		/*
1717 		 * segment is not a data or readable code segment or
1718 		 * ((segment is a data or nonconforming code segment)
1719 		 * and (both RPL and CPL > DPL))
1720 		 */
1721 		if ((seg_desc.type & 0xa) == 0x8 ||
1722 		    (((seg_desc.type & 0xc) != 0xc) &&
1723 		     (rpl > dpl && cpl > dpl)))
1724 			goto exception;
1725 		break;
1726 	}
1727 
1728 	if (seg_desc.s) {
1729 		/* mark segment as accessed */
1730 		if (!(seg_desc.type & 1)) {
1731 			seg_desc.type |= 1;
1732 			ret = write_segment_descriptor(ctxt, selector,
1733 						       &seg_desc);
1734 			if (ret != X86EMUL_CONTINUE)
1735 				return ret;
1736 		}
1737 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1738 		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1739 				sizeof(base3), &ctxt->exception);
1740 		if (ret != X86EMUL_CONTINUE)
1741 			return ret;
1742 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1743 				((u64)base3 << 32), ctxt))
1744 			return emulate_gp(ctxt, 0);
1745 	}
1746 load:
1747 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1748 	if (desc)
1749 		*desc = seg_desc;
1750 	return X86EMUL_CONTINUE;
1751 exception:
1752 	return emulate_exception(ctxt, err_vec, err_code, true);
1753 }
1754 
1755 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1756 				   u16 selector, int seg)
1757 {
1758 	u8 cpl = ctxt->ops->cpl(ctxt);
1759 
1760 	/*
1761 	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1762 	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1763 	 * but it's wrong).
1764 	 *
1765 	 * However, the Intel manual says that putting IST=1/DPL=3 in
1766 	 * an interrupt gate will result in SS=3 (the AMD manual instead
1767 	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1768 	 * and only forbid it here.
1769 	 */
1770 	if (seg == VCPU_SREG_SS && selector == 3 &&
1771 	    ctxt->mode == X86EMUL_MODE_PROT64)
1772 		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1773 
1774 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1775 					 X86_TRANSFER_NONE, NULL);
1776 }
1777 
1778 static void write_register_operand(struct operand *op)
1779 {
1780 	return assign_register(op->addr.reg, op->val, op->bytes);
1781 }
1782 
1783 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1784 {
1785 	switch (op->type) {
1786 	case OP_REG:
1787 		write_register_operand(op);
1788 		break;
1789 	case OP_MEM:
1790 		if (ctxt->lock_prefix)
1791 			return segmented_cmpxchg(ctxt,
1792 						 op->addr.mem,
1793 						 &op->orig_val,
1794 						 &op->val,
1795 						 op->bytes);
1796 		else
1797 			return segmented_write(ctxt,
1798 					       op->addr.mem,
1799 					       &op->val,
1800 					       op->bytes);
1801 		break;
1802 	case OP_MEM_STR:
1803 		return segmented_write(ctxt,
1804 				       op->addr.mem,
1805 				       op->data,
1806 				       op->bytes * op->count);
1807 		break;
1808 	case OP_XMM:
1809 		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1810 		break;
1811 	case OP_MM:
1812 		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1813 		break;
1814 	case OP_NONE:
1815 		/* no writeback */
1816 		break;
1817 	default:
1818 		break;
1819 	}
1820 	return X86EMUL_CONTINUE;
1821 }
1822 
1823 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1824 {
1825 	struct segmented_address addr;
1826 
1827 	rsp_increment(ctxt, -bytes);
1828 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1829 	addr.seg = VCPU_SREG_SS;
1830 
1831 	return segmented_write(ctxt, addr, data, bytes);
1832 }
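
/*
 * push() (and emulate_pop() below) mask RSP with stack_mask(), so a
 * 16-bit stack segment wraps within 64KiB while 32/64-bit stacks use
 * the full stack pointer width.
 */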
1833 
1834 static int em_push(struct x86_emulate_ctxt *ctxt)
1835 {
1836 	/* Disable writeback. */
1837 	ctxt->dst.type = OP_NONE;
1838 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1839 }
1840 
1841 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1842 		       void *dest, int len)
1843 {
1844 	int rc;
1845 	struct segmented_address addr;
1846 
1847 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1848 	addr.seg = VCPU_SREG_SS;
1849 	rc = segmented_read(ctxt, addr, dest, len);
1850 	if (rc != X86EMUL_CONTINUE)
1851 		return rc;
1852 
1853 	rsp_increment(ctxt, len);
1854 	return rc;
1855 }
1856 
1857 static int em_pop(struct x86_emulate_ctxt *ctxt)
1858 {
1859 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1860 }
1861 
1862 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1863 			void *dest, int len)
1864 {
1865 	int rc;
1866 	unsigned long val, change_mask;
1867 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1868 	int cpl = ctxt->ops->cpl(ctxt);
1869 
1870 	rc = emulate_pop(ctxt, &val, len);
1871 	if (rc != X86EMUL_CONTINUE)
1872 		return rc;
1873 
1874 	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1875 		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1876 		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1877 		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1878 
1879 	switch(ctxt->mode) {
1880 	case X86EMUL_MODE_PROT64:
1881 	case X86EMUL_MODE_PROT32:
1882 	case X86EMUL_MODE_PROT16:
1883 		if (cpl == 0)
1884 			change_mask |= X86_EFLAGS_IOPL;
1885 		if (cpl <= iopl)
1886 			change_mask |= X86_EFLAGS_IF;
1887 		break;
1888 	case X86EMUL_MODE_VM86:
1889 		if (iopl < 3)
1890 			return emulate_gp(ctxt, 0);
1891 		change_mask |= X86_EFLAGS_IF;
1892 		break;
1893 	default: /* real mode */
1894 		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1895 		break;
1896 	}
1897 
1898 	*(unsigned long *)dest =
1899 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1900 
1901 	return rc;
1902 }
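
/*
 * To summarize the POPF rules implemented above: the arithmetic and
 * system flags in change_mask are always writable; IF may only be
 * changed when CPL <= IOPL; IOPL itself may only be changed at CPL 0;
 * VM86 faults with #GP(0) unless IOPL is 3; and real mode may change
 * everything.
 */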
1903 
1904 static int em_popf(struct x86_emulate_ctxt *ctxt)
1905 {
1906 	ctxt->dst.type = OP_REG;
1907 	ctxt->dst.addr.reg = &ctxt->eflags;
1908 	ctxt->dst.bytes = ctxt->op_bytes;
1909 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1910 }
1911 
1912 static int em_enter(struct x86_emulate_ctxt *ctxt)
1913 {
1914 	int rc;
1915 	unsigned frame_size = ctxt->src.val;
1916 	unsigned nesting_level = ctxt->src2.val & 31;
1917 	ulong rbp;
1918 
1919 	if (nesting_level)
1920 		return X86EMUL_UNHANDLEABLE;
1921 
1922 	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1923 	rc = push(ctxt, &rbp, stack_size(ctxt));
1924 	if (rc != X86EMUL_CONTINUE)
1925 		return rc;
1926 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1927 		      stack_mask(ctxt));
1928 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1929 		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1930 		      stack_mask(ctxt));
1931 	return X86EMUL_CONTINUE;
1932 }
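
/*
 * Only ENTER with a nesting level of 0 is emulated above; non-zero
 * levels would also have to copy the chain of frame pointers from the
 * outer frames, so those are punted back as X86EMUL_UNHANDLEABLE.
 */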
1933 
1934 static int em_leave(struct x86_emulate_ctxt *ctxt)
1935 {
1936 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1937 		      stack_mask(ctxt));
1938 	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1939 }
1940 
1941 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1942 {
1943 	int seg = ctxt->src2.val;
1944 
1945 	ctxt->src.val = get_segment_selector(ctxt, seg);
1946 	if (ctxt->op_bytes == 4) {
1947 		rsp_increment(ctxt, -2);
1948 		ctxt->op_bytes = 2;
1949 	}
1950 
1951 	return em_push(ctxt);
1952 }
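
/*
 * Note that a 32-bit PUSH of a segment register still decrements RSP
 * by 4 but writes only the low 2 bytes, presumably matching hardware
 * that leaves the upper word of the stack slot untouched.
 */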
1953 
1954 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1955 {
1956 	int seg = ctxt->src2.val;
1957 	unsigned long selector;
1958 	int rc;
1959 
1960 	rc = emulate_pop(ctxt, &selector, 2);
1961 	if (rc != X86EMUL_CONTINUE)
1962 		return rc;
1963 
1964 	if (ctxt->modrm_reg == VCPU_SREG_SS)
1965 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1966 	if (ctxt->op_bytes > 2)
1967 		rsp_increment(ctxt, ctxt->op_bytes - 2);
1968 
1969 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1970 	return rc;
1971 }
1972 
1973 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1974 {
1975 	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1976 	int rc = X86EMUL_CONTINUE;
1977 	int reg = VCPU_REGS_RAX;
1978 
1979 	while (reg <= VCPU_REGS_RDI) {
1980 		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
1981 				reg_read(ctxt, reg);
1982 
1983 		rc = em_push(ctxt);
1984 		if (rc != X86EMUL_CONTINUE)
1985 			return rc;
1986 
1987 		++reg;
1988 	}
1989 
1990 	return rc;
1991 }
1992 
1993 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1994 {
1995 	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1996 	return em_push(ctxt);
1997 }
1998 
1999 static int em_popa(struct x86_emulate_ctxt *ctxt)
2000 {
2001 	int rc = X86EMUL_CONTINUE;
2002 	int reg = VCPU_REGS_RDI;
2003 	u32 val;
2004 
2005 	while (reg >= VCPU_REGS_RAX) {
2006 		if (reg == VCPU_REGS_RSP) {
2007 			rsp_increment(ctxt, ctxt->op_bytes);
2008 			--reg;
2009 		}
2010 
2011 		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2012 		if (rc != X86EMUL_CONTINUE)
2013 			break;
2014 		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2015 		--reg;
2016 	}
2017 	return rc;
2018 }
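
/*
 * POPA discards the saved SP image: when the loop above reaches the
 * RSP slot it skips over it with rsp_increment() instead of loading
 * the popped value.
 */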
2019 
2020 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2021 {
2022 	const struct x86_emulate_ops *ops = ctxt->ops;
2023 	int rc;
2024 	struct desc_ptr dt;
2025 	gva_t cs_addr;
2026 	gva_t eip_addr;
2027 	u16 cs, eip;
2028 
2029 	/* TODO: Add limit checks */
2030 	ctxt->src.val = ctxt->eflags;
2031 	rc = em_push(ctxt);
2032 	if (rc != X86EMUL_CONTINUE)
2033 		return rc;
2034 
2035 	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2036 
2037 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2038 	rc = em_push(ctxt);
2039 	if (rc != X86EMUL_CONTINUE)
2040 		return rc;
2041 
2042 	ctxt->src.val = ctxt->_eip;
2043 	rc = em_push(ctxt);
2044 	if (rc != X86EMUL_CONTINUE)
2045 		return rc;
2046 
2047 	ops->get_idt(ctxt, &dt);
2048 
2049 	eip_addr = dt.address + (irq << 2);
2050 	cs_addr = dt.address + (irq << 2) + 2;
2051 
2052 	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2053 	if (rc != X86EMUL_CONTINUE)
2054 		return rc;
2055 
2056 	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2057 	if (rc != X86EMUL_CONTINUE)
2058 		return rc;
2059 
2060 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2061 	if (rc != X86EMUL_CONTINUE)
2062 		return rc;
2063 
2064 	ctxt->_eip = eip;
2065 
2066 	return rc;
2067 }
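
/*
 * Real-mode IVT layout assumed above: each vector is a 4-byte entry
 * at IDT base + vector * 4, with the 16-bit IP at offset 0 and the
 * 16-bit CS at offset 2, hence the "irq << 2" address computations.
 */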
2068 
2069 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2070 {
2071 	int rc;
2072 
2073 	invalidate_registers(ctxt);
2074 	rc = __emulate_int_real(ctxt, irq);
2075 	if (rc == X86EMUL_CONTINUE)
2076 		writeback_registers(ctxt);
2077 	return rc;
2078 }
2079 
2080 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2081 {
2082 	switch(ctxt->mode) {
2083 	case X86EMUL_MODE_REAL:
2084 		return __emulate_int_real(ctxt, irq);
2085 	case X86EMUL_MODE_VM86:
2086 	case X86EMUL_MODE_PROT16:
2087 	case X86EMUL_MODE_PROT32:
2088 	case X86EMUL_MODE_PROT64:
2089 	default:
2090 		/* Protected mode interrupts are not implemented yet */
2091 		return X86EMUL_UNHANDLEABLE;
2092 	}
2093 }
2094 
2095 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2096 {
2097 	int rc = X86EMUL_CONTINUE;
2098 	unsigned long temp_eip = 0;
2099 	unsigned long temp_eflags = 0;
2100 	unsigned long cs = 0;
2101 	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2102 			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2103 			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2104 			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2105 			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2106 			     X86_EFLAGS_FIXED;
2107 	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2108 				  X86_EFLAGS_VIP;
2109 
2110 	/* TODO: Add stack limit check */
2111 
2112 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2113 
2114 	if (rc != X86EMUL_CONTINUE)
2115 		return rc;
2116 
2117 	if (temp_eip & ~0xffff)
2118 		return emulate_gp(ctxt, 0);
2119 
2120 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2121 
2122 	if (rc != X86EMUL_CONTINUE)
2123 		return rc;
2124 
2125 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2126 
2127 	if (rc != X86EMUL_CONTINUE)
2128 		return rc;
2129 
2130 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2131 
2132 	if (rc != X86EMUL_CONTINUE)
2133 		return rc;
2134 
2135 	ctxt->_eip = temp_eip;
2136 
2137 	if (ctxt->op_bytes == 4)
2138 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2139 	else if (ctxt->op_bytes == 2) {
2140 		ctxt->eflags &= ~0xffff;
2141 		ctxt->eflags |= temp_eflags;
2142 	}
2143 
2144 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2145 	ctxt->eflags |= X86_EFLAGS_FIXED;
2146 	ctxt->ops->set_nmi_mask(ctxt, false);
2147 
2148 	return rc;
2149 }
2150 
2151 static int em_iret(struct x86_emulate_ctxt *ctxt)
2152 {
2153 	switch(ctxt->mode) {
2154 	case X86EMUL_MODE_REAL:
2155 		return emulate_iret_real(ctxt);
2156 	case X86EMUL_MODE_VM86:
2157 	case X86EMUL_MODE_PROT16:
2158 	case X86EMUL_MODE_PROT32:
2159 	case X86EMUL_MODE_PROT64:
2160 	default:
2161 		/* IRET from protected mode is not implemented yet */
2162 		return X86EMUL_UNHANDLEABLE;
2163 	}
2164 }
2165 
2166 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2167 {
2168 	int rc;
2169 	unsigned short sel;
2170 	struct desc_struct new_desc;
2171 	u8 cpl = ctxt->ops->cpl(ctxt);
2172 
2173 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2174 
2175 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2176 				       X86_TRANSFER_CALL_JMP,
2177 				       &new_desc);
2178 	if (rc != X86EMUL_CONTINUE)
2179 		return rc;
2180 
2181 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2182 	/* Error handling is not implemented. */
2183 	if (rc != X86EMUL_CONTINUE)
2184 		return X86EMUL_UNHANDLEABLE;
2185 
2186 	return rc;
2187 }
2188 
2189 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2190 {
2191 	return assign_eip_near(ctxt, ctxt->src.val);
2192 }
2193 
2194 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2195 {
2196 	int rc;
2197 	long int old_eip;
2198 
2199 	old_eip = ctxt->_eip;
2200 	rc = assign_eip_near(ctxt, ctxt->src.val);
2201 	if (rc != X86EMUL_CONTINUE)
2202 		return rc;
2203 	ctxt->src.val = old_eip;
2204 	rc = em_push(ctxt);
2205 	return rc;
2206 }
2207 
2208 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2209 {
2210 	u64 old = ctxt->dst.orig_val64;
2211 
2212 	if (ctxt->dst.bytes == 16)
2213 		return X86EMUL_UNHANDLEABLE;
2214 
2215 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2216 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2217 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2218 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2219 		ctxt->eflags &= ~X86_EFLAGS_ZF;
2220 	} else {
2221 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2222 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2223 
2224 		ctxt->eflags |= X86_EFLAGS_ZF;
2225 	}
2226 	return X86EMUL_CONTINUE;
2227 }
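
/*
 * CMPXCHG8B semantics implemented above: compare EDX:EAX with the
 * 64-bit destination; on a match, store ECX:EBX there and set ZF,
 * otherwise load the destination into EDX:EAX and clear ZF.  The
 * 16-byte CMPXCHG16B form (dst.bytes == 16) is not handled.
 */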
2228 
2229 static int em_ret(struct x86_emulate_ctxt *ctxt)
2230 {
2231 	int rc;
2232 	unsigned long eip;
2233 
2234 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2235 	if (rc != X86EMUL_CONTINUE)
2236 		return rc;
2237 
2238 	return assign_eip_near(ctxt, eip);
2239 }
2240 
2241 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2242 {
2243 	int rc;
2244 	unsigned long eip, cs;
2245 	int cpl = ctxt->ops->cpl(ctxt);
2246 	struct desc_struct new_desc;
2247 
2248 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2249 	if (rc != X86EMUL_CONTINUE)
2250 		return rc;
2251 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2252 	if (rc != X86EMUL_CONTINUE)
2253 		return rc;
2254 	/* Outer-privilege level return is not implemented */
2255 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2256 		return X86EMUL_UNHANDLEABLE;
2257 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2258 				       X86_TRANSFER_RET,
2259 				       &new_desc);
2260 	if (rc != X86EMUL_CONTINUE)
2261 		return rc;
2262 	rc = assign_eip_far(ctxt, eip, &new_desc);
2263 	/* Error handling is not implemented. */
2264 	if (rc != X86EMUL_CONTINUE)
2265 		return X86EMUL_UNHANDLEABLE;
2266 
2267 	return rc;
2268 }
2269 
2270 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2271 {
2272 	int rc;
2273 
2274 	rc = em_ret_far(ctxt);
2275 	if (rc != X86EMUL_CONTINUE)
2276 		return rc;
2277 	rsp_increment(ctxt, ctxt->src.val);
2278 	return X86EMUL_CONTINUE;
2279 }
2280 
2281 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2282 {
2283 	/* Save real source value, then compare EAX against destination. */
2284 	ctxt->dst.orig_val = ctxt->dst.val;
2285 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2286 	ctxt->src.orig_val = ctxt->src.val;
2287 	ctxt->src.val = ctxt->dst.orig_val;
2288 	fastop(ctxt, em_cmp);
2289 
2290 	if (ctxt->eflags & X86_EFLAGS_ZF) {
2291 		/* Success: write back to memory; no update of EAX */
2292 		ctxt->src.type = OP_NONE;
2293 		ctxt->dst.val = ctxt->src.orig_val;
2294 	} else {
2295 		/* Failure: write the value we saw to EAX. */
2296 		ctxt->src.type = OP_REG;
2297 		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2298 		ctxt->src.val = ctxt->dst.orig_val;
2299 		/* Create write-cycle to dest by writing the same value */
2300 		ctxt->dst.val = ctxt->dst.orig_val;
2301 	}
2302 	return X86EMUL_CONTINUE;
2303 }
2304 
2305 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2306 {
2307 	int seg = ctxt->src2.val;
2308 	unsigned short sel;
2309 	int rc;
2310 
2311 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2312 
2313 	rc = load_segment_descriptor(ctxt, sel, seg);
2314 	if (rc != X86EMUL_CONTINUE)
2315 		return rc;
2316 
2317 	ctxt->dst.val = ctxt->src.val;
2318 	return rc;
2319 }
2320 
2321 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2322 {
2323 	u32 eax, ebx, ecx, edx;
2324 
2325 	eax = 0x80000001;
2326 	ecx = 0;
2327 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2328 	return edx & bit(X86_FEATURE_LM);
2329 }
2330 
2331 #define GET_SMSTATE(type, smbase, offset)				  \
2332 	({								  \
2333 	 type __val;							  \
2334 	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
2335 				      sizeof(__val));			  \
2336 	 if (r != X86EMUL_CONTINUE)					  \
2337 		 return X86EMUL_UNHANDLEABLE;				  \
2338 	 __val;								  \
2339 	})
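
/*
 * Note that GET_SMSTATE() is a statement expression whose embedded
 * "return" bails out of the *calling* function with
 * X86EMUL_UNHANDLEABLE if the physical read from the SMRAM state-save
 * area fails.
 */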
2340 
2341 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2342 {
2343 	desc->g    = (flags >> 23) & 1;
2344 	desc->d    = (flags >> 22) & 1;
2345 	desc->l    = (flags >> 21) & 1;
2346 	desc->avl  = (flags >> 20) & 1;
2347 	desc->p    = (flags >> 15) & 1;
2348 	desc->dpl  = (flags >> 13) & 3;
2349 	desc->s    = (flags >> 12) & 1;
2350 	desc->type = (flags >>  8) & 15;
2351 }
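
/*
 * The flags word decoded above is the upper half of the descriptor's
 * second dword: type in bits 8-11, S in bit 12, DPL in bits 13-14,
 * P in bit 15, AVL in bit 20, L in bit 21, D/B in bit 22 and G in
 * bit 23.
 */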
2352 
2353 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2354 {
2355 	struct desc_struct desc;
2356 	int offset;
2357 	u16 selector;
2358 
2359 	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2360 
2361 	if (n < 3)
2362 		offset = 0x7f84 + n * 12;
2363 	else
2364 		offset = 0x7f2c + (n - 3) * 12;
2365 
2366 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2367 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2368 	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2369 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2370 	return X86EMUL_CONTINUE;
2371 }
2372 
2373 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2374 {
2375 	struct desc_struct desc;
2376 	int offset;
2377 	u16 selector;
2378 	u32 base3;
2379 
2380 	offset = 0x7e00 + n * 16;
2381 
2382 	selector =                GET_SMSTATE(u16, smbase, offset);
2383 	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2384 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2385 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2386 	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
2387 
2388 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2389 	return X86EMUL_CONTINUE;
2390 }
2391 
2392 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2393 				    u64 cr0, u64 cr3, u64 cr4)
2394 {
2395 	int bad;
2396 	u64 pcid;
2397 
2398 	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
2399 	pcid = 0;
2400 	if (cr4 & X86_CR4_PCIDE) {
2401 		pcid = cr3 & 0xfff;
2402 		cr3 &= ~0xfff;
2403 	}
2404 
2405 	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2406 	if (bad)
2407 		return X86EMUL_UNHANDLEABLE;
2408 
2409 	/*
2410 	 * First enable PAE; long mode needs it before CR0.PG = 1 is set.
2411 	 * Then enable protected mode.  However, PCID cannot be enabled
2412 	 * if EFER.LMA=0, so set it separately.
2413 	 */
2414 	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2415 	if (bad)
2416 		return X86EMUL_UNHANDLEABLE;
2417 
2418 	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2419 	if (bad)
2420 		return X86EMUL_UNHANDLEABLE;
2421 
2422 	if (cr4 & X86_CR4_PCIDE) {
2423 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2424 		if (bad)
2425 			return X86EMUL_UNHANDLEABLE;
2426 		if (pcid) {
2427 			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2428 			if (bad)
2429 				return X86EMUL_UNHANDLEABLE;
2430 		}
2431 
2432 	}
2433 
2434 	return X86EMUL_CONTINUE;
2435 }
2436 
2437 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2438 {
2439 	struct desc_struct desc;
2440 	struct desc_ptr dt;
2441 	u16 selector;
2442 	u32 val, cr0, cr3, cr4;
2443 	int i;
2444 
2445 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
2446 	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
2447 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2448 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
2449 
2450 	for (i = 0; i < 8; i++)
2451 		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2452 
2453 	val = GET_SMSTATE(u32, smbase, 0x7fcc);
2454 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2455 	val = GET_SMSTATE(u32, smbase, 0x7fc8);
2456 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2457 
2458 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
2459 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
2460 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
2461 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
2462 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2463 
2464 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
2465 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
2466 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
2467 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
2468 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2469 
2470 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
2471 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
2472 	ctxt->ops->set_gdt(ctxt, &dt);
2473 
2474 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
2475 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
2476 	ctxt->ops->set_idt(ctxt, &dt);
2477 
2478 	for (i = 0; i < 6; i++) {
2479 		int r = rsm_load_seg_32(ctxt, smbase, i);
2480 		if (r != X86EMUL_CONTINUE)
2481 			return r;
2482 	}
2483 
2484 	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2485 
2486 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2487 
2488 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2489 }
2490 
2491 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2492 {
2493 	struct desc_struct desc;
2494 	struct desc_ptr dt;
2495 	u64 val, cr0, cr3, cr4;
2496 	u32 base3;
2497 	u16 selector;
2498 	int i, r;
2499 
2500 	for (i = 0; i < 16; i++)
2501 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2502 
2503 	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
2504 	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2505 
2506 	val = GET_SMSTATE(u32, smbase, 0x7f68);
2507 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2508 	val = GET_SMSTATE(u32, smbase, 0x7f60);
2509 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2510 
2511 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
2512 	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
2513 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
2514 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2515 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
2516 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2517 
2518 	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
2519 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2520 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
2521 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
2522 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
2523 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2524 
2525 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
2526 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
2527 	ctxt->ops->set_idt(ctxt, &dt);
2528 
2529 	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
2530 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2531 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
2532 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
2533 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
2534 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2535 
2536 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
2537 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
2538 	ctxt->ops->set_gdt(ctxt, &dt);
2539 
2540 	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2541 	if (r != X86EMUL_CONTINUE)
2542 		return r;
2543 
2544 	for (i = 0; i < 6; i++) {
2545 		r = rsm_load_seg_64(ctxt, smbase, i);
2546 		if (r != X86EMUL_CONTINUE)
2547 			return r;
2548 	}
2549 
2550 	return X86EMUL_CONTINUE;
2551 }
2552 
2553 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2554 {
2555 	unsigned long cr0, cr4, efer;
2556 	u64 smbase;
2557 	int ret;
2558 
2559 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2560 		return emulate_ud(ctxt);
2561 
2562 	/*
2563 	 * Get back to real mode, to prepare a safe state in which to load
2564 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2565 	 * supports long mode.
2566 	 */
2567 	cr4 = ctxt->ops->get_cr(ctxt, 4);
2568 	if (emulator_has_longmode(ctxt)) {
2569 		struct desc_struct cs_desc;
2570 
2571 		/* Zero CR4.PCIDE before CR0.PG.  */
2572 		if (cr4 & X86_CR4_PCIDE) {
2573 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2574 			cr4 &= ~X86_CR4_PCIDE;
2575 		}
2576 
2577 		/* A 32-bit code segment is required to clear EFER.LMA.  */
2578 		memset(&cs_desc, 0, sizeof(cs_desc));
2579 		cs_desc.type = 0xb;
2580 		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2581 		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2582 	}
2583 
2584 	/* For the 64-bit case, this will clear EFER.LMA.  */
2585 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2586 	if (cr0 & X86_CR0_PE)
2587 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2588 
2589 	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
2590 	if (cr4 & X86_CR4_PAE)
2591 		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2592 
2593 	/* And finally go back to 32-bit mode.  */
2594 	efer = 0;
2595 	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2596 
2597 	smbase = ctxt->ops->get_smbase(ctxt);
2598 
2599 	/*
2600 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2601 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2602 	 * state-save area.
2603 	 */
2604 	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2605 		return X86EMUL_UNHANDLEABLE;
2606 
2607 	if (emulator_has_longmode(ctxt))
2608 		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2609 	else
2610 		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2611 
2612 	if (ret != X86EMUL_CONTINUE) {
2613 		/* FIXME: should triple fault */
2614 		return X86EMUL_UNHANDLEABLE;
2615 	}
2616 
2617 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2618 		ctxt->ops->set_nmi_mask(ctxt, false);
2619 
2620 	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2621 		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2622 	return X86EMUL_CONTINUE;
2623 }
2624 
2625 static void
2626 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2627 			struct desc_struct *cs, struct desc_struct *ss)
2628 {
2629 	cs->l = 0;		/* will be adjusted later */
2630 	set_desc_base(cs, 0);	/* flat segment */
2631 	cs->g = 1;		/* 4KiB granularity */
2632 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2633 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2634 	cs->s = 1;
2635 	cs->dpl = 0;		/* will be adjusted later */
2636 	cs->p = 1;
2637 	cs->d = 1;
2638 	cs->avl = 0;
2639 
2640 	set_desc_base(ss, 0);	/* flat segment */
2641 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2642 	ss->g = 1;		/* 4KiB granularity */
2643 	ss->s = 1;
2644 	ss->type = 0x03;	/* Read/Write, Accessed */
2645 	ss->d = 1;		/* 32-bit stack segment */
2646 	ss->dpl = 0;
2647 	ss->p = 1;
2648 	ss->l = 0;
2649 	ss->avl = 0;
2650 }
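
/*
 * The segments built above are the flat 4GiB code and stack segments
 * that SYSCALL/SYSENTER architecturally load; only the hidden parts
 * are filled in here, the selectors come from the STAR/SYSENTER MSRs.
 */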
2651 
2652 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2653 {
2654 	u32 eax, ebx, ecx, edx;
2655 
2656 	eax = ecx = 0;
2657 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2658 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2659 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2660 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2661 }
2662 
2663 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2664 {
2665 	const struct x86_emulate_ops *ops = ctxt->ops;
2666 	u32 eax, ebx, ecx, edx;
2667 
2668 	/*
2669 	 * SYSCALL is always enabled in long mode, so the check only needs
2670 	 * to be vendor specific (via CPUID) in the other modes.
2671 	 */
2672 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2673 		return true;
2674 
2675 	eax = 0x00000000;
2676 	ecx = 0x00000000;
2677 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2678 	/*
2679 	 * Intel ("GenuineIntel"): Intel CPUs only support SYSCALL in
2680 	 * 64-bit long mode, so a 32-bit compat application running in a
2681 	 * 64-bit guest will #UD.  While that behaviour could be fixed up
2682 	 * by emulating the AMD response, AMD CPUs cannot be made to
2683 	 * behave like Intel ones, so the emulator keeps the stricter
2684 	 * Intel semantics.
2685 	 */
2686 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2687 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2688 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2689 		return false;
2690 
2691 	/* AMD ("AuthenticAMD") */
2692 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2693 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2694 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2695 		return true;
2696 
2697 	/* AMD ("AMDisbetter!") */
2698 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2699 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2700 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2701 		return true;
2702 
2703 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2704 	return false;
2705 }
2706 
2707 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2708 {
2709 	const struct x86_emulate_ops *ops = ctxt->ops;
2710 	struct desc_struct cs, ss;
2711 	u64 msr_data;
2712 	u16 cs_sel, ss_sel;
2713 	u64 efer = 0;
2714 
2715 	/* syscall is not available in real mode */
2716 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2717 	    ctxt->mode == X86EMUL_MODE_VM86)
2718 		return emulate_ud(ctxt);
2719 
2720 	if (!em_syscall_is_enabled(ctxt))
2721 		return emulate_ud(ctxt);
2722 
2723 	ops->get_msr(ctxt, MSR_EFER, &efer);
2724 	setup_syscalls_segments(ctxt, &cs, &ss);
2725 
2726 	if (!(efer & EFER_SCE))
2727 		return emulate_ud(ctxt);
2728 
2729 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2730 	msr_data >>= 32;
2731 	cs_sel = (u16)(msr_data & 0xfffc);
2732 	ss_sel = (u16)(msr_data + 8);
2733 
2734 	if (efer & EFER_LMA) {
2735 		cs.d = 0;
2736 		cs.l = 1;
2737 	}
2738 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2739 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2740 
2741 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2742 	if (efer & EFER_LMA) {
2743 #ifdef CONFIG_X86_64
2744 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2745 
2746 		ops->get_msr(ctxt,
2747 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2748 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2749 		ctxt->_eip = msr_data;
2750 
2751 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2752 		ctxt->eflags &= ~msr_data;
2753 		ctxt->eflags |= X86_EFLAGS_FIXED;
2754 #endif
2755 	} else {
2756 		/* legacy mode */
2757 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2758 		ctxt->_eip = (u32)msr_data;
2759 
2760 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2761 	}
2762 
2763 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2764 	return X86EMUL_CONTINUE;
2765 }
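
/*
 * MSR_STAR layout relied on above: bits 47:32 hold the SYSCALL CS
 * selector (SS is loaded as CS + 8), bits 63:48 the SYSRET CS
 * selector, and bits 31:0 the legacy-mode SYSCALL target EIP.
 */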
2766 
2767 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2768 {
2769 	const struct x86_emulate_ops *ops = ctxt->ops;
2770 	struct desc_struct cs, ss;
2771 	u64 msr_data;
2772 	u16 cs_sel, ss_sel;
2773 	u64 efer = 0;
2774 
2775 	ops->get_msr(ctxt, MSR_EFER, &efer);
2776 	/* inject #GP if in real mode */
2777 	if (ctxt->mode == X86EMUL_MODE_REAL)
2778 		return emulate_gp(ctxt, 0);
2779 
2780 	/*
2781 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2782 	 * mode).
2783 	 */
2784 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2785 	    && !vendor_intel(ctxt))
2786 		return emulate_ud(ctxt);
2787 
2788 	/* SYSENTER/SYSEXIT have not been tested in 64-bit mode. */
2789 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2790 		return X86EMUL_UNHANDLEABLE;
2791 
2792 	setup_syscalls_segments(ctxt, &cs, &ss);
2793 
2794 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2795 	if ((msr_data & 0xfffc) == 0x0)
2796 		return emulate_gp(ctxt, 0);
2797 
2798 	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2799 	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2800 	ss_sel = cs_sel + 8;
2801 	if (efer & EFER_LMA) {
2802 		cs.d = 0;
2803 		cs.l = 1;
2804 	}
2805 
2806 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2807 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2808 
2809 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2810 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2811 
2812 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2813 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2814 							      (u32)msr_data;
2815 
2816 	return X86EMUL_CONTINUE;
2817 }
2818 
2819 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2820 {
2821 	const struct x86_emulate_ops *ops = ctxt->ops;
2822 	struct desc_struct cs, ss;
2823 	u64 msr_data, rcx, rdx;
2824 	int usermode;
2825 	u16 cs_sel = 0, ss_sel = 0;
2826 
2827 	/* inject #GP if in real mode or Virtual 8086 mode */
2828 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2829 	    ctxt->mode == X86EMUL_MODE_VM86)
2830 		return emulate_gp(ctxt, 0);
2831 
2832 	setup_syscalls_segments(ctxt, &cs, &ss);
2833 
2834 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2835 		usermode = X86EMUL_MODE_PROT64;
2836 	else
2837 		usermode = X86EMUL_MODE_PROT32;
2838 
2839 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2840 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2841 
2842 	cs.dpl = 3;
2843 	ss.dpl = 3;
2844 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2845 	switch (usermode) {
2846 	case X86EMUL_MODE_PROT32:
2847 		cs_sel = (u16)(msr_data + 16);
2848 		if ((msr_data & 0xfffc) == 0x0)
2849 			return emulate_gp(ctxt, 0);
2850 		ss_sel = (u16)(msr_data + 24);
2851 		rcx = (u32)rcx;
2852 		rdx = (u32)rdx;
2853 		break;
2854 	case X86EMUL_MODE_PROT64:
2855 		cs_sel = (u16)(msr_data + 32);
2856 		if (msr_data == 0x0)
2857 			return emulate_gp(ctxt, 0);
2858 		ss_sel = cs_sel + 8;
2859 		cs.d = 0;
2860 		cs.l = 1;
2861 		if (emul_is_noncanonical_address(rcx, ctxt) ||
2862 		    emul_is_noncanonical_address(rdx, ctxt))
2863 			return emulate_gp(ctxt, 0);
2864 		break;
2865 	}
2866 	cs_sel |= SEGMENT_RPL_MASK;
2867 	ss_sel |= SEGMENT_RPL_MASK;
2868 
2869 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2870 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2871 
2872 	ctxt->_eip = rdx;
2873 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2874 
2875 	return X86EMUL_CONTINUE;
2876 }
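
/*
 * SYSEXIT selector arithmetic used above, per the SDM: CS is
 * IA32_SYSENTER_CS + 16 for a 32-bit return or + 32 for a 64-bit
 * return, SS is always CS + 8, and both are forced to RPL 3.
 */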
2877 
2878 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2879 {
2880 	int iopl;
2881 	if (ctxt->mode == X86EMUL_MODE_REAL)
2882 		return false;
2883 	if (ctxt->mode == X86EMUL_MODE_VM86)
2884 		return true;
2885 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2886 	return ctxt->ops->cpl(ctxt) > iopl;
2887 }
2888 
2889 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2890 					    u16 port, u16 len)
2891 {
2892 	const struct x86_emulate_ops *ops = ctxt->ops;
2893 	struct desc_struct tr_seg;
2894 	u32 base3;
2895 	int r;
2896 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2897 	unsigned mask = (1 << len) - 1;
2898 	unsigned long base;
2899 
2900 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2901 	if (!tr_seg.p)
2902 		return false;
2903 	if (desc_limit_scaled(&tr_seg) < 103)
2904 		return false;
2905 	base = get_desc_base(&tr_seg);
2906 #ifdef CONFIG_X86_64
2907 	base |= ((u64)base3) << 32;
2908 #endif
2909 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2910 	if (r != X86EMUL_CONTINUE)
2911 		return false;
2912 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2913 		return false;
2914 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2915 	if (r != X86EMUL_CONTINUE)
2916 		return false;
2917 	if ((perm >> bit_idx) & mask)
2918 		return false;
2919 	return true;
2920 }
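
/*
 * The check above walks the TSS I/O permission bitmap: the 16-bit
 * bitmap offset lives at byte 102 of the TSS (so the TSS limit must
 * cover at least 103 bytes), each port is one bit, and access is
 * allowed only if every bit for ports port..port+len-1 is clear.
 */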
2921 
2922 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2923 				 u16 port, u16 len)
2924 {
2925 	if (ctxt->perm_ok)
2926 		return true;
2927 
2928 	if (emulator_bad_iopl(ctxt))
2929 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2930 			return false;
2931 
2932 	ctxt->perm_ok = true;
2933 
2934 	return true;
2935 }
2936 
2937 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2938 {
2939 	/*
2940 	 * Intel CPUs mask the counter and pointers in a rather strange
2941 	 * manner when ECX is zero, due to REP-string optimizations.
2942 	 */
2943 #ifdef CONFIG_X86_64
2944 	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2945 		return;
2946 
2947 	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2948 
2949 	switch (ctxt->b) {
2950 	case 0xa4:	/* movsb */
2951 	case 0xa5:	/* movsd/w */
2952 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2953 		/* fall through */
2954 	case 0xaa:	/* stosb */
2955 	case 0xab:	/* stosd/w */
2956 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2957 	}
2958 #endif
2959 }
2960 
2961 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2962 				struct tss_segment_16 *tss)
2963 {
2964 	tss->ip = ctxt->_eip;
2965 	tss->flag = ctxt->eflags;
2966 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2967 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2968 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2969 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2970 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2971 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2972 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2973 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2974 
2975 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2976 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2977 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2978 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2979 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2980 }
2981 
2982 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2983 				 struct tss_segment_16 *tss)
2984 {
2985 	int ret;
2986 	u8 cpl;
2987 
2988 	ctxt->_eip = tss->ip;
2989 	ctxt->eflags = tss->flag | 2;
2990 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2991 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2992 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2993 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2994 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2995 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2996 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2997 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2998 
2999 	/*
3000 	 * SDM says that segment selectors are loaded before segment
3001 	 * descriptors
3002 	 */
3003 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3004 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3005 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3006 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3007 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3008 
3009 	cpl = tss->cs & 3;
3010 
3011 	/*
3012 	 * Now load the segment descriptors.  If a fault happens at this
3013 	 * stage, it is handled in the context of the new task.
3014 	 */
3015 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3016 					X86_TRANSFER_TASK_SWITCH, NULL);
3017 	if (ret != X86EMUL_CONTINUE)
3018 		return ret;
3019 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3020 					X86_TRANSFER_TASK_SWITCH, NULL);
3021 	if (ret != X86EMUL_CONTINUE)
3022 		return ret;
3023 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3024 					X86_TRANSFER_TASK_SWITCH, NULL);
3025 	if (ret != X86EMUL_CONTINUE)
3026 		return ret;
3027 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3028 					X86_TRANSFER_TASK_SWITCH, NULL);
3029 	if (ret != X86EMUL_CONTINUE)
3030 		return ret;
3031 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3032 					X86_TRANSFER_TASK_SWITCH, NULL);
3033 	if (ret != X86EMUL_CONTINUE)
3034 		return ret;
3035 
3036 	return X86EMUL_CONTINUE;
3037 }
3038 
3039 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3040 			  u16 tss_selector, u16 old_tss_sel,
3041 			  ulong old_tss_base, struct desc_struct *new_desc)
3042 {
3043 	const struct x86_emulate_ops *ops = ctxt->ops;
3044 	struct tss_segment_16 tss_seg;
3045 	int ret;
3046 	u32 new_tss_base = get_desc_base(new_desc);
3047 
3048 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3049 			    &ctxt->exception);
3050 	if (ret != X86EMUL_CONTINUE)
3051 		return ret;
3052 
3053 	save_state_to_tss16(ctxt, &tss_seg);
3054 
3055 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3056 			     &ctxt->exception);
3057 	if (ret != X86EMUL_CONTINUE)
3058 		return ret;
3059 
3060 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3061 			    &ctxt->exception);
3062 	if (ret != X86EMUL_CONTINUE)
3063 		return ret;
3064 
3065 	if (old_tss_sel != 0xffff) {
3066 		tss_seg.prev_task_link = old_tss_sel;
3067 
3068 		ret = ops->write_std(ctxt, new_tss_base,
3069 				     &tss_seg.prev_task_link,
3070 				     sizeof tss_seg.prev_task_link,
3071 				     &ctxt->exception);
3072 		if (ret != X86EMUL_CONTINUE)
3073 			return ret;
3074 	}
3075 
3076 	return load_state_from_tss16(ctxt, &tss_seg);
3077 }
3078 
3079 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3080 				struct tss_segment_32 *tss)
3081 {
3082 	/* CR3 and the LDT selector are intentionally not saved */
3083 	tss->eip = ctxt->_eip;
3084 	tss->eflags = ctxt->eflags;
3085 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3086 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3087 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3088 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3089 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3090 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3091 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3092 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3093 
3094 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3095 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3096 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3097 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3098 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3099 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3100 }
3101 
3102 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3103 				 struct tss_segment_32 *tss)
3104 {
3105 	int ret;
3106 	u8 cpl;
3107 
3108 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3109 		return emulate_gp(ctxt, 0);
3110 	ctxt->_eip = tss->eip;
3111 	ctxt->eflags = tss->eflags | 2;
3112 
3113 	/* General purpose registers */
3114 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3115 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3116 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3117 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3118 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3119 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3120 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3121 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3122 
3123 	/*
3124 	 * SDM says that segment selectors are loaded before segment
3125 	 * descriptors.  This is important because CPL checks will
3126 	 * use CS.RPL.
3127 	 */
3128 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3129 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3130 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3131 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3132 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3133 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3134 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3135 
3136 	/*
3137 	 * If we're switching between Protected Mode and VM86, we need to make
3138 	 * sure to update the mode before loading the segment descriptors so
3139 	 * that the selectors are interpreted correctly.
3140 	 */
3141 	if (ctxt->eflags & X86_EFLAGS_VM) {
3142 		ctxt->mode = X86EMUL_MODE_VM86;
3143 		cpl = 3;
3144 	} else {
3145 		ctxt->mode = X86EMUL_MODE_PROT32;
3146 		cpl = tss->cs & 3;
3147 	}
3148 
3149 	/*
3150 	 * Now load the segment descriptors.  If a fault happens at this
3151 	 * stage, it is handled in the context of the new task.
3152 	 */
3153 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3154 					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3155 	if (ret != X86EMUL_CONTINUE)
3156 		return ret;
3157 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3158 					X86_TRANSFER_TASK_SWITCH, NULL);
3159 	if (ret != X86EMUL_CONTINUE)
3160 		return ret;
3161 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3162 					X86_TRANSFER_TASK_SWITCH, NULL);
3163 	if (ret != X86EMUL_CONTINUE)
3164 		return ret;
3165 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3166 					X86_TRANSFER_TASK_SWITCH, NULL);
3167 	if (ret != X86EMUL_CONTINUE)
3168 		return ret;
3169 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3170 					X86_TRANSFER_TASK_SWITCH, NULL);
3171 	if (ret != X86EMUL_CONTINUE)
3172 		return ret;
3173 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3174 					X86_TRANSFER_TASK_SWITCH, NULL);
3175 	if (ret != X86EMUL_CONTINUE)
3176 		return ret;
3177 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3178 					X86_TRANSFER_TASK_SWITCH, NULL);
3179 
3180 	return ret;
3181 }
3182 
3183 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3184 			  u16 tss_selector, u16 old_tss_sel,
3185 			  ulong old_tss_base, struct desc_struct *new_desc)
3186 {
3187 	const struct x86_emulate_ops *ops = ctxt->ops;
3188 	struct tss_segment_32 tss_seg;
3189 	int ret;
3190 	u32 new_tss_base = get_desc_base(new_desc);
3191 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3192 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3193 
3194 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3195 			    &ctxt->exception);
3196 	if (ret != X86EMUL_CONTINUE)
3197 		return ret;
3198 
3199 	save_state_to_tss32(ctxt, &tss_seg);
3200 
3201 	/* Only GP registers and segment selectors are saved */
3202 	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3203 			     ldt_sel_offset - eip_offset, &ctxt->exception);
3204 	if (ret != X86EMUL_CONTINUE)
3205 		return ret;
3206 
3207 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3208 			    &ctxt->exception);
3209 	if (ret != X86EMUL_CONTINUE)
3210 		return ret;
3211 
3212 	if (old_tss_sel != 0xffff) {
3213 		tss_seg.prev_task_link = old_tss_sel;
3214 
3215 		ret = ops->write_std(ctxt, new_tss_base,
3216 				     &tss_seg.prev_task_link,
3217 				     sizeof tss_seg.prev_task_link,
3218 				     &ctxt->exception);
3219 		if (ret != X86EMUL_CONTINUE)
3220 			return ret;
3221 	}
3222 
3223 	return load_state_from_tss32(ctxt, &tss_seg);
3224 }
3225 
3226 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3227 				   u16 tss_selector, int idt_index, int reason,
3228 				   bool has_error_code, u32 error_code)
3229 {
3230 	const struct x86_emulate_ops *ops = ctxt->ops;
3231 	struct desc_struct curr_tss_desc, next_tss_desc;
3232 	int ret;
3233 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3234 	ulong old_tss_base =
3235 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3236 	u32 desc_limit;
3237 	ulong desc_addr, dr7;
3238 
3239 	/* FIXME: old_tss_base == ~0 ? */
3240 
3241 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3242 	if (ret != X86EMUL_CONTINUE)
3243 		return ret;
3244 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3245 	if (ret != X86EMUL_CONTINUE)
3246 		return ret;
3247 
3248 	/* FIXME: check that next_tss_desc is tss */
3249 
3250 	/*
3251 	 * Check privileges. The three cases are task switch caused by...
3252 	 *
3253 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3254 	 * 2. Exception/IRQ/iret: No check is performed
3255 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3256 	 *    hardware checks it before exiting.
3257 	 */
3258 	if (reason == TASK_SWITCH_GATE) {
3259 		if (idt_index != -1) {
3260 			/* Software interrupts */
3261 			struct desc_struct task_gate_desc;
3262 			int dpl;
3263 
3264 			ret = read_interrupt_descriptor(ctxt, idt_index,
3265 							&task_gate_desc);
3266 			if (ret != X86EMUL_CONTINUE)
3267 				return ret;
3268 
3269 			dpl = task_gate_desc.dpl;
3270 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3271 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3272 		}
3273 	}
3274 
3275 	desc_limit = desc_limit_scaled(&next_tss_desc);
3276 	if (!next_tss_desc.p ||
3277 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3278 	     desc_limit < 0x2b)) {
3279 		return emulate_ts(ctxt, tss_selector & 0xfffc);
3280 	}
3281 
3282 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3283 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3284 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3285 	}
3286 
3287 	if (reason == TASK_SWITCH_IRET)
3288 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3289 
3290 	/* Set the back link to the previous task only if the NT bit is set
3291 	   in EFLAGS; note that old_tss_sel is not used after this point. */
3292 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3293 		old_tss_sel = 0xffff;
3294 
3295 	if (next_tss_desc.type & 8)
3296 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3297 				     old_tss_base, &next_tss_desc);
3298 	else
3299 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3300 				     old_tss_base, &next_tss_desc);
3301 	if (ret != X86EMUL_CONTINUE)
3302 		return ret;
3303 
3304 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3305 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3306 
3307 	if (reason != TASK_SWITCH_IRET) {
3308 		next_tss_desc.type |= (1 << 1); /* set busy flag */
3309 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3310 	}
3311 
3312 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3313 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3314 
3315 	if (has_error_code) {
3316 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3317 		ctxt->lock_prefix = 0;
3318 		ctxt->src.val = (unsigned long) error_code;
3319 		ret = em_push(ctxt);
3320 	}
3321 
3322 	ops->get_dr(ctxt, 7, &dr7);
3323 	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3324 
3325 	return ret;
3326 }
3327 
3328 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3329 			 u16 tss_selector, int idt_index, int reason,
3330 			 bool has_error_code, u32 error_code)
3331 {
3332 	int rc;
3333 
3334 	invalidate_registers(ctxt);
3335 	ctxt->_eip = ctxt->eip;
3336 	ctxt->dst.type = OP_NONE;
3337 
3338 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3339 				     has_error_code, error_code);
3340 
3341 	if (rc == X86EMUL_CONTINUE) {
3342 		ctxt->eip = ctxt->_eip;
3343 		writeback_registers(ctxt);
3344 	}
3345 
3346 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3347 }
3348 
3349 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3350 		struct operand *op)
3351 {
3352 	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3353 
3354 	register_address_increment(ctxt, reg, df * op->bytes);
3355 	op->addr.mem.ea = register_address(ctxt, reg);
3356 }
3357 
3358 static int em_das(struct x86_emulate_ctxt *ctxt)
3359 {
3360 	u8 al, old_al;
3361 	bool af, cf, old_cf;
3362 
3363 	cf = ctxt->eflags & X86_EFLAGS_CF;
3364 	al = ctxt->dst.val;
3365 
3366 	old_al = al;
3367 	old_cf = cf;
3368 	cf = false;
3369 	af = ctxt->eflags & X86_EFLAGS_AF;
3370 	if ((al & 0x0f) > 9 || af) {
3371 		al -= 6;
3372 		cf = old_cf | (al >= 250);
3373 		af = true;
3374 	} else {
3375 		af = false;
3376 	}
3377 	if (old_al > 0x99 || old_cf) {
3378 		al -= 0x60;
3379 		cf = true;
3380 	}
3381 
3382 	ctxt->dst.val = al;
3383 	/* Set PF, ZF, SF */
3384 	ctxt->src.type = OP_IMM;
3385 	ctxt->src.val = 0;
3386 	ctxt->src.bytes = 1;
3387 	fastop(ctxt, em_or);
3388 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3389 	if (cf)
3390 		ctxt->eflags |= X86_EFLAGS_CF;
3391 	if (af)
3392 		ctxt->eflags |= X86_EFLAGS_AF;
3393 	return X86EMUL_CONTINUE;
3394 }
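
/*
 * em_das() follows the SDM algorithm for DAS: subtract 6 from AL if
 * its low nibble is above 9 or AF is set, then subtract 0x60 if the
 * original AL was above 0x99 or CF was set.  The "al >= 250" test
 * detects the borrow from the first subtraction, which can only
 * happen if AL wrapped below zero.
 */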
3395 
3396 static int em_aam(struct x86_emulate_ctxt *ctxt)
3397 {
3398 	u8 al, ah;
3399 
3400 	if (ctxt->src.val == 0)
3401 		return emulate_de(ctxt);
3402 
3403 	al = ctxt->dst.val & 0xff;
3404 	ah = al / ctxt->src.val;
3405 	al %= ctxt->src.val;
3406 
3407 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3408 
3409 	/* Set PF, ZF, SF */
3410 	ctxt->src.type = OP_IMM;
3411 	ctxt->src.val = 0;
3412 	ctxt->src.bytes = 1;
3413 	fastop(ctxt, em_or);
3414 
3415 	return X86EMUL_CONTINUE;
3416 }
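
/*
 * AAM divides AL by the immediate (10 for the canonical D4 0A
 * encoding), putting the quotient in AH and the remainder in AL; a
 * divisor of 0 raises #DE, which is why em_aam() checks src.val
 * first.
 */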
3417 
3418 static int em_aad(struct x86_emulate_ctxt *ctxt)
3419 {
3420 	u8 al = ctxt->dst.val & 0xff;
3421 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3422 
3423 	al = (al + (ah * ctxt->src.val)) & 0xff;
3424 
3425 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3426 
3427 	/* Set PF, ZF, SF */
3428 	ctxt->src.type = OP_IMM;
3429 	ctxt->src.val = 0;
3430 	ctxt->src.bytes = 1;
3431 	fastop(ctxt, em_or);
3432 
3433 	return X86EMUL_CONTINUE;
3434 }
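
/*
 * Note that em_aad() clears AH as a side effect: only the new AL is
 * merged into the low 16 bits of the destination, matching the SDM,
 * which specifies AH = 0 after AAD.
 */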
3435 
3436 static int em_call(struct x86_emulate_ctxt *ctxt)
3437 {
3438 	int rc;
3439 	long rel = ctxt->src.val;
3440 
3441 	ctxt->src.val = (unsigned long)ctxt->_eip;
3442 	rc = jmp_rel(ctxt, rel);
3443 	if (rc != X86EMUL_CONTINUE)
3444 		return rc;
3445 	return em_push(ctxt);
3446 }
3447 
3448 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3449 {
3450 	u16 sel, old_cs;
3451 	ulong old_eip;
3452 	int rc;
3453 	struct desc_struct old_desc, new_desc;
3454 	const struct x86_emulate_ops *ops = ctxt->ops;
3455 	int cpl = ctxt->ops->cpl(ctxt);
3456 	enum x86emul_mode prev_mode = ctxt->mode;
3457 
3458 	old_eip = ctxt->_eip;
3459 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3460 
3461 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3462 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3463 				       X86_TRANSFER_CALL_JMP, &new_desc);
3464 	if (rc != X86EMUL_CONTINUE)
3465 		return rc;
3466 
3467 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3468 	if (rc != X86EMUL_CONTINUE)
3469 		goto fail;
3470 
3471 	ctxt->src.val = old_cs;
3472 	rc = em_push(ctxt);
3473 	if (rc != X86EMUL_CONTINUE)
3474 		goto fail;
3475 
3476 	ctxt->src.val = old_eip;
3477 	rc = em_push(ctxt);
3478 	/* If we failed, we tainted the memory, but at the very least we
3479 	   should restore CS. */
3480 	if (rc != X86EMUL_CONTINUE) {
3481 		pr_warn_once("faulting far call emulation tainted memory\n");
3482 		goto fail;
3483 	}
3484 	return rc;
3485 fail:
3486 	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3487 	ctxt->mode = prev_mode;
3488 	return rc;
3489 
3490 }
3491 
3492 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3493 {
3494 	int rc;
3495 	unsigned long eip;
3496 
3497 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3498 	if (rc != X86EMUL_CONTINUE)
3499 		return rc;
3500 	rc = assign_eip_near(ctxt, eip);
3501 	if (rc != X86EMUL_CONTINUE)
3502 		return rc;
3503 	rsp_increment(ctxt, ctxt->src.val);
3504 	return X86EMUL_CONTINUE;
3505 }
3506 
3507 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3508 {
3509 	/* Write back the register source. */
3510 	ctxt->src.val = ctxt->dst.val;
3511 	write_register_operand(&ctxt->src);
3512 
3513 	/* Write back the memory destination with implicit LOCK prefix. */
3514 	ctxt->dst.val = ctxt->src.orig_val;
3515 	ctxt->lock_prefix = 1;
3516 	return X86EMUL_CONTINUE;
3517 }
3518 
3519 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3520 {
3521 	ctxt->dst.val = ctxt->src2.val;
3522 	return fastop(ctxt, em_imul);
3523 }
3524 
3525 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3526 {
3527 	ctxt->dst.type = OP_REG;
3528 	ctxt->dst.bytes = ctxt->src.bytes;
3529 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3530 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3531 
3532 	return X86EMUL_CONTINUE;
3533 }
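
/*
 * The expression in em_cwd() is a branchless sign extension:
 * src >> (bits - 1) isolates the sign bit, so subtracting 1 and
 * inverting yields 0 when the sign bit is clear and all-ones when it
 * is set, which is exactly what CWD/CDQ/CQO store into the DX-family
 * register.
 */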
3534 
3535 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3536 {
3537 	u64 tsc = 0;
3538 
3539 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3540 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3541 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3542 	return X86EMUL_CONTINUE;
3543 }
3544 
3545 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3546 {
3547 	u64 pmc;
3548 
3549 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3550 		return emulate_gp(ctxt, 0);
3551 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3552 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3553 	return X86EMUL_CONTINUE;
3554 }
3555 
3556 static int em_mov(struct x86_emulate_ctxt *ctxt)
3557 {
3558 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3559 	return X86EMUL_CONTINUE;
3560 }
3561 
3562 #define FFL(x) bit(X86_FEATURE_##x)
3563 
3564 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3565 {
3566 	u32 ebx, ecx, edx, eax = 1;
3567 	u16 tmp;
3568 
3569 	/*
3570 	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3571 	 */
3572 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3573 	if (!(ecx & FFL(MOVBE)))
3574 		return emulate_ud(ctxt);
3575 
3576 	switch (ctxt->op_bytes) {
3577 	case 2:
3578 		/*
3579 		 * From MOVBE definition: "...When the operand size is 16 bits,
3580 		 * the upper word of the destination register remains unchanged
3581 		 * ..."
3582 		 *
3583 		 * Casting either ->valptr or ->val to u16 would break strict
3584 		 * aliasing rules, so we do the operation almost by hand.
3585 		 */
3586 		tmp = (u16)ctxt->src.val;
3587 		ctxt->dst.val &= ~0xffffUL;
3588 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3589 		break;
3590 	case 4:
3591 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3592 		break;
3593 	case 8:
3594 		ctxt->dst.val = swab64(ctxt->src.val);
3595 		break;
3596 	default:
3597 		BUG();
3598 	}
3599 	return X86EMUL_CONTINUE;
3600 }
3601 
3602 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3603 {
3604 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3605 		return emulate_gp(ctxt, 0);
3606 
3607 	/* Disable writeback. */
3608 	ctxt->dst.type = OP_NONE;
3609 	return X86EMUL_CONTINUE;
3610 }
3611 
3612 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3613 {
3614 	unsigned long val;
3615 
3616 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3617 		val = ctxt->src.val & ~0ULL;
3618 	else
3619 		val = ctxt->src.val & ~0U;
3620 
3621 	/* #UD condition is already handled. */
3622 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3623 		return emulate_gp(ctxt, 0);
3624 
3625 	/* Disable writeback. */
3626 	ctxt->dst.type = OP_NONE;
3627 	return X86EMUL_CONTINUE;
3628 }
3629 
3630 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3631 {
3632 	u64 msr_data;
3633 
3634 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3635 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3636 	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3637 		return emulate_gp(ctxt, 0);
3638 
3639 	return X86EMUL_CONTINUE;
3640 }
3641 
3642 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3643 {
3644 	u64 msr_data;
3645 
3646 	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3647 		return emulate_gp(ctxt, 0);
3648 
3649 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3650 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3651 	return X86EMUL_CONTINUE;
3652 }
3653 
3654 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3655 {
3656 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3657 		return emulate_ud(ctxt);
3658 
3659 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
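	/*
	 * MOV to a memory destination stores only the 16-bit selector, even
	 * with a 32-bit operand size; a register destination keeps the full
	 * operand width.
	 */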
3660 	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3661 		ctxt->dst.bytes = 2;
3662 	return X86EMUL_CONTINUE;
3663 }
3664 
3665 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3666 {
3667 	u16 sel = ctxt->src.val;
3668 
3669 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3670 		return emulate_ud(ctxt);
3671 
3672 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3673 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3674 
3675 	/* Disable writeback. */
3676 	ctxt->dst.type = OP_NONE;
3677 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3678 }
3679 
3680 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3681 {
3682 	u16 sel = ctxt->src.val;
3683 
3684 	/* Disable writeback. */
3685 	ctxt->dst.type = OP_NONE;
3686 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3687 }
3688 
3689 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3690 {
3691 	u16 sel = ctxt->src.val;
3692 
3693 	/* Disable writeback. */
3694 	ctxt->dst.type = OP_NONE;
3695 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3696 }
3697 
3698 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3699 {
3700 	int rc;
3701 	ulong linear;
3702 
3703 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3704 	if (rc == X86EMUL_CONTINUE)
3705 		ctxt->ops->invlpg(ctxt, linear);
3706 	/* Disable writeback. */
3707 	ctxt->dst.type = OP_NONE;
3708 	return X86EMUL_CONTINUE;
3709 }
3710 
3711 static int em_clts(struct x86_emulate_ctxt *ctxt)
3712 {
3713 	ulong cr0;
3714 
3715 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3716 	cr0 &= ~X86_CR0_TS;
3717 	ctxt->ops->set_cr(ctxt, 0, cr0);
3718 	return X86EMUL_CONTINUE;
3719 }
3720 
3721 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3722 {
3723 	int rc = ctxt->ops->fix_hypercall(ctxt);
3724 
3725 	if (rc != X86EMUL_CONTINUE)
3726 		return rc;
3727 
3728 	/* Let the processor re-execute the fixed hypercall */
3729 	ctxt->_eip = ctxt->eip;
3730 	/* Disable writeback. */
3731 	ctxt->dst.type = OP_NONE;
3732 	return X86EMUL_CONTINUE;
3733 }
3734 
3735 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3736 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3737 					      struct desc_ptr *ptr))
3738 {
3739 	struct desc_ptr desc_ptr;
3740 
3741 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3742 		ctxt->op_bytes = 8;
3743 	get(ctxt, &desc_ptr);
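	/*
	 * With a 16-bit operand size only 24 bits of the base are stored;
	 * widen op_bytes so that the write below covers the 2-byte limit
	 * plus the masked 4-byte base.
	 */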
3744 	if (ctxt->op_bytes == 2) {
3745 		ctxt->op_bytes = 4;
3746 		desc_ptr.address &= 0x00ffffff;
3747 	}
3748 	/* Disable writeback. */
3749 	ctxt->dst.type = OP_NONE;
3750 	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3751 				   &desc_ptr, 2 + ctxt->op_bytes);
3752 }
3753 
3754 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3755 {
3756 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3757 }
3758 
3759 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3760 {
3761 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3762 }
3763 
3764 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3765 {
3766 	struct desc_ptr desc_ptr;
3767 	int rc;
3768 
3769 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3770 		ctxt->op_bytes = 8;
3771 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3772 			     &desc_ptr.size, &desc_ptr.address,
3773 			     ctxt->op_bytes);
3774 	if (rc != X86EMUL_CONTINUE)
3775 		return rc;
3776 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3777 	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3778 		return emulate_gp(ctxt, 0);
3779 	if (lgdt)
3780 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3781 	else
3782 		ctxt->ops->set_idt(ctxt, &desc_ptr);
3783 	/* Disable writeback. */
3784 	ctxt->dst.type = OP_NONE;
3785 	return X86EMUL_CONTINUE;
3786 }
3787 
3788 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3789 {
3790 	return em_lgdt_lidt(ctxt, true);
3791 }
3792 
3793 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3794 {
3795 	return em_lgdt_lidt(ctxt, false);
3796 }
3797 
3798 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3799 {
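	/* SMSW to memory always stores 16 bits, whatever the operand size. */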
3800 	if (ctxt->dst.type == OP_MEM)
3801 		ctxt->dst.bytes = 2;
3802 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3803 	return X86EMUL_CONTINUE;
3804 }
3805 
3806 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3807 {
3808 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3809 			  | (ctxt->src.val & 0x0f));
3810 	ctxt->dst.type = OP_NONE;
3811 	return X86EMUL_CONTINUE;
3812 }
3813 
3814 static int em_loop(struct x86_emulate_ctxt *ctxt)
3815 {
3816 	int rc = X86EMUL_CONTINUE;
3817 
3818 	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
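	/*
	 * 0xe2 is LOOP (unconditional); for LOOPNE (0xe0) and LOOPE (0xe1),
	 * XORing the opcode with 0x5 yields the condition code expected by
	 * test_cc() (NZ and Z, respectively).
	 */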
3819 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3820 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3821 		rc = jmp_rel(ctxt, ctxt->src.val);
3822 
3823 	return rc;
3824 }
3825 
3826 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3827 {
3828 	int rc = X86EMUL_CONTINUE;
3829 
3830 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3831 		rc = jmp_rel(ctxt, ctxt->src.val);
3832 
3833 	return rc;
3834 }
3835 
3836 static int em_in(struct x86_emulate_ctxt *ctxt)
3837 {
3838 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3839 			     &ctxt->dst.val))
3840 		return X86EMUL_IO_NEEDED;
3841 
3842 	return X86EMUL_CONTINUE;
3843 }
3844 
3845 static int em_out(struct x86_emulate_ctxt *ctxt)
3846 {
3847 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3848 				    &ctxt->src.val, 1);
3849 	/* Disable writeback. */
3850 	ctxt->dst.type = OP_NONE;
3851 	return X86EMUL_CONTINUE;
3852 }
3853 
3854 static int em_cli(struct x86_emulate_ctxt *ctxt)
3855 {
3856 	if (emulator_bad_iopl(ctxt))
3857 		return emulate_gp(ctxt, 0);
3858 
3859 	ctxt->eflags &= ~X86_EFLAGS_IF;
3860 	return X86EMUL_CONTINUE;
3861 }
3862 
3863 static int em_sti(struct x86_emulate_ctxt *ctxt)
3864 {
3865 	if (emulator_bad_iopl(ctxt))
3866 		return emulate_gp(ctxt, 0);
3867 
3868 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3869 	ctxt->eflags |= X86_EFLAGS_IF;
3870 	return X86EMUL_CONTINUE;
3871 }
3872 
3873 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3874 {
3875 	u32 eax, ebx, ecx, edx;
3876 	u64 msr = 0;
3877 
3878 	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3879 	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3880 	    ctxt->ops->cpl(ctxt)) {
3881 		return emulate_gp(ctxt, 0);
3882 	}
3883 
3884 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3885 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3886 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3887 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3888 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3889 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3890 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3891 	return X86EMUL_CONTINUE;
3892 }
3893 
3894 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3895 {
3896 	u32 flags;
3897 
3898 	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3899 		X86_EFLAGS_SF;
3900 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3901 
3902 	ctxt->eflags &= ~0xffUL;
3903 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3904 	return X86EMUL_CONTINUE;
3905 }
3906 
3907 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3908 {
3909 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3910 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3911 	return X86EMUL_CONTINUE;
3912 }
3913 
3914 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3915 {
3916 	switch (ctxt->op_bytes) {
3917 #ifdef CONFIG_X86_64
3918 	case 8:
3919 		asm("bswap %0" : "+r"(ctxt->dst.val));
3920 		break;
3921 #endif
3922 	default:
3923 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3924 		break;
3925 	}
3926 	return X86EMUL_CONTINUE;
3927 }
3928 
3929 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3930 {
3931 	/* emulating clflush regardless of cpuid */
3932 	return X86EMUL_CONTINUE;
3933 }
3934 
3935 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3936 {
3937 	ctxt->dst.val = (s32) ctxt->src.val;
3938 	return X86EMUL_CONTINUE;
3939 }
3940 
3941 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3942 {
3943 	u32 eax = 1, ebx, ecx = 0, edx;
3944 
3945 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3946 	if (!(edx & FFL(FXSR)))
3947 		return emulate_ud(ctxt);
3948 
3949 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3950 		return emulate_nm(ctxt);
3951 
3952 	/*
3953 	 * Rather than work around the lack of fxsave64/fxrstor64 on old
3954 	 * compilers, don't emulate a case that should never be hit anyway.
3955 	 */
3956 	if (ctxt->mode >= X86EMUL_MODE_PROT64)
3957 		return X86EMUL_UNHANDLEABLE;
3958 
3959 	return X86EMUL_CONTINUE;
3960 }
3961 
3962 /*
3963  * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3964  * and restore MXCSR.
3965  */
3966 static size_t __fxstate_size(int nregs)
3967 {
3968 	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3969 }
3970 
3971 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3972 {
3973 	bool cr4_osfxsr;
3974 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3975 		return __fxstate_size(16);
3976 
3977 	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3978 	return __fxstate_size(cr4_osfxsr ? 8 : 0);
3979 }
3980 
3981 /*
3982  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3983  *  1) 16 bit mode
3984  *  2) 32 bit mode
3985  *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
3986  *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3987  *       save and restore
3988  *  3) 64-bit mode
3989  *     - like (2), but XMM 8-15 are being saved and restored
3990  *  4) 64-bit mode with REX.W prefix
3991  *     - like (3), but FIP and FDP are 64 bit
3992  *
3993  * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3994  * desired result.  (4) is not emulated.
3995  *
3996  * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3997  * and FPU DS) should match.
3998  */
3999 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4000 {
4001 	struct fxregs_state fx_state;
4002 	int rc;
4003 
4004 	rc = check_fxsr(ctxt);
4005 	if (rc != X86EMUL_CONTINUE)
4006 		return rc;
4007 
4008 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4009 
4010 	if (rc != X86EMUL_CONTINUE)
4011 		return rc;
4012 
4013 	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4014 		                   fxstate_size(ctxt));
4015 }
4016 
4017 /*
4018  * FXRSTOR might restore XMM registers not provided by the guest. Fill
4019  * the missing part in with the host's values (via FXSAVE), so that FXRSTOR
4020  * leaves them unmodified.  (Preemption has to stay disabled until FXRSTOR.)
4021  *
4022  * Use noinline so that the large fx_tmp buffer doesn't bloat the callers' stacks.
4023  */
4024 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4025 				 const size_t used_size)
4026 {
4027 	struct fxregs_state fx_tmp;
4028 	int rc;
4029 
4030 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4031 	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4032 	       __fxstate_size(16) - used_size);
4033 
4034 	return rc;
4035 }
4036 
4037 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4038 {
4039 	struct fxregs_state fx_state;
4040 	int rc;
4041 	size_t size;
4042 
4043 	rc = check_fxsr(ctxt);
4044 	if (rc != X86EMUL_CONTINUE)
4045 		return rc;
4046 
4047 	size = fxstate_size(ctxt);
4048 	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4049 	if (rc != X86EMUL_CONTINUE)
4050 		return rc;
4051 
4052 	if (size < __fxstate_size(16)) {
4053 		rc = fxregs_fixup(&fx_state, size);
4054 		if (rc != X86EMUL_CONTINUE)
4055 			goto out;
4056 	}
4057 
4058 	if (fx_state.mxcsr >> 16) {
4059 		rc = emulate_gp(ctxt, 0);
4060 		goto out;
4061 	}
4062 
4063 	if (rc == X86EMUL_CONTINUE)
4064 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4065 
4066 out:
4067 	return rc;
4068 }
4069 
4070 static bool valid_cr(int nr)
4071 {
4072 	switch (nr) {
4073 	case 0:
4074 	case 2 ... 4:
4075 	case 8:
4076 		return true;
4077 	default:
4078 		return false;
4079 	}
4080 }
4081 
4082 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4083 {
4084 	if (!valid_cr(ctxt->modrm_reg))
4085 		return emulate_ud(ctxt);
4086 
4087 	return X86EMUL_CONTINUE;
4088 }
4089 
4090 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4091 {
4092 	u64 new_val = ctxt->src.val64;
4093 	int cr = ctxt->modrm_reg;
4094 	u64 efer = 0;
4095 
4096 	static u64 cr_reserved_bits[] = {
4097 		0xffffffff00000000ULL,
4098 		0, 0, 0, /* CR3 checked later */
4099 		CR4_RESERVED_BITS,
4100 		0, 0, 0,
4101 		CR8_RESERVED_BITS,
4102 	};
4103 
4104 	if (!valid_cr(cr))
4105 		return emulate_ud(ctxt);
4106 
4107 	if (new_val & cr_reserved_bits[cr])
4108 		return emulate_gp(ctxt, 0);
4109 
4110 	switch (cr) {
4111 	case 0: {
4112 		u64 cr4;
4113 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4114 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4115 			return emulate_gp(ctxt, 0);
4116 
4117 		cr4 = ctxt->ops->get_cr(ctxt, 4);
4118 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4119 
4120 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4121 		    !(cr4 & X86_CR4_PAE))
4122 			return emulate_gp(ctxt, 0);
4123 
4124 		break;
4125 		}
4126 	case 3: {
4127 		u64 rsvd = 0;
4128 
4129 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4130 		if (efer & EFER_LMA) {
4131 			u64 maxphyaddr;
4132 			u32 eax, ebx, ecx, edx;
4133 
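			/*
			 * CPUID.80000008H:EAX[7:0] reports MAXPHYADDR; fall
			 * back to 36 bits if the leaf is unavailable.
			 */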
4134 			eax = 0x80000008;
4135 			ecx = 0;
4136 			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4137 						 &edx, false))
4138 				maxphyaddr = eax & 0xff;
4139 			else
4140 				maxphyaddr = 36;
4141 			rsvd = rsvd_bits(maxphyaddr, 62);
4142 		}
4143 
4144 		if (new_val & rsvd)
4145 			return emulate_gp(ctxt, 0);
4146 
4147 		break;
4148 		}
4149 	case 4: {
4150 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4151 
4152 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4153 			return emulate_gp(ctxt, 0);
4154 
4155 		break;
4156 		}
4157 	}
4158 
4159 	return X86EMUL_CONTINUE;
4160 }
4161 
4162 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4163 {
4164 	unsigned long dr7;
4165 
4166 	ctxt->ops->get_dr(ctxt, 7, &dr7);
4167 
4168 	/* Check if DR7.GD (general detect enable) is set */
4169 	return dr7 & (1 << 13);
4170 }
4171 
4172 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4173 {
4174 	int dr = ctxt->modrm_reg;
4175 	u64 cr4;
4176 
4177 	if (dr > 7)
4178 		return emulate_ud(ctxt);
4179 
4180 	cr4 = ctxt->ops->get_cr(ctxt, 4);
4181 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4182 		return emulate_ud(ctxt);
4183 
4184 	if (check_dr7_gd(ctxt)) {
4185 		ulong dr6;
4186 
4187 		ctxt->ops->get_dr(ctxt, 6, &dr6);
4188 		dr6 &= ~15;
4189 		dr6 |= DR6_BD | DR6_RTM;
4190 		ctxt->ops->set_dr(ctxt, 6, dr6);
4191 		return emulate_db(ctxt);
4192 	}
4193 
4194 	return X86EMUL_CONTINUE;
4195 }
4196 
4197 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4198 {
4199 	u64 new_val = ctxt->src.val64;
4200 	int dr = ctxt->modrm_reg;
4201 
4202 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4203 		return emulate_gp(ctxt, 0);
4204 
4205 	return check_dr_read(ctxt);
4206 }
4207 
4208 static int check_svme(struct x86_emulate_ctxt *ctxt)
4209 {
4210 	u64 efer = 0;
4211 
4212 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4213 
4214 	if (!(efer & EFER_SVME))
4215 		return emulate_ud(ctxt);
4216 
4217 	return X86EMUL_CONTINUE;
4218 }
4219 
4220 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4221 {
4222 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4223 
4224 	/* Valid physical address? */
4225 	if (rax & 0xffff000000000000ULL)
4226 		return emulate_gp(ctxt, 0);
4227 
4228 	return check_svme(ctxt);
4229 }
4230 
4231 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4232 {
4233 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4234 
4235 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4236 		return emulate_ud(ctxt);
4237 
4238 	return X86EMUL_CONTINUE;
4239 }
4240 
4241 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4242 {
4243 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4244 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4245 
4246 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4247 	    ctxt->ops->check_pmc(ctxt, rcx))
4248 		return emulate_gp(ctxt, 0);
4249 
4250 	return X86EMUL_CONTINUE;
4251 }
4252 
4253 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4254 {
4255 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4256 	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4257 		return emulate_gp(ctxt, 0);
4258 
4259 	return X86EMUL_CONTINUE;
4260 }
4261 
4262 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4263 {
4264 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4265 	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4266 		return emulate_gp(ctxt, 0);
4267 
4268 	return X86EMUL_CONTINUE;
4269 }
4270 
4271 #define D(_y) { .flags = (_y) }
4272 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4273 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4274 		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4275 #define N    D(NotImpl)
4276 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4277 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4278 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4279 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4280 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4281 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4282 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4283 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4284 #define II(_f, _e, _i) \
4285 	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4286 #define IIP(_f, _e, _i, _p) \
4287 	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4288 	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4289 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4290 
4291 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
4292 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4293 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4294 #define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4295 #define I2bvIP(_f, _e, _i, _p) \
4296 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4297 
4298 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4299 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4300 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4301 
4302 static const struct opcode group7_rm0[] = {
4303 	N,
4304 	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4305 	N, N, N, N, N, N,
4306 };
4307 
4308 static const struct opcode group7_rm1[] = {
4309 	DI(SrcNone | Priv, monitor),
4310 	DI(SrcNone | Priv, mwait),
4311 	N, N, N, N, N, N,
4312 };
4313 
4314 static const struct opcode group7_rm3[] = {
4315 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4316 	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4317 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4318 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4319 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4320 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4321 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4322 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4323 };
4324 
4325 static const struct opcode group7_rm7[] = {
4326 	N,
4327 	DIP(SrcNone, rdtscp, check_rdtsc),
4328 	N, N, N, N, N, N,
4329 };
4330 
4331 static const struct opcode group1[] = {
4332 	F(Lock, em_add),
4333 	F(Lock | PageTable, em_or),
4334 	F(Lock, em_adc),
4335 	F(Lock, em_sbb),
4336 	F(Lock | PageTable, em_and),
4337 	F(Lock, em_sub),
4338 	F(Lock, em_xor),
4339 	F(NoWrite, em_cmp),
4340 };
4341 
4342 static const struct opcode group1A[] = {
4343 	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4344 };
4345 
4346 static const struct opcode group2[] = {
4347 	F(DstMem | ModRM, em_rol),
4348 	F(DstMem | ModRM, em_ror),
4349 	F(DstMem | ModRM, em_rcl),
4350 	F(DstMem | ModRM, em_rcr),
4351 	F(DstMem | ModRM, em_shl),
4352 	F(DstMem | ModRM, em_shr),
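	/* The /6 encoding is an undocumented alias of SHL, hence em_shl twice. */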
4353 	F(DstMem | ModRM, em_shl),
4354 	F(DstMem | ModRM, em_sar),
4355 };
4356 
4357 static const struct opcode group3[] = {
4358 	F(DstMem | SrcImm | NoWrite, em_test),
4359 	F(DstMem | SrcImm | NoWrite, em_test),
4360 	F(DstMem | SrcNone | Lock, em_not),
4361 	F(DstMem | SrcNone | Lock, em_neg),
4362 	F(DstXacc | Src2Mem, em_mul_ex),
4363 	F(DstXacc | Src2Mem, em_imul_ex),
4364 	F(DstXacc | Src2Mem, em_div_ex),
4365 	F(DstXacc | Src2Mem, em_idiv_ex),
4366 };
4367 
4368 static const struct opcode group4[] = {
4369 	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4370 	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4371 	N, N, N, N, N, N,
4372 };
4373 
4374 static const struct opcode group5[] = {
4375 	F(DstMem | SrcNone | Lock,		em_inc),
4376 	F(DstMem | SrcNone | Lock,		em_dec),
4377 	I(SrcMem | NearBranch,			em_call_near_abs),
4378 	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4379 	I(SrcMem | NearBranch,			em_jmp_abs),
4380 	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4381 	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4382 };
4383 
4384 static const struct opcode group6[] = {
4385 	DI(Prot | DstMem,	sldt),
4386 	DI(Prot | DstMem,	str),
4387 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4388 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4389 	N, N, N, N,
4390 };
4391 
4392 static const struct group_dual group7 = { {
4393 	II(Mov | DstMem,			em_sgdt, sgdt),
4394 	II(Mov | DstMem,			em_sidt, sidt),
4395 	II(SrcMem | Priv,			em_lgdt, lgdt),
4396 	II(SrcMem | Priv,			em_lidt, lidt),
4397 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4398 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4399 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4400 }, {
4401 	EXT(0, group7_rm0),
4402 	EXT(0, group7_rm1),
4403 	N, EXT(0, group7_rm3),
4404 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4405 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4406 	EXT(0, group7_rm7),
4407 } };
4408 
4409 static const struct opcode group8[] = {
4410 	N, N, N, N,
4411 	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4412 	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4413 	F(DstMem | SrcImmByte | Lock,			em_btr),
4414 	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4415 };
4416 
4417 static const struct group_dual group9 = { {
4418 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4419 }, {
4420 	N, N, N, N, N, N, N, N,
4421 } };
4422 
4423 static const struct opcode group11[] = {
4424 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4425 	X7(D(Undefined)),
4426 };
4427 
4428 static const struct gprefix pfx_0f_ae_7 = {
4429 	I(SrcMem | ByteOp, em_clflush), N, N, N,
4430 };
4431 
4432 static const struct group_dual group15 = { {
4433 	I(ModRM | Aligned16, em_fxsave),
4434 	I(ModRM | Aligned16, em_fxrstor),
4435 	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4436 }, {
4437 	N, N, N, N, N, N, N, N,
4438 } };
4439 
4440 static const struct gprefix pfx_0f_6f_0f_7f = {
4441 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4442 };
4443 
4444 static const struct instr_dual instr_dual_0f_2b = {
4445 	I(0, em_mov), N
4446 };
4447 
4448 static const struct gprefix pfx_0f_2b = {
4449 	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4450 };
4451 
4452 static const struct gprefix pfx_0f_28_0f_29 = {
4453 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4454 };
4455 
4456 static const struct gprefix pfx_0f_e7 = {
4457 	N, I(Sse, em_mov), N, N,
4458 };
4459 
4460 static const struct escape escape_d9 = { {
4461 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4462 }, {
4463 	/* 0xC0 - 0xC7 */
4464 	N, N, N, N, N, N, N, N,
4465 	/* 0xC8 - 0xCF */
4466 	N, N, N, N, N, N, N, N,
4467 	/* 0xD0 - 0xD7 */
4468 	N, N, N, N, N, N, N, N,
4469 	/* 0xD8 - 0xDF */
4470 	N, N, N, N, N, N, N, N,
4471 	/* 0xE0 - 0xE7 */
4472 	N, N, N, N, N, N, N, N,
4473 	/* 0xE8 - 0xEF */
4474 	N, N, N, N, N, N, N, N,
4475 	/* 0xF0 - 0xF7 */
4476 	N, N, N, N, N, N, N, N,
4477 	/* 0xF8 - 0xFF */
4478 	N, N, N, N, N, N, N, N,
4479 } };
4480 
4481 static const struct escape escape_db = { {
4482 	N, N, N, N, N, N, N, N,
4483 }, {
4484 	/* 0xC0 - 0xC7 */
4485 	N, N, N, N, N, N, N, N,
4486 	/* 0xC8 - 0xCF */
4487 	N, N, N, N, N, N, N, N,
4488 	/* 0xD0 - 0xD7 */
4489 	N, N, N, N, N, N, N, N,
4490 	/* 0xD8 - 0xDF */
4491 	N, N, N, N, N, N, N, N,
4492 	/* 0xE0 - 0xE7 */
4493 	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4494 	/* 0xE8 - 0xEF */
4495 	N, N, N, N, N, N, N, N,
4496 	/* 0xF0 - 0xF7 */
4497 	N, N, N, N, N, N, N, N,
4498 	/* 0xF8 - 0xFF */
4499 	N, N, N, N, N, N, N, N,
4500 } };
4501 
4502 static const struct escape escape_dd = { {
4503 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4504 }, {
4505 	/* 0xC0 - 0xC7 */
4506 	N, N, N, N, N, N, N, N,
4507 	/* 0xC8 - 0xCF */
4508 	N, N, N, N, N, N, N, N,
4509 	/* 0xD0 - 0xD7 */
4510 	N, N, N, N, N, N, N, N,
4511 	/* 0xD8 - 0xDF */
4512 	N, N, N, N, N, N, N, N,
4513 	/* 0xE0 - 0xE7 */
4514 	N, N, N, N, N, N, N, N,
4515 	/* 0xE8 - 0xEF */
4516 	N, N, N, N, N, N, N, N,
4517 	/* 0xF0 - 0xF7 */
4518 	N, N, N, N, N, N, N, N,
4519 	/* 0xF8 - 0xFF */
4520 	N, N, N, N, N, N, N, N,
4521 } };
4522 
4523 static const struct instr_dual instr_dual_0f_c3 = {
4524 	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4525 };
4526 
4527 static const struct mode_dual mode_dual_63 = {
4528 	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4529 };
4530 
4531 static const struct opcode opcode_table[256] = {
4532 	/* 0x00 - 0x07 */
4533 	F6ALU(Lock, em_add),
4534 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4535 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4536 	/* 0x08 - 0x0F */
4537 	F6ALU(Lock | PageTable, em_or),
4538 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4539 	N,
4540 	/* 0x10 - 0x17 */
4541 	F6ALU(Lock, em_adc),
4542 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4543 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4544 	/* 0x18 - 0x1F */
4545 	F6ALU(Lock, em_sbb),
4546 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4547 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4548 	/* 0x20 - 0x27 */
4549 	F6ALU(Lock | PageTable, em_and), N, N,
4550 	/* 0x28 - 0x2F */
4551 	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4552 	/* 0x30 - 0x37 */
4553 	F6ALU(Lock, em_xor), N, N,
4554 	/* 0x38 - 0x3F */
4555 	F6ALU(NoWrite, em_cmp), N, N,
4556 	/* 0x40 - 0x4F */
4557 	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4558 	/* 0x50 - 0x57 */
4559 	X8(I(SrcReg | Stack, em_push)),
4560 	/* 0x58 - 0x5F */
4561 	X8(I(DstReg | Stack, em_pop)),
4562 	/* 0x60 - 0x67 */
4563 	I(ImplicitOps | Stack | No64, em_pusha),
4564 	I(ImplicitOps | Stack | No64, em_popa),
4565 	N, MD(ModRM, &mode_dual_63),
4566 	N, N, N, N,
4567 	/* 0x68 - 0x6F */
4568 	I(SrcImm | Mov | Stack, em_push),
4569 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4570 	I(SrcImmByte | Mov | Stack, em_push),
4571 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4572 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4573 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4574 	/* 0x70 - 0x7F */
4575 	X16(D(SrcImmByte | NearBranch)),
4576 	/* 0x80 - 0x87 */
4577 	G(ByteOp | DstMem | SrcImm, group1),
4578 	G(DstMem | SrcImm, group1),
4579 	G(ByteOp | DstMem | SrcImm | No64, group1),
4580 	G(DstMem | SrcImmByte, group1),
4581 	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4582 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4583 	/* 0x88 - 0x8F */
4584 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4585 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4586 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4587 	D(ModRM | SrcMem | NoAccess | DstReg),
4588 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4589 	G(0, group1A),
4590 	/* 0x90 - 0x97 */
4591 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4592 	/* 0x98 - 0x9F */
4593 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4594 	I(SrcImmFAddr | No64, em_call_far), N,
4595 	II(ImplicitOps | Stack, em_pushf, pushf),
4596 	II(ImplicitOps | Stack, em_popf, popf),
4597 	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4598 	/* 0xA0 - 0xA7 */
4599 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4600 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4601 	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4602 	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4603 	/* 0xA8 - 0xAF */
4604 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4605 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4606 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4607 	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4608 	/* 0xB0 - 0xB7 */
4609 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4610 	/* 0xB8 - 0xBF */
4611 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4612 	/* 0xC0 - 0xC7 */
4613 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4614 	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4615 	I(ImplicitOps | NearBranch, em_ret),
4616 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4617 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4618 	G(ByteOp, group11), G(0, group11),
4619 	/* 0xC8 - 0xCF */
4620 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4621 	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4622 	I(ImplicitOps, em_ret_far),
4623 	D(ImplicitOps), DI(SrcImmByte, intn),
4624 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4625 	/* 0xD0 - 0xD7 */
4626 	G(Src2One | ByteOp, group2), G(Src2One, group2),
4627 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4628 	I(DstAcc | SrcImmUByte | No64, em_aam),
4629 	I(DstAcc | SrcImmUByte | No64, em_aad),
4630 	F(DstAcc | ByteOp | No64, em_salc),
4631 	I(DstAcc | SrcXLat | ByteOp, em_mov),
4632 	/* 0xD8 - 0xDF */
4633 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4634 	/* 0xE0 - 0xE7 */
4635 	X3(I(SrcImmByte | NearBranch, em_loop)),
4636 	I(SrcImmByte | NearBranch, em_jcxz),
4637 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4638 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4639 	/* 0xE8 - 0xEF */
4640 	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4641 	I(SrcImmFAddr | No64, em_jmp_far),
4642 	D(SrcImmByte | ImplicitOps | NearBranch),
4643 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4644 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4645 	/* 0xF0 - 0xF7 */
4646 	N, DI(ImplicitOps, icebp), N, N,
4647 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4648 	G(ByteOp, group3), G(0, group3),
4649 	/* 0xF8 - 0xFF */
4650 	D(ImplicitOps), D(ImplicitOps),
4651 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4652 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4653 };
4654 
4655 static const struct opcode twobyte_table[256] = {
4656 	/* 0x00 - 0x0F */
4657 	G(0, group6), GD(0, &group7), N, N,
4658 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4659 	II(ImplicitOps | Priv, em_clts, clts), N,
4660 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4661 	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4662 	/* 0x10 - 0x1F */
4663 	N, N, N, N, N, N, N, N,
4664 	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4665 	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4666 	/* 0x20 - 0x2F */
4667 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4668 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4669 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4670 						check_cr_write),
4671 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4672 						check_dr_write),
4673 	N, N, N, N,
4674 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4675 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4676 	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4677 	N, N, N, N,
4678 	/* 0x30 - 0x3F */
4679 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4680 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4681 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4682 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4683 	I(ImplicitOps | EmulateOnUD, em_sysenter),
4684 	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4685 	N, N,
4686 	N, N, N, N, N, N, N, N,
4687 	/* 0x40 - 0x4F */
4688 	X16(D(DstReg | SrcMem | ModRM)),
4689 	/* 0x50 - 0x5F */
4690 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4691 	/* 0x60 - 0x6F */
4692 	N, N, N, N,
4693 	N, N, N, N,
4694 	N, N, N, N,
4695 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4696 	/* 0x70 - 0x7F */
4697 	N, N, N, N,
4698 	N, N, N, N,
4699 	N, N, N, N,
4700 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4701 	/* 0x80 - 0x8F */
4702 	X16(D(SrcImm | NearBranch)),
4703 	/* 0x90 - 0x9F */
4704 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4705 	/* 0xA0 - 0xA7 */
4706 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4707 	II(ImplicitOps, em_cpuid, cpuid),
4708 	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4709 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4710 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4711 	/* 0xA8 - 0xAF */
4712 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4713 	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4714 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4715 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4716 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4717 	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4718 	/* 0xB0 - 0xB7 */
4719 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4720 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4721 	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4722 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4723 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4724 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4725 	/* 0xB8 - 0xBF */
4726 	N, N,
4727 	G(BitOp, group8),
4728 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4729 	I(DstReg | SrcMem | ModRM, em_bsf_c),
4730 	I(DstReg | SrcMem | ModRM, em_bsr_c),
4731 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4732 	/* 0xC0 - 0xC7 */
4733 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4734 	N, ID(0, &instr_dual_0f_c3),
4735 	N, N, N, GD(0, &group9),
4736 	/* 0xC8 - 0xCF */
4737 	X8(I(DstReg, em_bswap)),
4738 	/* 0xD0 - 0xDF */
4739 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4740 	/* 0xE0 - 0xEF */
4741 	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4742 	N, N, N, N, N, N, N, N,
4743 	/* 0xF0 - 0xFF */
4744 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4745 };
4746 
4747 static const struct instr_dual instr_dual_0f_38_f0 = {
4748 	I(DstReg | SrcMem | Mov, em_movbe), N
4749 };
4750 
4751 static const struct instr_dual instr_dual_0f_38_f1 = {
4752 	I(DstMem | SrcReg | Mov, em_movbe), N
4753 };
4754 
4755 static const struct gprefix three_byte_0f_38_f0 = {
4756 	ID(0, &instr_dual_0f_38_f0), N, N, N
4757 };
4758 
4759 static const struct gprefix three_byte_0f_38_f1 = {
4760 	ID(0, &instr_dual_0f_38_f1), N, N, N
4761 };
4762 
4763 /*
4764  * The instructions below are selected by the prefix, which is indexed by
4765  * the third opcode byte.
4766  */
4767 static const struct opcode opcode_map_0f_38[256] = {
4768 	/* 0x00 - 0x7f */
4769 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4770 	/* 0x80 - 0xef */
4771 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4772 	/* 0xf0 - 0xf1 */
4773 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4774 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4775 	/* 0xf2 - 0xff */
4776 	N, N, X4(N), X8(N)
4777 };
4778 
4779 #undef D
4780 #undef N
4781 #undef G
4782 #undef GD
4783 #undef I
4784 #undef GP
4785 #undef EXT
4786 #undef MD
4787 #undef ID
4788 
4789 #undef D2bv
4790 #undef D2bvIP
4791 #undef I2bv
4792 #undef I2bvIP
4793 #undef I6ALU
4794 
4795 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4796 {
4797 	unsigned size;
4798 
4799 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
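	/*
	 * A 64-bit operand size still takes a sign-extended 32-bit immediate;
	 * the one exception, MOV reg, imm64, is decoded via OpImm64 instead.
	 */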
4800 	if (size == 8)
4801 		size = 4;
4802 	return size;
4803 }
4804 
4805 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4806 		      unsigned size, bool sign_extension)
4807 {
4808 	int rc = X86EMUL_CONTINUE;
4809 
4810 	op->type = OP_IMM;
4811 	op->bytes = size;
4812 	op->addr.mem.ea = ctxt->_eip;
4813 	/* NB. Immediates are sign-extended as necessary. */
4814 	switch (op->bytes) {
4815 	case 1:
4816 		op->val = insn_fetch(s8, ctxt);
4817 		break;
4818 	case 2:
4819 		op->val = insn_fetch(s16, ctxt);
4820 		break;
4821 	case 4:
4822 		op->val = insn_fetch(s32, ctxt);
4823 		break;
4824 	case 8:
4825 		op->val = insn_fetch(s64, ctxt);
4826 		break;
4827 	}
4828 	if (!sign_extension) {
4829 		switch (op->bytes) {
4830 		case 1:
4831 			op->val &= 0xff;
4832 			break;
4833 		case 2:
4834 			op->val &= 0xffff;
4835 			break;
4836 		case 4:
4837 			op->val &= 0xffffffff;
4838 			break;
4839 		}
4840 	}
4841 done:
4842 	return rc;
4843 }
4844 
4845 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4846 			  unsigned d)
4847 {
4848 	int rc = X86EMUL_CONTINUE;
4849 
4850 	switch (d) {
4851 	case OpReg:
4852 		decode_register_operand(ctxt, op);
4853 		break;
4854 	case OpImmUByte:
4855 		rc = decode_imm(ctxt, op, 1, false);
4856 		break;
4857 	case OpMem:
4858 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4859 	mem_common:
4860 		*op = ctxt->memop;
4861 		ctxt->memopp = op;
4862 		if (ctxt->d & BitOp)
4863 			fetch_bit_operand(ctxt);
4864 		op->orig_val = op->val;
4865 		break;
4866 	case OpMem64:
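		/* 8 bytes for CMPXCHG8B, or 16 with REX.W for CMPXCHG16B. */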
4867 		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4868 		goto mem_common;
4869 	case OpAcc:
4870 		op->type = OP_REG;
4871 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4872 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4873 		fetch_register_operand(op);
4874 		op->orig_val = op->val;
4875 		break;
4876 	case OpAccLo:
4877 		op->type = OP_REG;
4878 		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4879 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4880 		fetch_register_operand(op);
4881 		op->orig_val = op->val;
4882 		break;
4883 	case OpAccHi:
4884 		if (ctxt->d & ByteOp) {
4885 			op->type = OP_NONE;
4886 			break;
4887 		}
4888 		op->type = OP_REG;
4889 		op->bytes = ctxt->op_bytes;
4890 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4891 		fetch_register_operand(op);
4892 		op->orig_val = op->val;
4893 		break;
4894 	case OpDI:
4895 		op->type = OP_MEM;
4896 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4897 		op->addr.mem.ea =
4898 			register_address(ctxt, VCPU_REGS_RDI);
4899 		op->addr.mem.seg = VCPU_SREG_ES;
4900 		op->val = 0;
4901 		op->count = 1;
4902 		break;
4903 	case OpDX:
4904 		op->type = OP_REG;
4905 		op->bytes = 2;
4906 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4907 		fetch_register_operand(op);
4908 		break;
4909 	case OpCL:
4910 		op->type = OP_IMM;
4911 		op->bytes = 1;
4912 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4913 		break;
4914 	case OpImmByte:
4915 		rc = decode_imm(ctxt, op, 1, true);
4916 		break;
4917 	case OpOne:
4918 		op->type = OP_IMM;
4919 		op->bytes = 1;
4920 		op->val = 1;
4921 		break;
4922 	case OpImm:
4923 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4924 		break;
4925 	case OpImm64:
4926 		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4927 		break;
4928 	case OpMem8:
4929 		ctxt->memop.bytes = 1;
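		/*
		 * If the r/m field selected a register, re-decode it with
		 * byte-register rules and re-fetch its value.
		 */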
4930 		if (ctxt->memop.type == OP_REG) {
4931 			ctxt->memop.addr.reg = decode_register(ctxt,
4932 					ctxt->modrm_rm, true);
4933 			fetch_register_operand(&ctxt->memop);
4934 		}
4935 		goto mem_common;
4936 	case OpMem16:
4937 		ctxt->memop.bytes = 2;
4938 		goto mem_common;
4939 	case OpMem32:
4940 		ctxt->memop.bytes = 4;
4941 		goto mem_common;
4942 	case OpImmU16:
4943 		rc = decode_imm(ctxt, op, 2, false);
4944 		break;
4945 	case OpImmU:
4946 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4947 		break;
4948 	case OpSI:
4949 		op->type = OP_MEM;
4950 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4951 		op->addr.mem.ea =
4952 			register_address(ctxt, VCPU_REGS_RSI);
4953 		op->addr.mem.seg = ctxt->seg_override;
4954 		op->val = 0;
4955 		op->count = 1;
4956 		break;
4957 	case OpXLat:
4958 		op->type = OP_MEM;
4959 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4960 		op->addr.mem.ea =
4961 			address_mask(ctxt,
4962 				reg_read(ctxt, VCPU_REGS_RBX) +
4963 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4964 		op->addr.mem.seg = ctxt->seg_override;
4965 		op->val = 0;
4966 		break;
4967 	case OpImmFAddr:
4968 		op->type = OP_IMM;
4969 		op->addr.mem.ea = ctxt->_eip;
4970 		op->bytes = ctxt->op_bytes + 2;
4971 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4972 		break;
4973 	case OpMemFAddr:
4974 		ctxt->memop.bytes = ctxt->op_bytes + 2;
4975 		goto mem_common;
4976 	case OpES:
4977 		op->type = OP_IMM;
4978 		op->val = VCPU_SREG_ES;
4979 		break;
4980 	case OpCS:
4981 		op->type = OP_IMM;
4982 		op->val = VCPU_SREG_CS;
4983 		break;
4984 	case OpSS:
4985 		op->type = OP_IMM;
4986 		op->val = VCPU_SREG_SS;
4987 		break;
4988 	case OpDS:
4989 		op->type = OP_IMM;
4990 		op->val = VCPU_SREG_DS;
4991 		break;
4992 	case OpFS:
4993 		op->type = OP_IMM;
4994 		op->val = VCPU_SREG_FS;
4995 		break;
4996 	case OpGS:
4997 		op->type = OP_IMM;
4998 		op->val = VCPU_SREG_GS;
4999 		break;
5000 	case OpImplicit:
5001 		/* Special instructions do their own operand decoding. */
5002 	default:
5003 		op->type = OP_NONE; /* Disable writeback. */
5004 		break;
5005 	}
5006 
5007 done:
5008 	return rc;
5009 }
5010 
5011 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5012 {
5013 	int rc = X86EMUL_CONTINUE;
5014 	int mode = ctxt->mode;
5015 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5016 	bool op_prefix = false;
5017 	bool has_seg_override = false;
5018 	struct opcode opcode;
5019 	u16 dummy;
5020 	struct desc_struct desc;
5021 
5022 	ctxt->memop.type = OP_NONE;
5023 	ctxt->memopp = NULL;
5024 	ctxt->_eip = ctxt->eip;
5025 	ctxt->fetch.ptr = ctxt->fetch.data;
5026 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
5027 	ctxt->opcode_len = 1;
5028 	if (insn_len > 0)
5029 		memcpy(ctxt->fetch.data, insn, insn_len);
5030 	else {
5031 		rc = __do_insn_fetch_bytes(ctxt, 1);
5032 		if (rc != X86EMUL_CONTINUE)
5033 			return rc;
5034 	}
5035 
5036 	switch (mode) {
5037 	case X86EMUL_MODE_REAL:
5038 	case X86EMUL_MODE_VM86:
5039 		def_op_bytes = def_ad_bytes = 2;
5040 		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5041 		if (desc.d)
5042 			def_op_bytes = def_ad_bytes = 4;
5043 		break;
5044 	case X86EMUL_MODE_PROT16:
5045 		def_op_bytes = def_ad_bytes = 2;
5046 		break;
5047 	case X86EMUL_MODE_PROT32:
5048 		def_op_bytes = def_ad_bytes = 4;
5049 		break;
5050 #ifdef CONFIG_X86_64
5051 	case X86EMUL_MODE_PROT64:
5052 		def_op_bytes = 4;
5053 		def_ad_bytes = 8;
5054 		break;
5055 #endif
5056 	default:
5057 		return EMULATION_FAILED;
5058 	}
5059 
5060 	ctxt->op_bytes = def_op_bytes;
5061 	ctxt->ad_bytes = def_ad_bytes;
5062 
5063 	/* Legacy prefixes. */
5064 	for (;;) {
5065 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
5066 		case 0x66:	/* operand-size override */
5067 			op_prefix = true;
5068 			/* switch between 2/4 bytes */
5069 			ctxt->op_bytes = def_op_bytes ^ 6;
5070 			break;
5071 		case 0x67:	/* address-size override */
5072 			if (mode == X86EMUL_MODE_PROT64)
5073 				/* switch between 4/8 bytes */
5074 				ctxt->ad_bytes = def_ad_bytes ^ 12;
5075 			else
5076 				/* switch between 2/4 bytes */
5077 				ctxt->ad_bytes = def_ad_bytes ^ 6;
5078 			break;
5079 		case 0x26:	/* ES override */
5080 		case 0x2e:	/* CS override */
5081 		case 0x36:	/* SS override */
5082 		case 0x3e:	/* DS override */
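			/* Bits 4:3 of these prefix bytes encode ES/CS/SS/DS. */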
5083 			has_seg_override = true;
5084 			ctxt->seg_override = (ctxt->b >> 3) & 3;
5085 			break;
5086 		case 0x64:	/* FS override */
5087 		case 0x65:	/* GS override */
5088 			has_seg_override = true;
5089 			ctxt->seg_override = ctxt->b & 7;
5090 			break;
5091 		case 0x40 ... 0x4f: /* REX */
5092 			if (mode != X86EMUL_MODE_PROT64)
5093 				goto done_prefixes;
5094 			ctxt->rex_prefix = ctxt->b;
5095 			continue;
5096 		case 0xf0:	/* LOCK */
5097 			ctxt->lock_prefix = 1;
5098 			break;
5099 		case 0xf2:	/* REPNE/REPNZ */
5100 		case 0xf3:	/* REP/REPE/REPZ */
5101 			ctxt->rep_prefix = ctxt->b;
5102 			break;
5103 		default:
5104 			goto done_prefixes;
5105 		}
5106 
5107 		/* Any legacy prefix after a REX prefix nullifies its effect. */
5108 
5109 		ctxt->rex_prefix = 0;
5110 	}
5111 
5112 done_prefixes:
5113 
5114 	/* REX prefix. */
5115 	if (ctxt->rex_prefix & 8)
5116 		ctxt->op_bytes = 8;	/* REX.W */
5117 
5118 	/* Opcode byte(s). */
5119 	opcode = opcode_table[ctxt->b];
5120 	/* Two-byte opcode? */
5121 	if (ctxt->b == 0x0f) {
5122 		ctxt->opcode_len = 2;
5123 		ctxt->b = insn_fetch(u8, ctxt);
5124 		opcode = twobyte_table[ctxt->b];
5125 
5126 		/* 0F_38 opcode map */
5127 		if (ctxt->b == 0x38) {
5128 			ctxt->opcode_len = 3;
5129 			ctxt->b = insn_fetch(u8, ctxt);
5130 			opcode = opcode_map_0f_38[ctxt->b];
5131 		}
5132 	}
5133 	ctxt->d = opcode.flags;
5134 
5135 	if (ctxt->d & ModRM)
5136 		ctxt->modrm = insn_fetch(u8, ctxt);
5137 
5138 	/* vex-prefix instructions are not implemented */
5139 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5140 	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5141 		ctxt->d = NotImpl;
5142 	}
5143 
5144 	while (ctxt->d & GroupMask) {
5145 		switch (ctxt->d & GroupMask) {
5146 		case Group:
5147 			goffset = (ctxt->modrm >> 3) & 7;
5148 			opcode = opcode.u.group[goffset];
5149 			break;
5150 		case GroupDual:
5151 			goffset = (ctxt->modrm >> 3) & 7;
5152 			if ((ctxt->modrm >> 6) == 3)
5153 				opcode = opcode.u.gdual->mod3[goffset];
5154 			else
5155 				opcode = opcode.u.gdual->mod012[goffset];
5156 			break;
5157 		case RMExt:
5158 			goffset = ctxt->modrm & 7;
5159 			opcode = opcode.u.group[goffset];
5160 			break;
5161 		case Prefix:
5162 			if (ctxt->rep_prefix && op_prefix)
5163 				return EMULATION_FAILED;
5164 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5165 			switch (simd_prefix) {
5166 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5167 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5168 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5169 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5170 			}
5171 			break;
5172 		case Escape:
5173 			if (ctxt->modrm > 0xbf)
5174 				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5175 			else
5176 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5177 			break;
5178 		case InstrDual:
5179 			if ((ctxt->modrm >> 6) == 3)
5180 				opcode = opcode.u.idual->mod3;
5181 			else
5182 				opcode = opcode.u.idual->mod012;
5183 			break;
5184 		case ModeDual:
5185 			if (ctxt->mode == X86EMUL_MODE_PROT64)
5186 				opcode = opcode.u.mdual->mode64;
5187 			else
5188 				opcode = opcode.u.mdual->mode32;
5189 			break;
5190 		default:
5191 			return EMULATION_FAILED;
5192 		}
5193 
5194 		ctxt->d &= ~(u64)GroupMask;
5195 		ctxt->d |= opcode.flags;
5196 	}
5197 
5198 	/* Unrecognised? */
5199 	if (ctxt->d == 0)
5200 		return EMULATION_FAILED;
5201 
5202 	ctxt->execute = opcode.u.execute;
5203 
5204 	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5205 		return EMULATION_FAILED;
5206 
5207 	if (unlikely(ctxt->d &
5208 	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5209 	     No16))) {
5210 		/*
5211 		 * These are copied unconditionally here, and checked unconditionally
5212 		 * in x86_emulate_insn.
5213 		 */
5214 		ctxt->check_perm = opcode.check_perm;
5215 		ctxt->intercept = opcode.intercept;
5216 
5217 		if (ctxt->d & NotImpl)
5218 			return EMULATION_FAILED;
5219 
5220 		if (mode == X86EMUL_MODE_PROT64) {
5221 			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5222 				ctxt->op_bytes = 8;
5223 			else if (ctxt->d & NearBranch)
5224 				ctxt->op_bytes = 8;
5225 		}
5226 
5227 		if (ctxt->d & Op3264) {
5228 			if (mode == X86EMUL_MODE_PROT64)
5229 				ctxt->op_bytes = 8;
5230 			else
5231 				ctxt->op_bytes = 4;
5232 		}
5233 
5234 		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5235 			ctxt->op_bytes = 4;
5236 
5237 		if (ctxt->d & Sse)
5238 			ctxt->op_bytes = 16;
5239 		else if (ctxt->d & Mmx)
5240 			ctxt->op_bytes = 8;
5241 	}
5242 
5243 	/* ModRM and SIB bytes. */
5244 	if (ctxt->d & ModRM) {
5245 		rc = decode_modrm(ctxt, &ctxt->memop);
5246 		if (!has_seg_override) {
5247 			has_seg_override = true;
5248 			ctxt->seg_override = ctxt->modrm_seg;
5249 		}
5250 	} else if (ctxt->d & MemAbs)
5251 		rc = decode_abs(ctxt, &ctxt->memop);
5252 	if (rc != X86EMUL_CONTINUE)
5253 		goto done;
5254 
5255 	if (!has_seg_override)
5256 		ctxt->seg_override = VCPU_SREG_DS;
5257 
5258 	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5259 
5260 	/*
5261 	 * Decode and fetch the source operand: register, memory
5262 	 * or immediate.
5263 	 */
5264 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5265 	if (rc != X86EMUL_CONTINUE)
5266 		goto done;
5267 
5268 	/*
5269 	 * Decode and fetch the second source operand: register, memory
5270 	 * or immediate.
5271 	 */
5272 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5273 	if (rc != X86EMUL_CONTINUE)
5274 		goto done;
5275 
5276 	/* Decode and fetch the destination operand: register or memory. */
5277 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5278 
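	/*
	 * RIP-relative displacements are relative to the end of the
	 * instruction, so add the updated _eip now that decoding is done.
	 */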
5279 	if (ctxt->rip_relative && likely(ctxt->memopp))
5280 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5281 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5282 
5283 done:
5284 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5285 }
5286 
5287 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5288 {
5289 	return ctxt->d & PageTable;
5290 }
5291 
5292 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5293 {
5294 	/* The second termination condition only applies for REPE
5295 	 * and REPNE. Test if the repeat string operation prefix is
5296 	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
5297 	 * corresponding termination condition according to:
5298 	 * 	- if REPE/REPZ and ZF = 0 then done
5299 	 * 	- if REPNE/REPNZ and ZF = 1 then done
5300 	 */
5301 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5302 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5303 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5304 		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5305 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5306 		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5307 		return true;
5308 
5309 	return false;
5310 }
5311 
5312 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5313 {
5314 	int rc;
5315 
5316 	rc = asm_safe("fwait");
5317 
5318 	if (unlikely(rc != X86EMUL_CONTINUE))
5319 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5320 
5321 	return X86EMUL_CONTINUE;
5322 }
5323 
5324 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5325 				       struct operand *op)
5326 {
5327 	if (op->type == OP_MM)
5328 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5329 }
5330 
5331 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5332 {
5333 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5334 
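	/*
	 * A fastop is a table of FASTOP_SIZE stubs indexed by operand size
	 * (byte at slot 0, then word/dword/qword).  IF is forced on above so
	 * that the popf below cannot disable host interrupts.
	 */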
5335 	if (!(ctxt->d & ByteOp))
5336 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5337 
5338 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5339 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5340 	      [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
5341 	    : "c"(ctxt->src2.val));
5342 
5343 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5344 	if (!fop) /* exception is returned in fop variable */
5345 		return emulate_de(ctxt);
5346 	return X86EMUL_CONTINUE;
5347 }
5348 
5349 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5350 {
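	/*
	 * Zero the whole decode cache in one go; this relies on every field
	 * from rip_relative up to (but not including) modrm being laid out
	 * contiguously in struct x86_emulate_ctxt.
	 */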
5351 	memset(&ctxt->rip_relative, 0,
5352 	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5353 
5354 	ctxt->io_read.pos = 0;
5355 	ctxt->io_read.end = 0;
5356 	ctxt->mem_read.end = 0;
5357 }
5358 
5359 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5360 {
5361 	const struct x86_emulate_ops *ops = ctxt->ops;
5362 	int rc = X86EMUL_CONTINUE;
5363 	int saved_dst_type = ctxt->dst.type;
5364 	unsigned emul_flags;
5365 
5366 	ctxt->mem_read.pos = 0;
5367 
5368 	/* LOCK prefix is allowed only with some instructions */
5369 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5370 		rc = emulate_ud(ctxt);
5371 		goto done;
5372 	}
5373 
5374 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5375 		rc = emulate_ud(ctxt);
5376 		goto done;
5377 	}
5378 
5379 	emul_flags = ctxt->ops->get_hflags(ctxt);
5380 	if (unlikely(ctxt->d &
5381 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5382 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5383 				(ctxt->d & Undefined)) {
5384 			rc = emulate_ud(ctxt);
5385 			goto done;
5386 		}
5387 
5388 		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5389 		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5390 			rc = emulate_ud(ctxt);
5391 			goto done;
5392 		}
5393 
5394 		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5395 			rc = emulate_nm(ctxt);
5396 			goto done;
5397 		}
5398 
5399 		if (ctxt->d & Mmx) {
5400 			rc = flush_pending_x87_faults(ctxt);
5401 			if (rc != X86EMUL_CONTINUE)
5402 				goto done;
5403 			/*
5404 			 * Now that we know the fpu is exception safe, we can fetch
5405 			 * operands from it.
5406 			 */
5407 			fetch_possible_mmx_operand(ctxt, &ctxt->src);
5408 			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5409 			if (!(ctxt->d & Mov))
5410 				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5411 		}
5412 
5413 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5414 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5415 						      X86_ICPT_PRE_EXCEPT);
5416 			if (rc != X86EMUL_CONTINUE)
5417 				goto done;
5418 		}
5419 
5420 		/* Instruction can only be executed in protected mode */
5421 		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5422 			rc = emulate_ud(ctxt);
5423 			goto done;
5424 		}
5425 
5426 		/* Privileged instruction can be executed only in CPL=0 */
5427 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5428 			if (ctxt->d & PrivUD)
5429 				rc = emulate_ud(ctxt);
5430 			else
5431 				rc = emulate_gp(ctxt, 0);
5432 			goto done;
5433 		}
5434 
5435 		/* Do instruction specific permission checks */
5436 		if (ctxt->d & CheckPerm) {
5437 			rc = ctxt->check_perm(ctxt);
5438 			if (rc != X86EMUL_CONTINUE)
5439 				goto done;
5440 		}
5441 
5442 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5443 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5444 						      X86_ICPT_POST_EXCEPT);
5445 			if (rc != X86EMUL_CONTINUE)
5446 				goto done;
5447 		}
5448 
5449 		if (ctxt->rep_prefix && (ctxt->d & String)) {
5450 			/* All REP prefixes have the same first termination condition */
5451 			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5452 				string_registers_quirk(ctxt);
5453 				ctxt->eip = ctxt->_eip;
5454 				ctxt->eflags &= ~X86_EFLAGS_RF;
5455 				goto done;
5456 			}
5457 		}
5458 	}
5459 
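	/*
	 * All checks passed: fetch the memory operands.  NoAccess
	 * instructions (such as lea) need only the effective address,
	 * not the data behind it.
	 */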
5460 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5461 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5462 				    ctxt->src.valptr, ctxt->src.bytes);
5463 		if (rc != X86EMUL_CONTINUE)
5464 			goto done;
5465 		ctxt->src.orig_val64 = ctxt->src.val64;
5466 	}
5467 
5468 	if (ctxt->src2.type == OP_MEM) {
5469 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5470 				    &ctxt->src2.val, ctxt->src2.bytes);
5471 		if (rc != X86EMUL_CONTINUE)
5472 			goto done;
5473 	}
5474 
5475 	if ((ctxt->d & DstMask) == ImplicitOps)
5476 		goto special_insn;
5477 
5479 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5480 		/*
		 * Optimisation: a Mov overwrites dst anyway, so skip the
		 * slow emulated read of the old value.
		 */
5481 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5482 				   &ctxt->dst.val, ctxt->dst.bytes);
5483 		if (rc != X86EMUL_CONTINUE) {
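			/*
			 * The read half of a read-modify-write access
			 * faulted; if the instruction would also write,
			 * report the page fault as a write fault.
			 */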
5484 			if (!(ctxt->d & NoWrite) &&
5485 			    rc == X86EMUL_PROPAGATE_FAULT &&
5486 			    ctxt->exception.vector == PF_VECTOR)
5487 				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5488 			goto done;
5489 		}
5490 	}
5491 	/* Copy full 64-bit value for CMPXCHG8B.  */
5492 	ctxt->dst.orig_val64 = ctxt->dst.val64;
5493 
5494 special_insn:
5495 
5496 	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5497 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5498 					      X86_ICPT_POST_MEMACCESS);
5499 		if (rc != X86EMUL_CONTINUE)
5500 			goto done;
5501 	}
5502 
5503 	if (ctxt->rep_prefix && (ctxt->d & String))
5504 		ctxt->eflags |= X86_EFLAGS_RF;
5505 	else
5506 		ctxt->eflags &= ~X86_EFLAGS_RF;
5507 
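	/*
	 * Dispatch: a Fastop entry points at a fastop stub that is run
	 * natively via fastop(); everything else goes through the
	 * ->execute() callback.  Only opcodes with neither fall through
	 * to the opcode switches below.
	 */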
5508 	if (ctxt->execute) {
5509 		if (ctxt->d & Fastop) {
5510 			void (*fop)(struct fastop *) = (void *)ctxt->execute;
5511 			rc = fastop(ctxt, fop);
5512 			if (rc != X86EMUL_CONTINUE)
5513 				goto done;
5514 			goto writeback;
5515 		}
5516 		rc = ctxt->execute(ctxt);
5517 		if (rc != X86EMUL_CONTINUE)
5518 			goto done;
5519 		goto writeback;
5520 	}
5521 
5522 	if (ctxt->opcode_len == 2)
5523 		goto twobyte_insn;
5524 	else if (ctxt->opcode_len == 3)
5525 		goto threebyte_insn;
5526 
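	/* One-byte opcodes that are handled inline, without a callback. */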
5527 	switch (ctxt->b) {
5528 	case 0x70 ... 0x7f: /* jcc (short) */
5529 		if (test_cc(ctxt->b, ctxt->eflags))
5530 			rc = jmp_rel(ctxt, ctxt->src.val);
5531 		break;
5532 	case 0x8d: /* lea r16/r32, m */
5533 		ctxt->dst.val = ctxt->src.addr.mem.ea;
5534 		break;
5535 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
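		/* xchg rAX, rAX (opcode 0x90) is a NOP: skip the writeback */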
5536 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5537 			ctxt->dst.type = OP_NONE;
5538 		else
5539 			rc = em_xchg(ctxt);
5540 		break;
5541 	case 0x98: /* cbw/cwde/cdqe */
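		/* Sign-extend the lower half of the accumulator in place. */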
5542 		switch (ctxt->op_bytes) {
5543 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5544 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5545 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5546 		}
5547 		break;
5548 	case 0xcc:		/* int3 */
5549 		rc = emulate_int(ctxt, 3);
5550 		break;
5551 	case 0xcd:		/* int n */
5552 		rc = emulate_int(ctxt, ctxt->src.val);
5553 		break;
5554 	case 0xce:		/* into */
5555 		if (ctxt->eflags & X86_EFLAGS_OF)
5556 			rc = emulate_int(ctxt, 4);
5557 		break;
5558 	case 0xe9: /* jmp rel */
5559 	case 0xeb: /* jmp rel short */
5560 		rc = jmp_rel(ctxt, ctxt->src.val);
5561 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5562 		break;
5563 	case 0xf4:              /* hlt */
5564 		ctxt->ops->halt(ctxt);
5565 		break;
5566 	case 0xf5:	/* cmc */
5567 		/* complement carry flag from eflags reg */
5568 		ctxt->eflags ^= X86_EFLAGS_CF;
5569 		break;
5570 	case 0xf8: /* clc */
5571 		ctxt->eflags &= ~X86_EFLAGS_CF;
5572 		break;
5573 	case 0xf9: /* stc */
5574 		ctxt->eflags |= X86_EFLAGS_CF;
5575 		break;
5576 	case 0xfc: /* cld */
5577 		ctxt->eflags &= ~X86_EFLAGS_DF;
5578 		break;
5579 	case 0xfd: /* std */
5580 		ctxt->eflags |= X86_EFLAGS_DF;
5581 		break;
5582 	default:
5583 		goto cannot_emulate;
5584 	}
5585 
5586 	if (rc != X86EMUL_CONTINUE)
5587 		goto done;
5588 
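	/*
	 * Writeback stage: commit the results to the destination (and,
	 * for SrcWrite instructions, the source), then advance RIP and
	 * do the per-iteration bookkeeping for string instructions.
	 */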
5589 writeback:
5590 	if (ctxt->d & SrcWrite) {
5591 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5592 		rc = writeback(ctxt, &ctxt->src);
5593 		if (rc != X86EMUL_CONTINUE)
5594 			goto done;
5595 	}
5596 	if (!(ctxt->d & NoWrite)) {
5597 		rc = writeback(ctxt, &ctxt->dst);
5598 		if (rc != X86EMUL_CONTINUE)
5599 			goto done;
5600 	}
5601 
5602 	/*
5603 	 * Restore the dst type in case the decoding is reused
5604 	 * (happens for string instructions).
5605 	 */
5606 	ctxt->dst.type = saved_dst_type;
5607 
5608 	if ((ctxt->d & SrcMask) == SrcSI)
5609 		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5610 
5611 	if ((ctxt->d & DstMask) == DstDI)
5612 		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5613 
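	/*
	 * A REP iteration has completed: decrement RCX by the number of
	 * units processed, then either emulate the next iteration right
	 * away (EMULATION_RESTART), return to the guest without
	 * advancing RIP, or, once the string instruction is done, fall
	 * through to the final RIP update.
	 */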
5614 	if (ctxt->rep_prefix && (ctxt->d & String)) {
5615 		unsigned int count;
5616 		struct read_cache *r = &ctxt->io_read;
5617 		if ((ctxt->d & SrcMask) == SrcSI)
5618 			count = ctxt->src.count;
5619 		else
5620 			count = ctxt->dst.count;
5621 		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5622 
5623 		if (!string_insn_completed(ctxt)) {
5624 			/*
5625 			 * Re-enter the guest when the PIO read-ahead buffer is
5626 			 * empty or, if it is not used, after every 1024 iterations.
5627 			 */
5628 			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5629 			    (r->end == 0 || r->end != r->pos)) {
5630 				/*
5631 				 * Reset the read cache.  This usually happens
5632 				 * before decode, but since the instruction is
5633 				 * restarted we have to do it here.
5634 				 */
5635 				ctxt->mem_read.end = 0;
5636 				writeback_registers(ctxt);
5637 				return EMULATION_RESTART;
5638 			}
5639 			goto done; /* skip RIP writeback */
5640 		}
5641 		ctxt->eflags &= ~X86_EFLAGS_RF;
5642 	}
5643 
5644 	ctxt->eip = ctxt->_eip;
5645 
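	/*
	 * Common exit: record any pending exception, flush the register
	 * cache on success, and map the internal X86EMUL_* status codes
	 * to the EMULATION_* values returned to the caller.
	 */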
5646 done:
5647 	if (rc == X86EMUL_PROPAGATE_FAULT) {
5648 		WARN_ON(ctxt->exception.vector > 0x1f);
5649 		ctxt->have_exception = true;
5650 	}
5651 	if (rc == X86EMUL_INTERCEPTED)
5652 		return EMULATION_INTERCEPTED;
5653 
5654 	if (rc == X86EMUL_CONTINUE)
5655 		writeback_registers(ctxt);
5656 
5657 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5658 
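	/* Two-byte (0x0f-prefixed) opcodes handled without a callback. */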
5659 twobyte_insn:
5660 	switch (ctxt->b) {
5661 	case 0x09:		/* wbinvd */
5662 		ctxt->ops->wbinvd(ctxt);
5663 		break;
5664 	case 0x08:		/* invd */
5665 	case 0x0d:		/* GrpP (prefetch) */
5666 	case 0x18:		/* Grp16 (prefetch/nop) */
5667 	case 0x1f:		/* nop */
5668 		break;
5669 	case 0x20: /* mov cr, reg */
5670 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5671 		break;
5672 	case 0x21: /* mov from dr to reg */
5673 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5674 		break;
5675 	case 0x40 ... 0x4f:	/* cmov */
5676 		if (test_cc(ctxt->b, ctxt->eflags))
5677 			ctxt->dst.val = ctxt->src.val;
5678 		else if (ctxt->op_bytes != 4)
5679 			ctxt->dst.type = OP_NONE; /* no writeback */
5680 		break;
5681 	case 0x80 ... 0x8f: /* jcc rel (near) */
5682 		if (test_cc(ctxt->b, ctxt->eflags))
5683 			rc = jmp_rel(ctxt, ctxt->src.val);
5684 		break;
5685 	case 0x90 ... 0x9f:     /* setcc r/m8 */
5686 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5687 		break;
5688 	case 0xb6 ... 0xb7:	/* movzx */
5689 		ctxt->dst.bytes = ctxt->op_bytes;
5690 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5691 						       : (u16) ctxt->src.val;
5692 		break;
5693 	case 0xbe ... 0xbf:	/* movsx */
5694 		ctxt->dst.bytes = ctxt->op_bytes;
5695 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
5696 						       : (s16) ctxt->src.val;
5697 		break;
5698 	default:
5699 		goto cannot_emulate;
5700 	}
5701 
5702 threebyte_insn:
5703 
5704 	if (rc != X86EMUL_CONTINUE)
5705 		goto done;
5706 
5707 	goto writeback;
5708 
5709 cannot_emulate:
5710 	return EMULATION_FAILED;
5711 }
5712 
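/*
 * Thin wrappers exposing register-cache management to code outside the
 * emulator.
 */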
5713 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5714 {
5715 	invalidate_registers(ctxt);
5716 }
5717 
5718 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5719 {
5720 	writeback_registers(ctxt);
5721 }
5722 
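/*
 * A cached guest physical address is usable only when the instruction
 * accesses a single memory location: REP string instructions and
 * TwoMemOp instructions touch more than one, so the cached GPA cannot
 * be trusted to describe the access.
 */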
5723 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5724 {
5725 	if (ctxt->rep_prefix && (ctxt->d & String))
5726 		return false;
5727 
5728 	if (ctxt->d & TwoMemOp)
5729 		return false;
5730 
5731 	return true;
5732 }
5733