xref: /openbmc/linux/arch/x86/kvm/emulate.c (revision 2f828fb2)
1 /******************************************************************************
2  * emulate.c
3  *
4  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5  *
6  * Copyright (c) 2005 Keir Fraser
7  *
8  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9  * privileged instructions:
10  *
11  * Copyright (C) 2006 Qumranet
12  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13  *
14  *   Avi Kivity <avi@qumranet.com>
15  *   Yaniv Kamay <yaniv@qumranet.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21  */
22 
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <asm/kvm_emulate.h>
26 #include <linux/stringify.h>
27 #include <asm/debugreg.h>
28 
29 #include "x86.h"
30 #include "tss.h"
31 #include "mmu.h"
32 
33 /*
34  * Operand types
35  */
36 #define OpNone             0ull
37 #define OpImplicit         1ull  /* No generic decode */
38 #define OpReg              2ull  /* Register */
39 #define OpMem              3ull  /* Memory */
40 #define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
41 #define OpDI               5ull  /* ES:DI/EDI/RDI */
42 #define OpMem64            6ull  /* Memory, 64-bit */
43 #define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
44 #define OpDX               8ull  /* DX register */
45 #define OpCL               9ull  /* CL register (for shifts) */
46 #define OpImmByte         10ull  /* 8-bit sign extended immediate */
47 #define OpOne             11ull  /* Implied 1 */
48 #define OpImm             12ull  /* Sign extended up to 32-bit immediate */
49 #define OpMem16           13ull  /* Memory operand (16-bit). */
50 #define OpMem32           14ull  /* Memory operand (32-bit). */
51 #define OpImmU            15ull  /* Immediate operand, zero extended */
52 #define OpSI              16ull  /* SI/ESI/RSI */
53 #define OpImmFAddr        17ull  /* Immediate far address */
54 #define OpMemFAddr        18ull  /* Far address in memory */
55 #define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
56 #define OpES              20ull  /* ES */
57 #define OpCS              21ull  /* CS */
58 #define OpSS              22ull  /* SS */
59 #define OpDS              23ull  /* DS */
60 #define OpFS              24ull  /* FS */
61 #define OpGS              25ull  /* GS */
62 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
63 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
64 #define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
65 #define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
66 #define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
67 
68 #define OpBits             5  /* Width of operand field */
69 #define OpMask             ((1ull << OpBits) - 1)
70 
71 /*
72  * Opcode effective-address decode tables.
73  * Note that we only emulate instructions that have at least one memory
74  * operand (excluding implicit stack references). We assume that stack
75  * references and instruction fetches will never occur in special memory
76  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
77  * not be handled.
78  */
79 
80 /* Operand sizes: 8-bit operands or specified/overridden size. */
81 #define ByteOp      (1<<0)	/* 8-bit operands. */
82 /* Destination operand type. */
83 #define DstShift    1
84 #define ImplicitOps (OpImplicit << DstShift)
85 #define DstReg      (OpReg << DstShift)
86 #define DstMem      (OpMem << DstShift)
87 #define DstAcc      (OpAcc << DstShift)
88 #define DstDI       (OpDI << DstShift)
89 #define DstMem64    (OpMem64 << DstShift)
90 #define DstMem16    (OpMem16 << DstShift)
91 #define DstImmUByte (OpImmUByte << DstShift)
92 #define DstDX       (OpDX << DstShift)
93 #define DstAccLo    (OpAccLo << DstShift)
94 #define DstMask     (OpMask << DstShift)
95 /* Source operand type. */
96 #define SrcShift    6
97 #define SrcNone     (OpNone << SrcShift)
98 #define SrcReg      (OpReg << SrcShift)
99 #define SrcMem      (OpMem << SrcShift)
100 #define SrcMem16    (OpMem16 << SrcShift)
101 #define SrcMem32    (OpMem32 << SrcShift)
102 #define SrcImm      (OpImm << SrcShift)
103 #define SrcImmByte  (OpImmByte << SrcShift)
104 #define SrcOne      (OpOne << SrcShift)
105 #define SrcImmUByte (OpImmUByte << SrcShift)
106 #define SrcImmU     (OpImmU << SrcShift)
107 #define SrcSI       (OpSI << SrcShift)
108 #define SrcXLat     (OpXLat << SrcShift)
109 #define SrcImmFAddr (OpImmFAddr << SrcShift)
110 #define SrcMemFAddr (OpMemFAddr << SrcShift)
111 #define SrcAcc      (OpAcc << SrcShift)
112 #define SrcImmU16   (OpImmU16 << SrcShift)
113 #define SrcImm64    (OpImm64 << SrcShift)
114 #define SrcDX       (OpDX << SrcShift)
115 #define SrcMem8     (OpMem8 << SrcShift)
116 #define SrcAccHi    (OpAccHi << SrcShift)
117 #define SrcMask     (OpMask << SrcShift)
118 #define BitOp       (1<<11)     /* Memory operand is a bit offset (bt/bts/btr/btc) */
119 #define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
120 #define String      (1<<13)     /* String instruction (rep capable) */
121 #define Stack       (1<<14)     /* Stack instruction (push/pop) */
122 #define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
123 #define Group       (1<<15)     /* Bits 5:3 of modrm byte extend opcode */
124 #define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
125 #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
126 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
127 #define Escape      (5<<15)     /* Escape to coprocessor instruction */
128 #define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
129 #define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
130 #define Sse         (1<<18)     /* SSE Vector instruction */
131 /* Generic ModRM decode. */
132 #define ModRM       (1<<19)
133 /* Destination is only written; never read. */
134 #define Mov         (1<<20)
135 /* Misc flags */
136 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
137 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140 #define Undefined   (1<<25) /* No Such Instruction */
141 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
142 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
143 #define No64	    (1<<28)     /* Instruction is invalid in 64-bit mode */
144 #define PageTable   (1 << 29)   /* instruction used to write page table */
145 #define NotImpl     (1 << 30)   /* instruction is not implemented */
146 /* Source 2 operand type */
147 #define Src2Shift   (31)
148 #define Src2None    (OpNone << Src2Shift)
149 #define Src2Mem     (OpMem << Src2Shift)
150 #define Src2CL      (OpCL << Src2Shift)
151 #define Src2ImmByte (OpImmByte << Src2Shift)
152 #define Src2One     (OpOne << Src2Shift)
153 #define Src2Imm     (OpImm << Src2Shift)
154 #define Src2ES      (OpES << Src2Shift)
155 #define Src2CS      (OpCS << Src2Shift)
156 #define Src2SS      (OpSS << Src2Shift)
157 #define Src2DS      (OpDS << Src2Shift)
158 #define Src2FS      (OpFS << Src2Shift)
159 #define Src2GS      (OpGS << Src2Shift)
160 #define Src2Mask    (OpMask << Src2Shift)
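/*
 * Illustrative only, not part of the decode tables: an entry flagged,
 * say, DstReg | SrcMem | Src2CL packs OpReg, OpMem and OpCL into a
 * single flags word; the decoder later in this file unpacks each field
 * with a shift and a mask, roughly:
 *
 *	(ctxt->d >> DstShift)  & OpMask		-> OpReg
 *	(ctxt->d >> SrcShift)  & OpMask		-> OpMem
 *	(ctxt->d >> Src2Shift) & OpMask		-> OpCL
 */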
161 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
162 #define AlignMask   ((u64)7 << 41)
163 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
164 #define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
165 #define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
166 #define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167 #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
168 #define NoWrite     ((u64)1 << 45)  /* No writeback */
169 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
170 #define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
171 #define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
172 #define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
173 #define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
174 #define NearBranch  ((u64)1 << 52)  /* Near branches */
175 #define No16	    ((u64)1 << 53)  /* No 16 bit operand */
176 #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
177 #define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
178 
179 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
180 
181 #define X2(x...) x, x
182 #define X3(x...) X2(x), x
183 #define X4(x...) X2(x), X2(x)
184 #define X5(x...) X4(x), x
185 #define X6(x...) X4(x), X2(x)
186 #define X7(x...) X4(x), X3(x)
187 #define X8(x...) X4(x), X4(x)
188 #define X16(x...) X8(x), X8(x)
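/*
 * These repeat an entry N times when building the opcode tables later
 * in this file; e.g. the sixteen Jcc slots (0x70-0x7f) are filled with
 * a single X16(D(SrcImmByte | NearBranch)) entry.
 */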
189 
190 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
191 #define FASTOP_SIZE 8
192 
193 /*
194  * fastop functions have a special calling convention:
195  *
196  * dst:    rax        (in/out)
197  * src:    rdx        (in/out)
198  * src2:   rcx        (in)
199  * flags:  rflags     (in/out)
200  * ex:     rsi        (in:fastop pointer, out:zero if exception)
201  *
202  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
203  * different operand sizes can be reached by calculation, rather than a jump
204  * table (which would be bigger than the code).
205  *
206  * fastop functions are declared as taking a never-defined fastop parameter,
207  * so they can't be called from C directly.
208  */
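/*
 * Dispatch sketch (the real fastop() is defined later in this file):
 * the size-specific stub is reached by offsetting into the em_##op
 * block, roughly
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * so the byte form sits at offset 0 and the 16/32/64-bit forms follow
 * at FASTOP_SIZE (8-byte) intervals; see the FASTOP* macros below.
 */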
209 
210 struct fastop;
211 
212 struct opcode {
213 	u64 flags : 56;
214 	u64 intercept : 8;
215 	union {
216 		int (*execute)(struct x86_emulate_ctxt *ctxt);
217 		const struct opcode *group;
218 		const struct group_dual *gdual;
219 		const struct gprefix *gprefix;
220 		const struct escape *esc;
221 		const struct instr_dual *idual;
222 		const struct mode_dual *mdual;
223 		void (*fastop)(struct fastop *fake);
224 	} u;
225 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
226 };
227 
228 struct group_dual {
229 	struct opcode mod012[8];
230 	struct opcode mod3[8];
231 };
232 
233 struct gprefix {
234 	struct opcode pfx_no;
235 	struct opcode pfx_66;
236 	struct opcode pfx_f2;
237 	struct opcode pfx_f3;
238 };
239 
240 struct escape {
241 	struct opcode op[8];
242 	struct opcode high[64];
243 };
244 
245 struct instr_dual {
246 	struct opcode mod012;
247 	struct opcode mod3;
248 };
249 
250 struct mode_dual {
251 	struct opcode mode32;
252 	struct opcode mode64;
253 };
254 
255 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
256 
257 enum x86_transfer_type {
258 	X86_TRANSFER_NONE,
259 	X86_TRANSFER_CALL_JMP,
260 	X86_TRANSFER_RET,
261 	X86_TRANSFER_TASK_SWITCH,
262 };
263 
264 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
265 {
266 	if (!(ctxt->regs_valid & (1 << nr))) {
267 		ctxt->regs_valid |= 1 << nr;
268 		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
269 	}
270 	return ctxt->_regs[nr];
271 }
272 
273 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
274 {
275 	ctxt->regs_valid |= 1 << nr;
276 	ctxt->regs_dirty |= 1 << nr;
277 	return &ctxt->_regs[nr];
278 }
279 
280 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
281 {
282 	reg_read(ctxt, nr);
283 	return reg_write(ctxt, nr);
284 }
285 
286 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
287 {
288 	unsigned reg;
289 
290 	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
291 		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
292 }
293 
294 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
295 {
296 	ctxt->regs_dirty = 0;
297 	ctxt->regs_valid = 0;
298 }
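/*
 * Usage sketch of the lazy GPR cache: reg_read() fills a slot on first
 * use, reg_rmw() also marks it dirty, and writeback_registers() pushes
 * only the dirty slots back through ->write_gpr(), e.g.
 *
 *	*reg_rmw(ctxt, VCPU_REGS_RAX) += 1;
 *	writeback_registers(ctxt);	(writes back RAX only)
 */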
299 
300 /*
301  * These EFLAGS bits are restored from saved value during emulation, and
302  * any changes are written back to the saved value after emulation.
303  */
304 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
305 		     X86_EFLAGS_PF|X86_EFLAGS_CF)
306 
307 #ifdef CONFIG_X86_64
308 #define ON64(x) x
309 #else
310 #define ON64(x)
311 #endif
312 
313 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
314 
315 #define FOP_FUNC(name) \
316 	".align " __stringify(FASTOP_SIZE) " \n\t" \
317 	".type " name ", @function \n\t" \
318 	name ":\n\t"
319 
320 #define FOP_RET   "ret \n\t"
321 
322 #define FOP_START(op) \
323 	extern void em_##op(struct fastop *fake); \
324 	asm(".pushsection .text, \"ax\" \n\t" \
325 	    ".global em_" #op " \n\t" \
326 	    FOP_FUNC("em_" #op)
327 
328 #define FOP_END \
329 	    ".popsection")
330 
331 #define FOPNOP() \
332 	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
333 	FOP_RET
334 
335 #define FOP1E(op,  dst) \
336 	FOP_FUNC(#op "_" #dst) \
337 	"10: " #op " %" #dst " \n\t" FOP_RET
338 
339 #define FOP1EEX(op,  dst) \
340 	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
341 
342 #define FASTOP1(op) \
343 	FOP_START(op) \
344 	FOP1E(op##b, al) \
345 	FOP1E(op##w, ax) \
346 	FOP1E(op##l, eax) \
347 	ON64(FOP1E(op##q, rax))	\
348 	FOP_END
349 
350 /* 1-operand, using src2 (for MUL/DIV r/m) */
351 #define FASTOP1SRC2(op, name) \
352 	FOP_START(name) \
353 	FOP1E(op, cl) \
354 	FOP1E(op, cx) \
355 	FOP1E(op, ecx) \
356 	ON64(FOP1E(op, rcx)) \
357 	FOP_END
358 
359 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
360 #define FASTOP1SRC2EX(op, name) \
361 	FOP_START(name) \
362 	FOP1EEX(op, cl) \
363 	FOP1EEX(op, cx) \
364 	FOP1EEX(op, ecx) \
365 	ON64(FOP1EEX(op, rcx)) \
366 	FOP_END
367 
368 #define FOP2E(op,  dst, src)	   \
369 	FOP_FUNC(#op "_" #dst "_" #src) \
370 	#op " %" #src ", %" #dst " \n\t" FOP_RET
371 
372 #define FASTOP2(op) \
373 	FOP_START(op) \
374 	FOP2E(op##b, al, dl) \
375 	FOP2E(op##w, ax, dx) \
376 	FOP2E(op##l, eax, edx) \
377 	ON64(FOP2E(op##q, rax, rdx)) \
378 	FOP_END
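/*
 * For example, FASTOP2(add) emits em_add as four FASTOP_SIZE-aligned
 * stubs, one per operand size (a rough sketch of the generated code):
 *
 *	em_add+0x00:	addb %dl, %al;   ret
 *	em_add+0x08:	addw %dx, %ax;   ret
 *	em_add+0x10:	addl %edx, %eax; ret
 *	em_add+0x18:	addq %rdx, %rax; ret	(64-bit only)
 */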
379 
380 /* 2 operand, word only */
381 #define FASTOP2W(op) \
382 	FOP_START(op) \
383 	FOPNOP() \
384 	FOP2E(op##w, ax, dx) \
385 	FOP2E(op##l, eax, edx) \
386 	ON64(FOP2E(op##q, rax, rdx)) \
387 	FOP_END
388 
389 /* 2 operand, src is CL */
390 #define FASTOP2CL(op) \
391 	FOP_START(op) \
392 	FOP2E(op##b, al, cl) \
393 	FOP2E(op##w, ax, cl) \
394 	FOP2E(op##l, eax, cl) \
395 	ON64(FOP2E(op##q, rax, cl)) \
396 	FOP_END
397 
398 /* 2 operand, src and dest are reversed */
399 #define FASTOP2R(op, name) \
400 	FOP_START(name) \
401 	FOP2E(op##b, dl, al) \
402 	FOP2E(op##w, dx, ax) \
403 	FOP2E(op##l, edx, eax) \
404 	ON64(FOP2E(op##q, rdx, rax)) \
405 	FOP_END
406 
407 #define FOP3E(op,  dst, src, src2) \
408 	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
409 	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
410 
411 /* 3-operand, word-only, src2=cl */
412 #define FASTOP3WCL(op) \
413 	FOP_START(op) \
414 	FOPNOP() \
415 	FOP3E(op##w, ax, dx, cl) \
416 	FOP3E(op##l, eax, edx, cl) \
417 	ON64(FOP3E(op##q, rax, rdx, cl)) \
418 	FOP_END
419 
420 /* Special case for SETcc - 1 instruction per cc */
421 #define FOP_SETCC(op) \
422 	".align 4 \n\t" \
423 	".type " #op ", @function \n\t" \
424 	#op ": \n\t" \
425 	#op " %al \n\t" \
426 	FOP_RET
427 
428 asm(".pushsection .fixup, \"ax\"\n"
429     ".global kvm_fastop_exception \n"
430     "kvm_fastop_exception: xor %esi, %esi; ret\n"
431     ".popsection");
432 
433 FOP_START(setcc)
434 FOP_SETCC(seto)
435 FOP_SETCC(setno)
436 FOP_SETCC(setc)
437 FOP_SETCC(setnc)
438 FOP_SETCC(setz)
439 FOP_SETCC(setnz)
440 FOP_SETCC(setbe)
441 FOP_SETCC(setnbe)
442 FOP_SETCC(sets)
443 FOP_SETCC(setns)
444 FOP_SETCC(setp)
445 FOP_SETCC(setnp)
446 FOP_SETCC(setl)
447 FOP_SETCC(setnl)
448 FOP_SETCC(setle)
449 FOP_SETCC(setnle)
450 FOP_END;
451 
452 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
453 FOP_END;
454 
455 /*
456  * XXX: inoutclob user must know where the argument is being expanded.
457  *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
458  */
459 #define asm_safe(insn, inoutclob...) \
460 ({ \
461 	int _fault = 0; \
462  \
463 	asm volatile("1:" insn "\n" \
464 	             "2:\n" \
465 	             ".pushsection .fixup, \"ax\"\n" \
466 	             "3: movl $1, %[_fault]\n" \
467 	             "   jmp  2b\n" \
468 	             ".popsection\n" \
469 	             _ASM_EXTABLE(1b, 3b) \
470 	             : [_fault] "+qm"(_fault) inoutclob ); \
471  \
472 	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
473 })
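/*
 * Hypothetical usage, modelled on the FXSAVE/FXRSTOR emulation further
 * down in this file; note the extra comma, which continues the output
 * operand list started by [_fault]:
 *
 *	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 */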
474 
475 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
476 				    enum x86_intercept intercept,
477 				    enum x86_intercept_stage stage)
478 {
479 	struct x86_instruction_info info = {
480 		.intercept  = intercept,
481 		.rep_prefix = ctxt->rep_prefix,
482 		.modrm_mod  = ctxt->modrm_mod,
483 		.modrm_reg  = ctxt->modrm_reg,
484 		.modrm_rm   = ctxt->modrm_rm,
485 		.src_val    = ctxt->src.val64,
486 		.dst_val    = ctxt->dst.val64,
487 		.src_bytes  = ctxt->src.bytes,
488 		.dst_bytes  = ctxt->dst.bytes,
489 		.ad_bytes   = ctxt->ad_bytes,
490 		.next_rip   = ctxt->eip,
491 	};
492 
493 	return ctxt->ops->intercept(ctxt, &info, stage);
494 }
495 
496 static void assign_masked(ulong *dest, ulong src, ulong mask)
497 {
498 	*dest = (*dest & ~mask) | (src & mask);
499 }
500 
501 static void assign_register(unsigned long *reg, u64 val, int bytes)
502 {
503 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
504 	switch (bytes) {
505 	case 1:
506 		*(u8 *)reg = (u8)val;
507 		break;
508 	case 2:
509 		*(u16 *)reg = (u16)val;
510 		break;
511 	case 4:
512 		*reg = (u32)val;
513 		break;	/* 64b: zero-extend */
514 	case 8:
515 		*reg = val;
516 		break;
517 	}
518 }
519 
520 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
521 {
522 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
523 }
524 
525 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
526 {
527 	u16 sel;
528 	struct desc_struct ss;
529 
530 	if (ctxt->mode == X86EMUL_MODE_PROT64)
531 		return ~0UL;
532 	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
533 	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
534 }
535 
536 static int stack_size(struct x86_emulate_ctxt *ctxt)
537 {
538 	return (__fls(stack_mask(ctxt)) + 1) >> 3;
539 }
540 
541 /* Access/update address held in a register, based on addressing mode. */
542 static inline unsigned long
543 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
544 {
545 	if (ctxt->ad_bytes == sizeof(unsigned long))
546 		return reg;
547 	else
548 		return reg & ad_mask(ctxt);
549 }
550 
551 static inline unsigned long
552 register_address(struct x86_emulate_ctxt *ctxt, int reg)
553 {
554 	return address_mask(ctxt, reg_read(ctxt, reg));
555 }
556 
557 static void masked_increment(ulong *reg, ulong mask, int inc)
558 {
559 	assign_masked(reg, *reg + inc, mask);
560 }
561 
562 static inline void
563 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
564 {
565 	ulong *preg = reg_rmw(ctxt, reg);
566 
567 	assign_register(preg, *preg + inc, ctxt->ad_bytes);
568 }
569 
570 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
571 {
572 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
573 }
574 
575 static u32 desc_limit_scaled(struct desc_struct *desc)
576 {
577 	u32 limit = get_desc_limit(desc);
578 
579 	return desc->g ? (limit << 12) | 0xfff : limit;
580 }
581 
582 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
583 {
584 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
585 		return 0;
586 
587 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
588 }
589 
590 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
591 			     u32 error, bool valid)
592 {
593 	WARN_ON(vec > 0x1f);
594 	ctxt->exception.vector = vec;
595 	ctxt->exception.error_code = error;
596 	ctxt->exception.error_code_valid = valid;
597 	return X86EMUL_PROPAGATE_FAULT;
598 }
599 
600 static int emulate_db(struct x86_emulate_ctxt *ctxt)
601 {
602 	return emulate_exception(ctxt, DB_VECTOR, 0, false);
603 }
604 
605 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
606 {
607 	return emulate_exception(ctxt, GP_VECTOR, err, true);
608 }
609 
610 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
611 {
612 	return emulate_exception(ctxt, SS_VECTOR, err, true);
613 }
614 
615 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
616 {
617 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
618 }
619 
620 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
621 {
622 	return emulate_exception(ctxt, TS_VECTOR, err, true);
623 }
624 
625 static int emulate_de(struct x86_emulate_ctxt *ctxt)
626 {
627 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
628 }
629 
630 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
631 {
632 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
633 }
634 
635 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
636 {
637 	u16 selector;
638 	struct desc_struct desc;
639 
640 	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
641 	return selector;
642 }
643 
644 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
645 				 unsigned seg)
646 {
647 	u16 dummy;
648 	u32 base3;
649 	struct desc_struct desc;
650 
651 	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
652 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
653 }
654 
655 /*
656  * x86 defines three classes of vector instructions: explicitly
657  * aligned, explicitly unaligned, and the rest, which change behaviour
658  * depending on whether they're AVX encoded or not.
659  *
660  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
661  * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
662  * 512 bytes of data must be aligned to a 16 byte boundary.
663  */
664 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
665 {
666 	u64 alignment = ctxt->d & AlignMask;
667 
668 	if (likely(size < 16))
669 		return 1;
670 
671 	switch (alignment) {
672 	case Unaligned:
673 	case Avx:
674 		return 1;
675 	case Aligned16:
676 		return 16;
677 	case Aligned:
678 	default:
679 		return size;
680 	}
681 }
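/*
 * Example: a 16-byte MOVDQA access (Aligned) must sit on a 16-byte
 * boundary, while MOVDQU (Unaligned) never faults on alignment;
 * __linearize() below enforces this via
 *
 *	la & (insn_alignment(ctxt, size) - 1)
 */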
682 
683 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
684 				       struct segmented_address addr,
685 				       unsigned *max_size, unsigned size,
686 				       bool write, bool fetch,
687 				       enum x86emul_mode mode, ulong *linear)
688 {
689 	struct desc_struct desc;
690 	bool usable;
691 	ulong la;
692 	u32 lim;
693 	u16 sel;
694 	u8  va_bits;
695 
696 	la = seg_base(ctxt, addr.seg) + addr.ea;
697 	*max_size = 0;
698 	switch (mode) {
699 	case X86EMUL_MODE_PROT64:
700 		*linear = la;
701 		va_bits = ctxt_virt_addr_bits(ctxt);
702 		if (get_canonical(la, va_bits) != la)
703 			goto bad;
704 
705 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
706 		if (size > *max_size)
707 			goto bad;
708 		break;
709 	default:
710 		*linear = la = (u32)la;
711 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
712 						addr.seg);
713 		if (!usable)
714 			goto bad;
715 		/* code segment in protected mode or read-only data segment */
716 		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
717 					|| !(desc.type & 2)) && write)
718 			goto bad;
719 		/* unreadable code segment */
720 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
721 			goto bad;
722 		lim = desc_limit_scaled(&desc);
723 		if (!(desc.type & 8) && (desc.type & 4)) {
724 			/* expand-down segment */
725 			if (addr.ea <= lim)
726 				goto bad;
727 			lim = desc.d ? 0xffffffff : 0xffff;
728 		}
729 		if (addr.ea > lim)
730 			goto bad;
731 		if (lim == 0xffffffff)
732 			*max_size = ~0u;
733 		else {
734 			*max_size = (u64)lim + 1 - addr.ea;
735 			if (size > *max_size)
736 				goto bad;
737 		}
738 		break;
739 	}
740 	if (la & (insn_alignment(ctxt, size) - 1))
741 		return emulate_gp(ctxt, 0);
742 	return X86EMUL_CONTINUE;
743 bad:
744 	if (addr.seg == VCPU_SREG_SS)
745 		return emulate_ss(ctxt, 0);
746 	else
747 		return emulate_gp(ctxt, 0);
748 }
749 
750 static int linearize(struct x86_emulate_ctxt *ctxt,
751 		     struct segmented_address addr,
752 		     unsigned size, bool write,
753 		     ulong *linear)
754 {
755 	unsigned max_size;
756 	return __linearize(ctxt, addr, &max_size, size, write, false,
757 			   ctxt->mode, linear);
758 }
759 
760 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
761 			     enum x86emul_mode mode)
762 {
763 	ulong linear;
764 	int rc;
765 	unsigned max_size;
766 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
767 					   .ea = dst };
768 
769 	if (ctxt->op_bytes != sizeof(unsigned long))
770 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
771 	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
772 	if (rc == X86EMUL_CONTINUE)
773 		ctxt->_eip = addr.ea;
774 	return rc;
775 }
776 
777 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
778 {
779 	return assign_eip(ctxt, dst, ctxt->mode);
780 }
781 
782 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
783 			  const struct desc_struct *cs_desc)
784 {
785 	enum x86emul_mode mode = ctxt->mode;
786 	int rc;
787 
788 #ifdef CONFIG_X86_64
789 	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
790 		if (cs_desc->l) {
791 			u64 efer = 0;
792 
793 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
794 			if (efer & EFER_LMA)
795 				mode = X86EMUL_MODE_PROT64;
796 		} else
797 			mode = X86EMUL_MODE_PROT32; /* temporary value */
798 	}
799 #endif
800 	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
801 		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
802 	rc = assign_eip(ctxt, dst, mode);
803 	if (rc == X86EMUL_CONTINUE)
804 		ctxt->mode = mode;
805 	return rc;
806 }
807 
808 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
809 {
810 	return assign_eip_near(ctxt, ctxt->_eip + rel);
811 }
812 
813 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
814 			      struct segmented_address addr,
815 			      void *data,
816 			      unsigned size)
817 {
818 	int rc;
819 	ulong linear;
820 
821 	rc = linearize(ctxt, addr, size, false, &linear);
822 	if (rc != X86EMUL_CONTINUE)
823 		return rc;
824 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
825 }
826 
827 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
828 			       struct segmented_address addr,
829 			       void *data,
830 			       unsigned int size)
831 {
832 	int rc;
833 	ulong linear;
834 
835 	rc = linearize(ctxt, addr, size, true, &linear);
836 	if (rc != X86EMUL_CONTINUE)
837 		return rc;
838 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
839 }
840 
841 /*
842  * Prefetch the remaining bytes of the instruction without crossing a
843  * page boundary if they are not in fetch_cache yet.
844  */
845 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
846 {
847 	int rc;
848 	unsigned size, max_size;
849 	unsigned long linear;
850 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
851 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
852 					   .ea = ctxt->eip + cur_size };
853 
854 	/*
855 	 * We do not know exactly how many bytes will be needed, and
856 	 * __linearize is expensive, so fetch as much as possible.  We
857 	 * just have to avoid going beyond the 15 byte limit, the end
858 	 * of the segment, or the end of the page.
859 	 *
860 	 * __linearize is called with size 0 so that it does not do any
861 	 * boundary check itself.  Instead, we use max_size to check
862 	 * against op_size.
863 	 */
864 	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
865 			 &linear);
866 	if (unlikely(rc != X86EMUL_CONTINUE))
867 		return rc;
868 
869 	size = min_t(unsigned, 15UL ^ cur_size, max_size);	/* i.e. 15 - cur_size */
870 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
871 
872 	/*
873 	 * One instruction can straddle at most two pages, and one chunk
874 	 * has already been loaded at the beginning of x86_decode_insn.
875 	 * So, if we still do not have enough bytes, we must have hit
876 	 * the 15-byte instruction-length limit.
877 	 */
878 	if (unlikely(size < op_size))
879 		return emulate_gp(ctxt, 0);
880 
881 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
882 			      size, &ctxt->exception);
883 	if (unlikely(rc != X86EMUL_CONTINUE))
884 		return rc;
885 	ctxt->fetch.end += size;
886 	return X86EMUL_CONTINUE;
887 }
888 
889 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
890 					       unsigned size)
891 {
892 	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
893 
894 	if (unlikely(done_size < size))
895 		return __do_insn_fetch_bytes(ctxt, size - done_size);
896 	else
897 		return X86EMUL_CONTINUE;
898 }
899 
900 /* Fetch next part of the instruction being emulated. */
901 #define insn_fetch(_type, _ctxt)					\
902 ({	_type _x;							\
903 									\
904 	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
905 	if (rc != X86EMUL_CONTINUE)					\
906 		goto done;						\
907 	ctxt->_eip += sizeof(_type);					\
908 	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
909 	ctxt->fetch.ptr += sizeof(_type);				\
910 	_x;								\
911 })
912 
913 #define insn_fetch_arr(_arr, _size, _ctxt)				\
914 ({									\
915 	rc = do_insn_fetch_bytes(_ctxt, _size);				\
916 	if (rc != X86EMUL_CONTINUE)					\
917 		goto done;						\
918 	ctxt->_eip += (_size);						\
919 	memcpy(_arr, ctxt->fetch.ptr, _size);				\
920 	ctxt->fetch.ptr += (_size);					\
921 })
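/*
 * Typical use, as in decode_modrm() below when pulling the SIB byte
 * and a 32-bit displacement:
 *
 *	sib = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 *
 * Both macros bail out to a local "done" label on a failed fetch, so
 * callers must declare "rc" and provide that label.
 */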
922 
923 /*
924  * Given the 'reg' portion of a ModRM byte, and a register block, return a
925  * pointer into the block that addresses the relevant register.
926  * @byteop: when set and there is no REX prefix, regs 4-7 decode to AH,CH,DH,BH.
927  */
928 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
929 			     int byteop)
930 {
931 	void *p;
932 	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
933 
934 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
935 		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
936 	else
937 		p = reg_rmw(ctxt, modrm_reg);
938 	return p;
939 }
940 
941 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
942 			   struct segmented_address addr,
943 			   u16 *size, unsigned long *address, int op_bytes)
944 {
945 	int rc;
946 
947 	if (op_bytes == 2)
948 		op_bytes = 3;	/* 16-bit operand size: only a 24-bit base is read */
949 	*address = 0;
950 	rc = segmented_read_std(ctxt, addr, size, 2);
951 	if (rc != X86EMUL_CONTINUE)
952 		return rc;
953 	addr.ea += 2;
954 	rc = segmented_read_std(ctxt, addr, address, op_bytes);
955 	return rc;
956 }
957 
958 FASTOP2(add);
959 FASTOP2(or);
960 FASTOP2(adc);
961 FASTOP2(sbb);
962 FASTOP2(and);
963 FASTOP2(sub);
964 FASTOP2(xor);
965 FASTOP2(cmp);
966 FASTOP2(test);
967 
968 FASTOP1SRC2(mul, mul_ex);
969 FASTOP1SRC2(imul, imul_ex);
970 FASTOP1SRC2EX(div, div_ex);
971 FASTOP1SRC2EX(idiv, idiv_ex);
972 
973 FASTOP3WCL(shld);
974 FASTOP3WCL(shrd);
975 
976 FASTOP2W(imul);
977 
978 FASTOP1(not);
979 FASTOP1(neg);
980 FASTOP1(inc);
981 FASTOP1(dec);
982 
983 FASTOP2CL(rol);
984 FASTOP2CL(ror);
985 FASTOP2CL(rcl);
986 FASTOP2CL(rcr);
987 FASTOP2CL(shl);
988 FASTOP2CL(shr);
989 FASTOP2CL(sar);
990 
991 FASTOP2W(bsf);
992 FASTOP2W(bsr);
993 FASTOP2W(bt);
994 FASTOP2W(bts);
995 FASTOP2W(btr);
996 FASTOP2W(btc);
997 
998 FASTOP2(xadd);
999 
1000 FASTOP2R(cmp, cmp_r);
1001 
1002 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1003 {
1004 	/* If src is zero, skip writeback but still update flags */
1005 	if (ctxt->src.val == 0)
1006 		ctxt->dst.type = OP_NONE;
1007 	return fastop(ctxt, em_bsf);
1008 }
1009 
1010 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1011 {
1012 	/* If src is zero, skip writeback but still update flags */
1013 	if (ctxt->src.val == 0)
1014 		ctxt->dst.type = OP_NONE;
1015 	return fastop(ctxt, em_bsr);
1016 }
1017 
1018 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1019 {
1020 	u8 rc;
1021 	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1022 
1023 	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1024 	asm("push %[flags]; popf; call *%[fastop]"
1025 	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
1026 	return rc;
1027 }
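/*
 * Example: test_cc(0x4, flags) evaluates the "e/z" condition (SETE is
 * opcode 0x0f 0x94) by calling the 4-byte stub at em_setcc + 4 * 4,
 * i.e. the setz entry in the FOP_SETCC block above.
 */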
1028 
1029 static void fetch_register_operand(struct operand *op)
1030 {
1031 	switch (op->bytes) {
1032 	case 1:
1033 		op->val = *(u8 *)op->addr.reg;
1034 		break;
1035 	case 2:
1036 		op->val = *(u16 *)op->addr.reg;
1037 		break;
1038 	case 4:
1039 		op->val = *(u32 *)op->addr.reg;
1040 		break;
1041 	case 8:
1042 		op->val = *(u64 *)op->addr.reg;
1043 		break;
1044 	}
1045 }
1046 
1047 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1048 {
1049 	ctxt->ops->get_fpu(ctxt);
1050 	switch (reg) {
1051 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1052 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1053 	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1054 	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1055 	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1056 	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1057 	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1058 	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1059 #ifdef CONFIG_X86_64
1060 	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1061 	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1062 	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1063 	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1064 	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1065 	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1066 	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1067 	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1068 #endif
1069 	default: BUG();
1070 	}
1071 	ctxt->ops->put_fpu(ctxt);
1072 }
1073 
1074 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1075 			  int reg)
1076 {
1077 	ctxt->ops->get_fpu(ctxt);
1078 	switch (reg) {
1079 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1080 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1081 	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1082 	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1083 	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1084 	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1085 	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1086 	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1087 #ifdef CONFIG_X86_64
1088 	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1089 	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1090 	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1091 	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1092 	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1093 	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1094 	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1095 	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1096 #endif
1097 	default: BUG();
1098 	}
1099 	ctxt->ops->put_fpu(ctxt);
1100 }
1101 
1102 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1103 {
1104 	ctxt->ops->get_fpu(ctxt);
1105 	switch (reg) {
1106 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1107 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1108 	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1109 	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1110 	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1111 	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1112 	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1113 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1114 	default: BUG();
1115 	}
1116 	ctxt->ops->put_fpu(ctxt);
1117 }
1118 
1119 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1120 {
1121 	ctxt->ops->get_fpu(ctxt);
1122 	switch (reg) {
1123 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1124 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1125 	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1126 	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1127 	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1128 	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1129 	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1130 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1131 	default: BUG();
1132 	}
1133 	ctxt->ops->put_fpu(ctxt);
1134 }
1135 
1136 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1137 {
1138 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1139 		return emulate_nm(ctxt);
1140 
1141 	ctxt->ops->get_fpu(ctxt);
1142 	asm volatile("fninit");
1143 	ctxt->ops->put_fpu(ctxt);
1144 	return X86EMUL_CONTINUE;
1145 }
1146 
1147 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1148 {
1149 	u16 fcw;
1150 
1151 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1152 		return emulate_nm(ctxt);
1153 
1154 	ctxt->ops->get_fpu(ctxt);
1155 	asm volatile("fnstcw %0": "+m"(fcw));
1156 	ctxt->ops->put_fpu(ctxt);
1157 
1158 	ctxt->dst.val = fcw;
1159 
1160 	return X86EMUL_CONTINUE;
1161 }
1162 
1163 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1164 {
1165 	u16 fsw;
1166 
1167 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1168 		return emulate_nm(ctxt);
1169 
1170 	ctxt->ops->get_fpu(ctxt);
1171 	asm volatile("fnstsw %0": "+m"(fsw));
1172 	ctxt->ops->put_fpu(ctxt);
1173 
1174 	ctxt->dst.val = fsw;
1175 
1176 	return X86EMUL_CONTINUE;
1177 }
1178 
1179 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1180 				    struct operand *op)
1181 {
1182 	unsigned reg = ctxt->modrm_reg;
1183 
1184 	if (!(ctxt->d & ModRM))
1185 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1186 
1187 	if (ctxt->d & Sse) {
1188 		op->type = OP_XMM;
1189 		op->bytes = 16;
1190 		op->addr.xmm = reg;
1191 		read_sse_reg(ctxt, &op->vec_val, reg);
1192 		return;
1193 	}
1194 	if (ctxt->d & Mmx) {
1195 		reg &= 7;
1196 		op->type = OP_MM;
1197 		op->bytes = 8;
1198 		op->addr.mm = reg;
1199 		return;
1200 	}
1201 
1202 	op->type = OP_REG;
1203 	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1204 	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1205 
1206 	fetch_register_operand(op);
1207 	op->orig_val = op->val;
1208 }
1209 
1210 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1211 {
1212 	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1213 		ctxt->modrm_seg = VCPU_SREG_SS;
1214 }
1215 
1216 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1217 			struct operand *op)
1218 {
1219 	u8 sib;
1220 	int index_reg, base_reg, scale;
1221 	int rc = X86EMUL_CONTINUE;
1222 	ulong modrm_ea = 0;
1223 
1224 	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1225 	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1226 	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1227 
1228 	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1229 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1230 	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1231 	ctxt->modrm_seg = VCPU_SREG_DS;
1232 
1233 	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1234 		op->type = OP_REG;
1235 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1236 		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1237 				ctxt->d & ByteOp);
1238 		if (ctxt->d & Sse) {
1239 			op->type = OP_XMM;
1240 			op->bytes = 16;
1241 			op->addr.xmm = ctxt->modrm_rm;
1242 			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1243 			return rc;
1244 		}
1245 		if (ctxt->d & Mmx) {
1246 			op->type = OP_MM;
1247 			op->bytes = 8;
1248 			op->addr.mm = ctxt->modrm_rm & 7;
1249 			return rc;
1250 		}
1251 		fetch_register_operand(op);
1252 		return rc;
1253 	}
1254 
1255 	op->type = OP_MEM;
1256 
1257 	if (ctxt->ad_bytes == 2) {
1258 		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1259 		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1260 		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1261 		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1262 
1263 		/* 16-bit ModR/M decode. */
1264 		switch (ctxt->modrm_mod) {
1265 		case 0:
1266 			if (ctxt->modrm_rm == 6)
1267 				modrm_ea += insn_fetch(u16, ctxt);
1268 			break;
1269 		case 1:
1270 			modrm_ea += insn_fetch(s8, ctxt);
1271 			break;
1272 		case 2:
1273 			modrm_ea += insn_fetch(u16, ctxt);
1274 			break;
1275 		}
1276 		switch (ctxt->modrm_rm) {
1277 		case 0:
1278 			modrm_ea += bx + si;
1279 			break;
1280 		case 1:
1281 			modrm_ea += bx + di;
1282 			break;
1283 		case 2:
1284 			modrm_ea += bp + si;
1285 			break;
1286 		case 3:
1287 			modrm_ea += bp + di;
1288 			break;
1289 		case 4:
1290 			modrm_ea += si;
1291 			break;
1292 		case 5:
1293 			modrm_ea += di;
1294 			break;
1295 		case 6:
1296 			if (ctxt->modrm_mod != 0)
1297 				modrm_ea += bp;
1298 			break;
1299 		case 7:
1300 			modrm_ea += bx;
1301 			break;
1302 		}
1303 		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1304 		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1305 			ctxt->modrm_seg = VCPU_SREG_SS;
1306 		modrm_ea = (u16)modrm_ea;
1307 	} else {
1308 		/* 32/64-bit ModR/M decode. */
1309 		if ((ctxt->modrm_rm & 7) == 4) {
1310 			sib = insn_fetch(u8, ctxt);
1311 			index_reg |= (sib >> 3) & 7;
1312 			base_reg |= sib & 7;
1313 			scale = sib >> 6;
1314 
1315 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1316 				modrm_ea += insn_fetch(s32, ctxt);
1317 			else {
1318 				modrm_ea += reg_read(ctxt, base_reg);
1319 				adjust_modrm_seg(ctxt, base_reg);
1320 				/* Increment ESP on POP [ESP] */
1321 				if ((ctxt->d & IncSP) &&
1322 				    base_reg == VCPU_REGS_RSP)
1323 					modrm_ea += ctxt->op_bytes;
1324 			}
1325 			if (index_reg != 4)
1326 				modrm_ea += reg_read(ctxt, index_reg) << scale;
1327 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1328 			modrm_ea += insn_fetch(s32, ctxt);
1329 			if (ctxt->mode == X86EMUL_MODE_PROT64)
1330 				ctxt->rip_relative = 1;
1331 		} else {
1332 			base_reg = ctxt->modrm_rm;
1333 			modrm_ea += reg_read(ctxt, base_reg);
1334 			adjust_modrm_seg(ctxt, base_reg);
1335 		}
1336 		switch (ctxt->modrm_mod) {
1337 		case 1:
1338 			modrm_ea += insn_fetch(s8, ctxt);
1339 			break;
1340 		case 2:
1341 			modrm_ea += insn_fetch(s32, ctxt);
1342 			break;
1343 		}
1344 	}
1345 	op->addr.mem.ea = modrm_ea;
1346 	if (ctxt->ad_bytes != 8)
1347 		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1348 
1349 done:
1350 	return rc;
1351 }
1352 
1353 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1354 		      struct operand *op)
1355 {
1356 	int rc = X86EMUL_CONTINUE;
1357 
1358 	op->type = OP_MEM;
1359 	switch (ctxt->ad_bytes) {
1360 	case 2:
1361 		op->addr.mem.ea = insn_fetch(u16, ctxt);
1362 		break;
1363 	case 4:
1364 		op->addr.mem.ea = insn_fetch(u32, ctxt);
1365 		break;
1366 	case 8:
1367 		op->addr.mem.ea = insn_fetch(u64, ctxt);
1368 		break;
1369 	}
1370 done:
1371 	return rc;
1372 }
1373 
1374 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1375 {
1376 	long sv = 0, mask;
1377 
1378 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1379 		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1380 
1381 		if (ctxt->src.bytes == 2)
1382 			sv = (s16)ctxt->src.val & (s16)mask;
1383 		else if (ctxt->src.bytes == 4)
1384 			sv = (s32)ctxt->src.val & (s32)mask;
1385 		else
1386 			sv = (s64)ctxt->src.val & (s64)mask;
1387 
1388 		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1389 					   ctxt->dst.addr.mem.ea + (sv >> 3));
1390 	}
1391 
1392 	/* only the in-word bit offset remains */
1393 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1394 }
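/*
 * Worked example (hedged): "bt %eax, (mem)" with EAX = 100 and a
 * 32-bit operand gives mask = ~31, sv = 96; the effective address is
 * advanced by 96 / 8 = 12 bytes and src.val is reduced to the in-dword
 * bit offset 4.
 */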
1395 
1396 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1397 			 unsigned long addr, void *dest, unsigned size)
1398 {
1399 	int rc;
1400 	struct read_cache *mc = &ctxt->mem_read;
1401 
1402 	if (mc->pos < mc->end)
1403 		goto read_cached;
1404 
1405 	WARN_ON((mc->end + size) >= sizeof(mc->data));
1406 
1407 	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1408 				      &ctxt->exception);
1409 	if (rc != X86EMUL_CONTINUE)
1410 		return rc;
1411 
1412 	mc->end += size;
1413 
1414 read_cached:
1415 	memcpy(dest, mc->data + mc->pos, size);
1416 	mc->pos += size;
1417 	return X86EMUL_CONTINUE;
1418 }
1419 
1420 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1421 			  struct segmented_address addr,
1422 			  void *data,
1423 			  unsigned size)
1424 {
1425 	int rc;
1426 	ulong linear;
1427 
1428 	rc = linearize(ctxt, addr, size, false, &linear);
1429 	if (rc != X86EMUL_CONTINUE)
1430 		return rc;
1431 	return read_emulated(ctxt, linear, data, size);
1432 }
1433 
1434 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1435 			   struct segmented_address addr,
1436 			   const void *data,
1437 			   unsigned size)
1438 {
1439 	int rc;
1440 	ulong linear;
1441 
1442 	rc = linearize(ctxt, addr, size, true, &linear);
1443 	if (rc != X86EMUL_CONTINUE)
1444 		return rc;
1445 	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1446 					 &ctxt->exception);
1447 }
1448 
1449 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1450 			     struct segmented_address addr,
1451 			     const void *orig_data, const void *data,
1452 			     unsigned size)
1453 {
1454 	int rc;
1455 	ulong linear;
1456 
1457 	rc = linearize(ctxt, addr, size, true, &linear);
1458 	if (rc != X86EMUL_CONTINUE)
1459 		return rc;
1460 	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1461 					   size, &ctxt->exception);
1462 }
1463 
1464 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1465 			   unsigned int size, unsigned short port,
1466 			   void *dest)
1467 {
1468 	struct read_cache *rc = &ctxt->io_read;
1469 
1470 	if (rc->pos == rc->end) { /* refill pio read ahead */
1471 		unsigned int in_page, n;
1472 		unsigned int count = ctxt->rep_prefix ?
1473 			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1474 		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1475 			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1476 			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1477 		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1478 		if (n == 0)
1479 			n = 1;
1480 		rc->pos = rc->end = 0;
1481 		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1482 			return 0;
1483 		rc->end = n * size;
1484 	}
1485 
1486 	if (ctxt->rep_prefix && (ctxt->d & String) &&
1487 	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1488 		ctxt->dst.data = rc->data + rc->pos;
1489 		ctxt->dst.type = OP_MEM_STR;
1490 		ctxt->dst.count = (rc->end - rc->pos) / size;
1491 		rc->pos = rc->end;
1492 	} else {
1493 		memcpy(dest, rc->data + rc->pos, size);
1494 		rc->pos += size;
1495 	}
1496 	return 1;
1497 }
1498 
1499 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1500 				     u16 index, struct desc_struct *desc)
1501 {
1502 	struct desc_ptr dt;
1503 	ulong addr;
1504 
1505 	ctxt->ops->get_idt(ctxt, &dt);
1506 
1507 	if (dt.size < index * 8 + 7)
1508 		return emulate_gp(ctxt, index << 3 | 0x2);
1509 
1510 	addr = dt.address + index * 8;
1511 	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1512 				   &ctxt->exception);
1513 }
1514 
1515 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1516 				     u16 selector, struct desc_ptr *dt)
1517 {
1518 	const struct x86_emulate_ops *ops = ctxt->ops;
1519 	u32 base3 = 0;
1520 
1521 	if (selector & 1 << 2) {
1522 		struct desc_struct desc;
1523 		u16 sel;
1524 
1525 		memset (dt, 0, sizeof *dt);
1526 		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1527 				      VCPU_SREG_LDTR))
1528 			return;
1529 
1530 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1531 		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1532 	} else
1533 		ops->get_gdt(ctxt, dt);
1534 }
1535 
1536 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1537 			      u16 selector, ulong *desc_addr_p)
1538 {
1539 	struct desc_ptr dt;
1540 	u16 index = selector >> 3;
1541 	ulong addr;
1542 
1543 	get_descriptor_table_ptr(ctxt, selector, &dt);
1544 
1545 	if (dt.size < index * 8 + 7)
1546 		return emulate_gp(ctxt, selector & 0xfffc);
1547 
1548 	addr = dt.address + index * 8;
1549 
1550 #ifdef CONFIG_X86_64
1551 	if (addr >> 32 != 0) {
1552 		u64 efer = 0;
1553 
1554 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1555 		if (!(efer & EFER_LMA))
1556 			addr &= (u32)-1;
1557 	}
1558 #endif
1559 
1560 	*desc_addr_p = addr;
1561 	return X86EMUL_CONTINUE;
1562 }
1563 
1564 /* Only valid for 8-byte segment descriptors */
1565 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 				   u16 selector, struct desc_struct *desc,
1567 				   ulong *desc_addr_p)
1568 {
1569 	int rc;
1570 
1571 	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1572 	if (rc != X86EMUL_CONTINUE)
1573 		return rc;
1574 
1575 	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1576 				   &ctxt->exception);
1577 }
1578 
1579 /* Only valid for 8-byte segment descriptors */
1580 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1581 				    u16 selector, struct desc_struct *desc)
1582 {
1583 	int rc;
1584 	ulong addr;
1585 
1586 	rc = get_descriptor_ptr(ctxt, selector, &addr);
1587 	if (rc != X86EMUL_CONTINUE)
1588 		return rc;
1589 
1590 	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1591 				    &ctxt->exception);
1592 }
1593 
1594 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1595 				     u16 selector, int seg, u8 cpl,
1596 				     enum x86_transfer_type transfer,
1597 				     struct desc_struct *desc)
1598 {
1599 	struct desc_struct seg_desc, old_desc;
1600 	u8 dpl, rpl;
1601 	unsigned err_vec = GP_VECTOR;
1602 	u32 err_code = 0;
1603 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1604 	ulong desc_addr;
1605 	int ret;
1606 	u16 dummy;
1607 	u32 base3 = 0;
1608 
1609 	memset(&seg_desc, 0, sizeof seg_desc);
1610 
1611 	if (ctxt->mode == X86EMUL_MODE_REAL) {
1612 		/* set real mode segment descriptor (keep limit etc. for
1613 		 * unreal mode) */
1614 		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1615 		set_desc_base(&seg_desc, selector << 4);
1616 		goto load;
1617 	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1618 		/* VM86 needs a clean new segment descriptor */
1619 		set_desc_base(&seg_desc, selector << 4);
1620 		set_desc_limit(&seg_desc, 0xffff);
1621 		seg_desc.type = 3;
1622 		seg_desc.p = 1;
1623 		seg_desc.s = 1;
1624 		seg_desc.dpl = 3;
1625 		goto load;
1626 	}
1627 
1628 	rpl = selector & 3;
1629 
1630 	/* TR should be in GDT only */
1631 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1632 		goto exception;
1633 
1634 	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1635 	if (null_selector) {
1636 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1637 			goto exception;
1638 
1639 		if (seg == VCPU_SREG_SS) {
1640 			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1641 				goto exception;
1642 
1643 			/*
1644 			 * ctxt->ops->set_segment expects the CPL to be in
1645 			 * SS.DPL, so fake an expand-up 32-bit data segment.
1646 			 */
1647 			seg_desc.type = 3;
1648 			seg_desc.p = 1;
1649 			seg_desc.s = 1;
1650 			seg_desc.dpl = cpl;
1651 			seg_desc.d = 1;
1652 			seg_desc.g = 1;
1653 		}
1654 
1655 		/* Skip all following checks */
1656 		goto load;
1657 	}
1658 
1659 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1660 	if (ret != X86EMUL_CONTINUE)
1661 		return ret;
1662 
1663 	err_code = selector & 0xfffc;
1664 	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1665 							   GP_VECTOR;
1666 
1667 	/* can't load system descriptor into segment selector */
1668 	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1669 		if (transfer == X86_TRANSFER_CALL_JMP)
1670 			return X86EMUL_UNHANDLEABLE;
1671 		goto exception;
1672 	}
1673 
1674 	if (!seg_desc.p) {
1675 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1676 		goto exception;
1677 	}
1678 
1679 	dpl = seg_desc.dpl;
1680 
1681 	switch (seg) {
1682 	case VCPU_SREG_SS:
1683 		/*
1684 		 * segment is not a writable data segment, or the selector's
1685 		 * RPL != CPL, or the descriptor's DPL != CPL
1686 		 */
1687 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1688 			goto exception;
1689 		break;
1690 	case VCPU_SREG_CS:
1691 		if (!(seg_desc.type & 8))
1692 			goto exception;
1693 
1694 		if (seg_desc.type & 4) {
1695 			/* conforming */
1696 			if (dpl > cpl)
1697 				goto exception;
1698 		} else {
1699 			/* nonconforming */
1700 			if (rpl > cpl || dpl != cpl)
1701 				goto exception;
1702 		}
1703 		/* in long-mode d/b must be clear if l is set */
1704 		if (seg_desc.d && seg_desc.l) {
1705 			u64 efer = 0;
1706 
1707 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1708 			if (efer & EFER_LMA)
1709 				goto exception;
1710 		}
1711 
1712 		/* CS(RPL) <- CPL */
1713 		selector = (selector & 0xfffc) | cpl;
1714 		break;
1715 	case VCPU_SREG_TR:
1716 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1717 			goto exception;
1718 		old_desc = seg_desc;
1719 		seg_desc.type |= 2; /* busy */
1720 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1721 						  sizeof(seg_desc), &ctxt->exception);
1722 		if (ret != X86EMUL_CONTINUE)
1723 			return ret;
1724 		break;
1725 	case VCPU_SREG_LDTR:
1726 		if (seg_desc.s || seg_desc.type != 2)
1727 			goto exception;
1728 		break;
1729 	default: /*  DS, ES, FS, or GS */
1730 		/*
1731 		 * segment is not a data or readable code segment or
1732 		 * ((segment is a data or nonconforming code segment)
1733 		 * and (both RPL and CPL > DPL))
1734 		 */
1735 		if ((seg_desc.type & 0xa) == 0x8 ||
1736 		    (((seg_desc.type & 0xc) != 0xc) &&
1737 		     (rpl > dpl && cpl > dpl)))
1738 			goto exception;
1739 		break;
1740 	}
1741 
1742 	if (seg_desc.s) {
1743 		/* mark segment as accessed */
1744 		if (!(seg_desc.type & 1)) {
1745 			seg_desc.type |= 1;
1746 			ret = write_segment_descriptor(ctxt, selector,
1747 						       &seg_desc);
1748 			if (ret != X86EMUL_CONTINUE)
1749 				return ret;
1750 		}
1751 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1752 		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1753 				sizeof(base3), &ctxt->exception);
1754 		if (ret != X86EMUL_CONTINUE)
1755 			return ret;
1756 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1757 				((u64)base3 << 32), ctxt))
1758 			return emulate_gp(ctxt, 0);
1759 	}
1760 load:
1761 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1762 	if (desc)
1763 		*desc = seg_desc;
1764 	return X86EMUL_CONTINUE;
1765 exception:
1766 	return emulate_exception(ctxt, err_vec, err_code, true);
1767 }
1768 
1769 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1770 				   u16 selector, int seg)
1771 {
1772 	u8 cpl = ctxt->ops->cpl(ctxt);
1773 
1774 	/*
1775 	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1776 	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1777 	 * but it's wrong).
1778 	 *
1779 	 * However, the Intel manual says that putting IST=1/DPL=3 in
1780 	 * an interrupt gate will result in SS=3 (the AMD manual instead
1781 	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1782 	 * and only forbid it here.
1783 	 */
1784 	if (seg == VCPU_SREG_SS && selector == 3 &&
1785 	    ctxt->mode == X86EMUL_MODE_PROT64)
1786 		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1787 
1788 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1789 					 X86_TRANSFER_NONE, NULL);
1790 }
1791 
1792 static void write_register_operand(struct operand *op)
1793 {
1794 	return assign_register(op->addr.reg, op->val, op->bytes);
1795 }
1796 
1797 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1798 {
1799 	switch (op->type) {
1800 	case OP_REG:
1801 		write_register_operand(op);
1802 		break;
1803 	case OP_MEM:
1804 		if (ctxt->lock_prefix)
1805 			return segmented_cmpxchg(ctxt,
1806 						 op->addr.mem,
1807 						 &op->orig_val,
1808 						 &op->val,
1809 						 op->bytes);
1810 		else
1811 			return segmented_write(ctxt,
1812 					       op->addr.mem,
1813 					       &op->val,
1814 					       op->bytes);
1815 		break;
1816 	case OP_MEM_STR:
1817 		return segmented_write(ctxt,
1818 				       op->addr.mem,
1819 				       op->data,
1820 				       op->bytes * op->count);
1821 		break;
1822 	case OP_XMM:
1823 		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1824 		break;
1825 	case OP_MM:
1826 		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1827 		break;
1828 	case OP_NONE:
1829 		/* no writeback */
1830 		break;
1831 	default:
1832 		break;
1833 	}
1834 	return X86EMUL_CONTINUE;
1835 }
1836 
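/*
 * Stack pushes go through SS with the stack pointer masked to the
 * current stack-address size: rSP is decremented first and the value
 * is then written at SS:rSP.  E.g. a 16-bit push with SP = 0x0100
 * stores its operand at SS:0x00fe.
 */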
1837 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1838 {
1839 	struct segmented_address addr;
1840 
1841 	rsp_increment(ctxt, -bytes);
1842 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1843 	addr.seg = VCPU_SREG_SS;
1844 
1845 	return segmented_write(ctxt, addr, data, bytes);
1846 }
1847 
1848 static int em_push(struct x86_emulate_ctxt *ctxt)
1849 {
1850 	/* Disable writeback. */
1851 	ctxt->dst.type = OP_NONE;
1852 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1853 }
1854 
1855 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1856 		       void *dest, int len)
1857 {
1858 	int rc;
1859 	struct segmented_address addr;
1860 
1861 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1862 	addr.seg = VCPU_SREG_SS;
1863 	rc = segmented_read(ctxt, addr, dest, len);
1864 	if (rc != X86EMUL_CONTINUE)
1865 		return rc;
1866 
1867 	rsp_increment(ctxt, len);
1868 	return rc;
1869 }
1870 
1871 static int em_pop(struct x86_emulate_ctxt *ctxt)
1872 {
1873 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1874 }
1875 
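/*
 * POPF privilege rules, as implemented below: in protected mode IOPL
 * is writable only at CPL 0 and IF only when CPL <= IOPL; in VM86 mode
 * POPF faults with #GP unless IOPL == 3; in real mode both are freely
 * writable.  E.g. a CPL-3 pop with IF flipped (when CPL > IOPL)
 * silently leaves IF unchanged rather than faulting.
 */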
1876 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1877 			void *dest, int len)
1878 {
1879 	int rc;
1880 	unsigned long val, change_mask;
1881 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1882 	int cpl = ctxt->ops->cpl(ctxt);
1883 
1884 	rc = emulate_pop(ctxt, &val, len);
1885 	if (rc != X86EMUL_CONTINUE)
1886 		return rc;
1887 
1888 	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1889 		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1890 		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1891 		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1892 
1893 	switch (ctxt->mode) {
1894 	case X86EMUL_MODE_PROT64:
1895 	case X86EMUL_MODE_PROT32:
1896 	case X86EMUL_MODE_PROT16:
1897 		if (cpl == 0)
1898 			change_mask |= X86_EFLAGS_IOPL;
1899 		if (cpl <= iopl)
1900 			change_mask |= X86_EFLAGS_IF;
1901 		break;
1902 	case X86EMUL_MODE_VM86:
1903 		if (iopl < 3)
1904 			return emulate_gp(ctxt, 0);
1905 		change_mask |= X86_EFLAGS_IF;
1906 		break;
1907 	default: /* real mode */
1908 		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1909 		break;
1910 	}
1911 
1912 	*(unsigned long *)dest =
1913 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1914 
1915 	return rc;
1916 }
1917 
1918 static int em_popf(struct x86_emulate_ctxt *ctxt)
1919 {
1920 	ctxt->dst.type = OP_REG;
1921 	ctxt->dst.addr.reg = &ctxt->eflags;
1922 	ctxt->dst.bytes = ctxt->op_bytes;
1923 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1924 }
1925 
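/*
 * ENTER imm16, imm8 builds a stack frame: push rBP, copy rSP into rBP,
 * then reserve imm16 bytes of locals.  Only nesting level 0 is
 * emulated here; the rarely used nested form is left unhandled.
 */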
1926 static int em_enter(struct x86_emulate_ctxt *ctxt)
1927 {
1928 	int rc;
1929 	unsigned frame_size = ctxt->src.val;
1930 	unsigned nesting_level = ctxt->src2.val & 31;
1931 	ulong rbp;
1932 
1933 	if (nesting_level)
1934 		return X86EMUL_UNHANDLEABLE;
1935 
1936 	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1937 	rc = push(ctxt, &rbp, stack_size(ctxt));
1938 	if (rc != X86EMUL_CONTINUE)
1939 		return rc;
1940 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1941 		      stack_mask(ctxt));
1942 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1943 		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1944 		      stack_mask(ctxt));
1945 	return X86EMUL_CONTINUE;
1946 }
1947 
1948 static int em_leave(struct x86_emulate_ctxt *ctxt)
1949 {
1950 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1951 		      stack_mask(ctxt));
1952 	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1953 }
1954 
1955 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1956 {
1957 	int seg = ctxt->src2.val;
1958 
1959 	ctxt->src.val = get_segment_selector(ctxt, seg);
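	/*
	 * A 32-bit PUSH of a segment register reserves a doubleword on
	 * the stack but, at least on recent CPUs, writes only the low
	 * word.  Emulate that by moving rSP the extra two bytes and
	 * storing a two-byte value.
	 */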
1960 	if (ctxt->op_bytes == 4) {
1961 		rsp_increment(ctxt, -2);
1962 		ctxt->op_bytes = 2;
1963 	}
1964 
1965 	return em_push(ctxt);
1966 }
1967 
1968 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1969 {
1970 	int seg = ctxt->src2.val;
1971 	unsigned long selector;
1972 	int rc;
1973 
1974 	rc = emulate_pop(ctxt, &selector, 2);
1975 	if (rc != X86EMUL_CONTINUE)
1976 		return rc;
1977 
1978 	if (seg == VCPU_SREG_SS)
1979 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1980 	if (ctxt->op_bytes > 2)
1981 		rsp_increment(ctxt, ctxt->op_bytes - 2);
1982 
1983 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1984 	return rc;
1985 }
1986 
1987 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1988 {
1989 	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1990 	int rc = X86EMUL_CONTINUE;
1991 	int reg = VCPU_REGS_RAX;
1992 
1993 	while (reg <= VCPU_REGS_RDI) {
1994 		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
1995 				reg_read(ctxt, reg);
1996 
1997 		rc = em_push(ctxt);
1998 		if (rc != X86EMUL_CONTINUE)
1999 			return rc;
2000 
2001 		++reg;
2002 	}
2003 
2004 	return rc;
2005 }
2006 
2007 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2008 {
2009 	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2010 	return em_push(ctxt);
2011 }
2012 
2013 static int em_popa(struct x86_emulate_ctxt *ctxt)
2014 {
2015 	int rc = X86EMUL_CONTINUE;
2016 	int reg = VCPU_REGS_RDI;
2017 	u32 val;
2018 
2019 	while (reg >= VCPU_REGS_RAX) {
2020 		if (reg == VCPU_REGS_RSP) {
2021 			rsp_increment(ctxt, ctxt->op_bytes);
2022 			--reg;
2023 		}
2024 
2025 		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2026 		if (rc != X86EMUL_CONTINUE)
2027 			break;
2028 		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2029 		--reg;
2030 	}
2031 	return rc;
2032 }
2033 
2034 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2035 {
2036 	const struct x86_emulate_ops *ops = ctxt->ops;
2037 	int rc;
2038 	struct desc_ptr dt;
2039 	gva_t cs_addr;
2040 	gva_t eip_addr;
2041 	u16 cs, eip;
2042 
2043 	/* TODO: Add limit checks */
2044 	ctxt->src.val = ctxt->eflags;
2045 	rc = em_push(ctxt);
2046 	if (rc != X86EMUL_CONTINUE)
2047 		return rc;
2048 
2049 	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2050 
2051 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2052 	rc = em_push(ctxt);
2053 	if (rc != X86EMUL_CONTINUE)
2054 		return rc;
2055 
2056 	ctxt->src.val = ctxt->_eip;
2057 	rc = em_push(ctxt);
2058 	if (rc != X86EMUL_CONTINUE)
2059 		return rc;
2060 
2061 	ops->get_idt(ctxt, &dt);
2062 
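	/*
	 * Real-mode IVT entries are four bytes: IP at vector * 4 and CS
	 * at vector * 4 + 2, relative to the IDT base (normally 0).
	 * E.g. INT 0x10 fetches IP from 0x40 and CS from 0x42.
	 */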
2063 	eip_addr = dt.address + (irq << 2);
2064 	cs_addr = dt.address + (irq << 2) + 2;
2065 
2066 	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2067 	if (rc != X86EMUL_CONTINUE)
2068 		return rc;
2069 
2070 	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2071 	if (rc != X86EMUL_CONTINUE)
2072 		return rc;
2073 
2074 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2075 	if (rc != X86EMUL_CONTINUE)
2076 		return rc;
2077 
2078 	ctxt->_eip = eip;
2079 
2080 	return rc;
2081 }
2082 
2083 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2084 {
2085 	int rc;
2086 
2087 	invalidate_registers(ctxt);
2088 	rc = __emulate_int_real(ctxt, irq);
2089 	if (rc == X86EMUL_CONTINUE)
2090 		writeback_registers(ctxt);
2091 	return rc;
2092 }
2093 
2094 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2095 {
2096 	switch (ctxt->mode) {
2097 	case X86EMUL_MODE_REAL:
2098 		return __emulate_int_real(ctxt, irq);
2099 	case X86EMUL_MODE_VM86:
2100 	case X86EMUL_MODE_PROT16:
2101 	case X86EMUL_MODE_PROT32:
2102 	case X86EMUL_MODE_PROT64:
2103 	default:
2104 		/* Protected-mode interrupts are not yet implemented */
2105 		return X86EMUL_UNHANDLEABLE;
2106 	}
2107 }
2108 
2109 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2110 {
2111 	int rc = X86EMUL_CONTINUE;
2112 	unsigned long temp_eip = 0;
2113 	unsigned long temp_eflags = 0;
2114 	unsigned long cs = 0;
2115 	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2116 			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2117 			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2118 			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2119 			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2120 			     X86_EFLAGS_FIXED;
2121 	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2122 				  X86_EFLAGS_VIP;
2123 
2124 	/* TODO: Add stack limit check */
2125 
2126 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2127 
2128 	if (rc != X86EMUL_CONTINUE)
2129 		return rc;
2130 
2131 	if (temp_eip & ~0xffff)
2132 		return emulate_gp(ctxt, 0);
2133 
2134 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2135 
2136 	if (rc != X86EMUL_CONTINUE)
2137 		return rc;
2138 
2139 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2140 
2141 	if (rc != X86EMUL_CONTINUE)
2142 		return rc;
2143 
2144 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2145 
2146 	if (rc != X86EMUL_CONTINUE)
2147 		return rc;
2148 
2149 	ctxt->_eip = temp_eip;
2150 
2151 	if (ctxt->op_bytes == 4)
2152 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2153 	else if (ctxt->op_bytes == 2) {
2154 		ctxt->eflags &= ~0xffff;
2155 		ctxt->eflags |= temp_eflags;
2156 	}
2157 
2158 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2159 	ctxt->eflags |= X86_EFLAGS_FIXED;
2160 	ctxt->ops->set_nmi_mask(ctxt, false);
2161 
2162 	return rc;
2163 }
2164 
2165 static int em_iret(struct x86_emulate_ctxt *ctxt)
2166 {
2167 	switch (ctxt->mode) {
2168 	case X86EMUL_MODE_REAL:
2169 		return emulate_iret_real(ctxt);
2170 	case X86EMUL_MODE_VM86:
2171 	case X86EMUL_MODE_PROT16:
2172 	case X86EMUL_MODE_PROT32:
2173 	case X86EMUL_MODE_PROT64:
2174 	default:
2175 		/* IRET from protected mode is not yet implemented */
2176 		return X86EMUL_UNHANDLEABLE;
2177 	}
2178 }
2179 
2180 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2181 {
2182 	int rc;
2183 	unsigned short sel;
2184 	struct desc_struct new_desc;
2185 	u8 cpl = ctxt->ops->cpl(ctxt);
2186 
2187 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2188 
2189 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2190 				       X86_TRANSFER_CALL_JMP,
2191 				       &new_desc);
2192 	if (rc != X86EMUL_CONTINUE)
2193 		return rc;
2194 
2195 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2196 	/* Error handling is not implemented. */
2197 	if (rc != X86EMUL_CONTINUE)
2198 		return X86EMUL_UNHANDLEABLE;
2199 
2200 	return rc;
2201 }
2202 
2203 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2204 {
2205 	return assign_eip_near(ctxt, ctxt->src.val);
2206 }
2207 
2208 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2209 {
2210 	int rc;
2211 	long int old_eip;
2212 
2213 	old_eip = ctxt->_eip;
2214 	rc = assign_eip_near(ctxt, ctxt->src.val);
2215 	if (rc != X86EMUL_CONTINUE)
2216 		return rc;
2217 	ctxt->src.val = old_eip;
2218 	rc = em_push(ctxt);
2219 	return rc;
2220 }
2221 
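/*
 * CMPXCHG8B compares EDX:EAX against the 64-bit destination: on a
 * match ZF is set and ECX:EBX is stored, otherwise ZF is cleared and
 * the destination value is loaded into EDX:EAX.  The (possibly locked)
 * memory update itself happens later in the common writeback path.
 */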
2222 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2223 {
2224 	u64 old = ctxt->dst.orig_val64;
2225 
2226 	if (ctxt->dst.bytes == 16)
2227 		return X86EMUL_UNHANDLEABLE;
2228 
2229 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2230 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2231 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2232 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2233 		ctxt->eflags &= ~X86_EFLAGS_ZF;
2234 	} else {
2235 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2236 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2237 
2238 		ctxt->eflags |= X86_EFLAGS_ZF;
2239 	}
2240 	return X86EMUL_CONTINUE;
2241 }
2242 
2243 static int em_ret(struct x86_emulate_ctxt *ctxt)
2244 {
2245 	int rc;
2246 	unsigned long eip;
2247 
2248 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2249 	if (rc != X86EMUL_CONTINUE)
2250 		return rc;
2251 
2252 	return assign_eip_near(ctxt, eip);
2253 }
2254 
2255 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2256 {
2257 	int rc;
2258 	unsigned long eip, cs;
2259 	int cpl = ctxt->ops->cpl(ctxt);
2260 	struct desc_struct new_desc;
2261 
2262 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2263 	if (rc != X86EMUL_CONTINUE)
2264 		return rc;
2265 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2266 	if (rc != X86EMUL_CONTINUE)
2267 		return rc;
2268 	/* Outer-privilege level return is not implemented */
2269 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2270 		return X86EMUL_UNHANDLEABLE;
2271 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2272 				       X86_TRANSFER_RET,
2273 				       &new_desc);
2274 	if (rc != X86EMUL_CONTINUE)
2275 		return rc;
2276 	rc = assign_eip_far(ctxt, eip, &new_desc);
2277 	/* Error handling is not implemented. */
2278 	if (rc != X86EMUL_CONTINUE)
2279 		return X86EMUL_UNHANDLEABLE;
2280 
2281 	return rc;
2282 }
2283 
2284 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2285 {
2286 	int rc;
2287 
2288 	rc = em_ret_far(ctxt);
2289 	if (rc != X86EMUL_CONTINUE)
2290 		return rc;
2291 	rsp_increment(ctxt, ctxt->src.val);
2292 	return X86EMUL_CONTINUE;
2293 }
2294 
2295 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2296 {
2297 	/* Save real source value, then compare EAX against destination. */
2298 	ctxt->dst.orig_val = ctxt->dst.val;
2299 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2300 	ctxt->src.orig_val = ctxt->src.val;
2301 	ctxt->src.val = ctxt->dst.orig_val;
2302 	fastop(ctxt, em_cmp);
2303 
2304 	if (ctxt->eflags & X86_EFLAGS_ZF) {
2305 		/* Success: write back to memory; no update of EAX */
2306 		ctxt->src.type = OP_NONE;
2307 		ctxt->dst.val = ctxt->src.orig_val;
2308 	} else {
2309 		/* Failure: write the value we saw to EAX. */
2310 		ctxt->src.type = OP_REG;
2311 		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2312 		ctxt->src.val = ctxt->dst.orig_val;
2313 		/* Create write-cycle to dest by writing the same value */
2314 		ctxt->dst.val = ctxt->dst.orig_val;
2315 	}
2316 	return X86EMUL_CONTINUE;
2317 }
2318 
2319 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2320 {
2321 	int seg = ctxt->src2.val;
2322 	unsigned short sel;
2323 	int rc;
2324 
2325 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2326 
2327 	rc = load_segment_descriptor(ctxt, sel, seg);
2328 	if (rc != X86EMUL_CONTINUE)
2329 		return rc;
2330 
2331 	ctxt->dst.val = ctxt->src.val;
2332 	return rc;
2333 }
2334 
2335 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2336 {
2337 	u32 eax, ebx, ecx, edx;
2338 
2339 	eax = 0x80000001;
2340 	ecx = 0;
2341 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2342 	return edx & bit(X86_FEATURE_LM);
2343 }
2344 
2345 #define GET_SMSTATE(type, smbase, offset)				  \
2346 	({								  \
2347 	 type __val;							  \
2348 	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
2349 				      sizeof(__val));			  \
2350 	 if (r != X86EMUL_CONTINUE)					  \
2351 		 return X86EMUL_UNHANDLEABLE;				  \
2352 	 __val;								  \
2353 	})
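
/*
 * Note that GET_SMSTATE() is a statement expression: it reads a @type
 * sized value from the SMM state-save area at @smbase + @offset, but
 * returns X86EMUL_UNHANDLEABLE from the *calling* function if the
 * physical read fails.  It may therefore only be used in helpers that
 * themselves return an X86EMUL_* status code.
 */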
2354 
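/*
 * The flags word below mirrors bits 8-23 of a segment descriptor's
 * high doubleword, which is how the state-save area encodes segment
 * attributes.  E.g. rsm_set_desc_flags(&desc, 0x00c09b00) describes a
 * flat 32-bit code segment (type 0xb, S=1, P=1, D=1, G=1).
 */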
2355 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2356 {
2357 	desc->g    = (flags >> 23) & 1;
2358 	desc->d    = (flags >> 22) & 1;
2359 	desc->l    = (flags >> 21) & 1;
2360 	desc->avl  = (flags >> 20) & 1;
2361 	desc->p    = (flags >> 15) & 1;
2362 	desc->dpl  = (flags >> 13) & 3;
2363 	desc->s    = (flags >> 12) & 1;
2364 	desc->type = (flags >>  8) & 15;
2365 }
2366 
2367 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2368 {
2369 	struct desc_struct desc;
2370 	int offset;
2371 	u16 selector;
2372 
2373 	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2374 
2375 	if (n < 3)
2376 		offset = 0x7f84 + n * 12;
2377 	else
2378 		offset = 0x7f2c + (n - 3) * 12;
2379 
2380 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2381 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2382 	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2383 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2384 	return X86EMUL_CONTINUE;
2385 }
2386 
2387 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2388 {
2389 	struct desc_struct desc;
2390 	int offset;
2391 	u16 selector;
2392 	u32 base3;
2393 
2394 	offset = 0x7e00 + n * 16;
2395 
2396 	selector =                GET_SMSTATE(u16, smbase, offset);
2397 	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2398 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2399 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2400 	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
2401 
2402 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2403 	return X86EMUL_CONTINUE;
2404 }
2405 
2406 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2407 				     u64 cr0, u64 cr4)
2408 {
2409 	int bad;
2410 
2411 	/*
2412 	 * First enable PAE; long mode needs it before CR0.PG = 1 is set.
2413 	 * Then enable protected mode. However, PCID cannot be enabled
2414 	 * if EFER.LMA=0, so set it separately.
2415 	 */
2416 	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2417 	if (bad)
2418 		return X86EMUL_UNHANDLEABLE;
2419 
2420 	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2421 	if (bad)
2422 		return X86EMUL_UNHANDLEABLE;
2423 
2424 	if (cr4 & X86_CR4_PCIDE) {
2425 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2426 		if (bad)
2427 			return X86EMUL_UNHANDLEABLE;
2428 	}
2429 
2430 	return X86EMUL_CONTINUE;
2431 }
2432 
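/*
 * The hard-coded offsets below follow the 32-bit SMM state-save map;
 * they are relative to SMBASE + 0x8000, which the caller folds into
 * the smbase argument.  CR0 sits at 0x7ffc, CR3 at 0x7ff8, and so on
 * down the save area.
 */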
2433 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2434 {
2435 	struct desc_struct desc;
2436 	struct desc_ptr dt;
2437 	u16 selector;
2438 	u32 val, cr0, cr4;
2439 	int i;
2440 
2441 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
2442 	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2443 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2444 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
2445 
2446 	for (i = 0; i < 8; i++)
2447 		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2448 
2449 	val = GET_SMSTATE(u32, smbase, 0x7fcc);
2450 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2451 	val = GET_SMSTATE(u32, smbase, 0x7fc8);
2452 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2453 
2454 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
2455 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
2456 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
2457 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
2458 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2459 
2460 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
2461 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
2462 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
2463 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
2464 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2465 
2466 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
2467 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
2468 	ctxt->ops->set_gdt(ctxt, &dt);
2469 
2470 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
2471 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
2472 	ctxt->ops->set_idt(ctxt, &dt);
2473 
2474 	for (i = 0; i < 6; i++) {
2475 		int r = rsm_load_seg_32(ctxt, smbase, i);
2476 		if (r != X86EMUL_CONTINUE)
2477 			return r;
2478 	}
2479 
2480 	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2481 
2482 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2483 
2484 	return rsm_enter_protected_mode(ctxt, cr0, cr4);
2485 }
2486 
2487 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2488 {
2489 	struct desc_struct desc;
2490 	struct desc_ptr dt;
2491 	u64 val, cr0, cr4;
2492 	u32 base3;
2493 	u16 selector;
2494 	int i, r;
2495 
2496 	for (i = 0; i < 16; i++)
2497 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2498 
2499 	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
2500 	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2501 
2502 	val = GET_SMSTATE(u32, smbase, 0x7f68);
2503 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2504 	val = GET_SMSTATE(u32, smbase, 0x7f60);
2505 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2506 
2507 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
2508 	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
2509 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
2510 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2511 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
2512 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2513 
2514 	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
2515 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2516 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
2517 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
2518 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
2519 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2520 
2521 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
2522 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
2523 	ctxt->ops->set_idt(ctxt, &dt);
2524 
2525 	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
2526 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2527 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
2528 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
2529 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
2530 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2531 
2532 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
2533 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
2534 	ctxt->ops->set_gdt(ctxt, &dt);
2535 
2536 	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2537 	if (r != X86EMUL_CONTINUE)
2538 		return r;
2539 
2540 	for (i = 0; i < 6; i++) {
2541 		r = rsm_load_seg_64(ctxt, smbase, i);
2542 		if (r != X86EMUL_CONTINUE)
2543 			return r;
2544 	}
2545 
2546 	return X86EMUL_CONTINUE;
2547 }
2548 
2549 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2550 {
2551 	unsigned long cr0, cr4, efer;
2552 	u64 smbase;
2553 	int ret;
2554 
2555 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2556 		return emulate_ud(ctxt);
2557 
2558 	/*
2559 	 * Get back to real mode, to prepare a safe state in which to load
2560 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2561 	 * supports long mode.
2562 	 */
2563 	cr4 = ctxt->ops->get_cr(ctxt, 4);
2564 	if (emulator_has_longmode(ctxt)) {
2565 		struct desc_struct cs_desc;
2566 
2567 		/* Zero CR4.PCIDE before CR0.PG.  */
2568 		if (cr4 & X86_CR4_PCIDE) {
2569 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2570 			cr4 &= ~X86_CR4_PCIDE;
2571 		}
2572 
2573 		/* A 32-bit code segment is required to clear EFER.LMA.  */
2574 		memset(&cs_desc, 0, sizeof(cs_desc));
2575 		cs_desc.type = 0xb;
2576 		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2577 		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2578 	}
2579 
2580 	/* For the 64-bit case, this will clear EFER.LMA.  */
2581 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2582 	if (cr0 & X86_CR0_PE)
2583 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2584 
2585 	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
2586 	if (cr4 & X86_CR4_PAE)
2587 		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2588 
2589 	/* And finally go back to 32-bit mode.  */
2590 	efer = 0;
2591 	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2592 
2593 	smbase = ctxt->ops->get_smbase(ctxt);
2594 
2595 	/*
2596 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2597 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2598 	 * state-save area.
2599 	 */
2600 	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2601 		return X86EMUL_UNHANDLEABLE;
2602 
2603 	if (emulator_has_longmode(ctxt))
2604 		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2605 	else
2606 		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2607 
2608 	if (ret != X86EMUL_CONTINUE) {
2609 		/* FIXME: should triple fault */
2610 		return X86EMUL_UNHANDLEABLE;
2611 	}
2612 
2613 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2614 		ctxt->ops->set_nmi_mask(ctxt, false);
2615 
2616 	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2617 		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2618 	return X86EMUL_CONTINUE;
2619 }
2620 
2621 static void
2622 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2623 			struct desc_struct *cs, struct desc_struct *ss)
2624 {
2625 	cs->l = 0;		/* will be adjusted later */
2626 	set_desc_base(cs, 0);	/* flat segment */
2627 	cs->g = 1;		/* 4kb granularity */
2628 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2629 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2630 	cs->s = 1;
2631 	cs->dpl = 0;		/* will be adjusted later */
2632 	cs->p = 1;
2633 	cs->d = 1;
2634 	cs->avl = 0;
2635 
2636 	set_desc_base(ss, 0);	/* flat segment */
2637 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2638 	ss->g = 1;		/* 4kb granularity */
2639 	ss->s = 1;
2640 	ss->type = 0x03;	/* Read/Write, Accessed */
2641 	ss->d = 1;		/* 32bit stack segment */
2642 	ss->dpl = 0;
2643 	ss->p = 1;
2644 	ss->l = 0;
2645 	ss->avl = 0;
2646 }
2647 
2648 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2649 {
2650 	u32 eax, ebx, ecx, edx;
2651 
2652 	eax = ecx = 0;
2653 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2654 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2655 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2656 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2657 }
2658 
2659 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2660 {
2661 	const struct x86_emulate_ops *ops = ctxt->ops;
2662 	u32 eax, ebx, ecx, edx;
2663 
2664 	/*
2665 	 * SYSCALL is always enabled in long mode, so the check only needs
2666 	 * to become vendor-specific (via CPUID) when other modes are active.
2667 	 */
2668 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2669 		return true;
2670 
2671 	eax = 0x00000000;
2672 	ecx = 0x00000000;
2673 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2674 	/*
2675 	 * Intel ("GenuineIntel"): Intel CPUs only support "syscall" in
2676 	 * 64-bit long mode; a 32-bit compat application running under a
2677 	 * 64-bit guest will #UD. While this behaviour could be fixed by
2678 	 * emulating the AMD response, AMD CPUs can't be made to behave
2679 	 * like Intel ones, so emulation follows each vendor's own
2680 	 * behaviour.
2681 	 */
2682 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2683 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2684 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2685 		return false;
2686 
2687 	/* AMD ("AuthenticAMD") */
2688 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2689 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2690 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2691 		return true;
2692 
2693 	/* AMD ("AMDisbetter!") */
2694 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2695 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2696 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2697 		return true;
2698 
2699 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2700 	return false;
2701 }
2702 
2703 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2704 {
2705 	const struct x86_emulate_ops *ops = ctxt->ops;
2706 	struct desc_struct cs, ss;
2707 	u64 msr_data;
2708 	u16 cs_sel, ss_sel;
2709 	u64 efer = 0;
2710 
2711 	/* syscall is not available in real mode */
2712 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2713 	    ctxt->mode == X86EMUL_MODE_VM86)
2714 		return emulate_ud(ctxt);
2715 
2716 	if (!(em_syscall_is_enabled(ctxt)))
2717 		return emulate_ud(ctxt);
2718 
2719 	ops->get_msr(ctxt, MSR_EFER, &efer);
2720 	setup_syscalls_segments(ctxt, &cs, &ss);
2721 
2722 	if (!(efer & EFER_SCE))
2723 		return emulate_ud(ctxt);
2724 
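	/*
	 * The SYSCALL target CS comes from STAR[47:32] (RPL bits
	 * cleared); SS is implicitly that selector + 8.  E.g. with
	 * STAR[47:32] = 0x0010, CS becomes 0x0010 and SS 0x0018.
	 */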
2725 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2726 	msr_data >>= 32;
2727 	cs_sel = (u16)(msr_data & 0xfffc);
2728 	ss_sel = (u16)(msr_data + 8);
2729 
2730 	if (efer & EFER_LMA) {
2731 		cs.d = 0;
2732 		cs.l = 1;
2733 	}
2734 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2735 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2736 
2737 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2738 	if (efer & EFER_LMA) {
2739 #ifdef CONFIG_X86_64
2740 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2741 
2742 		ops->get_msr(ctxt,
2743 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2744 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2745 		ctxt->_eip = msr_data;
2746 
2747 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2748 		ctxt->eflags &= ~msr_data;
2749 		ctxt->eflags |= X86_EFLAGS_FIXED;
2750 #endif
2751 	} else {
2752 		/* legacy mode */
2753 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2754 		ctxt->_eip = (u32)msr_data;
2755 
2756 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2757 	}
2758 
2759 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2760 	return X86EMUL_CONTINUE;
2761 }
2762 
2763 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2764 {
2765 	const struct x86_emulate_ops *ops = ctxt->ops;
2766 	struct desc_struct cs, ss;
2767 	u64 msr_data;
2768 	u16 cs_sel, ss_sel;
2769 	u64 efer = 0;
2770 
2771 	ops->get_msr(ctxt, MSR_EFER, &efer);
2772 	/* inject #GP if in real mode */
2773 	if (ctxt->mode == X86EMUL_MODE_REAL)
2774 		return emulate_gp(ctxt, 0);
2775 
2776 	/*
2777 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2778 	 * mode).
2779 	 */
2780 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2781 	    && !vendor_intel(ctxt))
2782 		return emulate_ud(ctxt);
2783 
2784 	/* sysenter/sysexit have not been tested in 64bit mode. */
2785 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2786 		return X86EMUL_UNHANDLEABLE;
2787 
2788 	setup_syscalls_segments(ctxt, &cs, &ss);
2789 
2790 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2791 	if ((msr_data & 0xfffc) == 0x0)
2792 		return emulate_gp(ctxt, 0);
2793 
2794 	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2795 	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2796 	ss_sel = cs_sel + 8;
2797 	if (efer & EFER_LMA) {
2798 		cs.d = 0;
2799 		cs.l = 1;
2800 	}
2801 
2802 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2803 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2804 
2805 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2806 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2807 
2808 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2809 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2810 							      (u32)msr_data;
2811 
2812 	return X86EMUL_CONTINUE;
2813 }
2814 
2815 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2816 {
2817 	const struct x86_emulate_ops *ops = ctxt->ops;
2818 	struct desc_struct cs, ss;
2819 	u64 msr_data, rcx, rdx;
2820 	int usermode;
2821 	u16 cs_sel = 0, ss_sel = 0;
2822 
2823 	/* inject #GP if in real mode or Virtual 8086 mode */
2824 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2825 	    ctxt->mode == X86EMUL_MODE_VM86)
2826 		return emulate_gp(ctxt, 0);
2827 
2828 	setup_syscalls_segments(ctxt, &cs, &ss);
2829 
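	/*
	 * SYSEXIT returns to 64-bit user mode only when executed with a
	 * REX.W prefix (bit 3 of the REX byte); otherwise the target is
	 * 32-bit and RCX/RDX are truncated below accordingly.
	 */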
2830 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2831 		usermode = X86EMUL_MODE_PROT64;
2832 	else
2833 		usermode = X86EMUL_MODE_PROT32;
2834 
2835 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2836 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2837 
2838 	cs.dpl = 3;
2839 	ss.dpl = 3;
2840 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2841 	switch (usermode) {
2842 	case X86EMUL_MODE_PROT32:
2843 		cs_sel = (u16)(msr_data + 16);
2844 		if ((msr_data & 0xfffc) == 0x0)
2845 			return emulate_gp(ctxt, 0);
2846 		ss_sel = (u16)(msr_data + 24);
2847 		rcx = (u32)rcx;
2848 		rdx = (u32)rdx;
2849 		break;
2850 	case X86EMUL_MODE_PROT64:
2851 		cs_sel = (u16)(msr_data + 32);
2852 		if (msr_data == 0x0)
2853 			return emulate_gp(ctxt, 0);
2854 		ss_sel = cs_sel + 8;
2855 		cs.d = 0;
2856 		cs.l = 1;
2857 		if (emul_is_noncanonical_address(rcx, ctxt) ||
2858 		    emul_is_noncanonical_address(rdx, ctxt))
2859 			return emulate_gp(ctxt, 0);
2860 		break;
2861 	}
2862 	cs_sel |= SEGMENT_RPL_MASK;
2863 	ss_sel |= SEGMENT_RPL_MASK;
2864 
2865 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2866 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2867 
2868 	ctxt->_eip = rdx;
2869 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2870 
2871 	return X86EMUL_CONTINUE;
2872 }
2873 
2874 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2875 {
2876 	int iopl;
2877 	if (ctxt->mode == X86EMUL_MODE_REAL)
2878 		return false;
2879 	if (ctxt->mode == X86EMUL_MODE_VM86)
2880 		return true;
2881 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2882 	return ctxt->ops->cpl(ctxt) > iopl;
2883 }
2884 
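/*
 * Protected-mode I/O permission checks walk the TSS: the 16-bit word
 * at offset 102 holds the base of the I/O permission bitmap, in which
 * each port is one bit (1 = access denied).  E.g. for port 0x3f8 and
 * len 1 this reads the bitmap word at byte offset 0x3f8 / 8 = 0x7f and
 * tests bit 0x3f8 & 7 = 0.
 */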
2885 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2886 					    u16 port, u16 len)
2887 {
2888 	const struct x86_emulate_ops *ops = ctxt->ops;
2889 	struct desc_struct tr_seg;
2890 	u32 base3;
2891 	int r;
2892 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2893 	unsigned mask = (1 << len) - 1;
2894 	unsigned long base;
2895 
2896 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2897 	if (!tr_seg.p)
2898 		return false;
2899 	if (desc_limit_scaled(&tr_seg) < 103)
2900 		return false;
2901 	base = get_desc_base(&tr_seg);
2902 #ifdef CONFIG_X86_64
2903 	base |= ((u64)base3) << 32;
2904 #endif
2905 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2906 	if (r != X86EMUL_CONTINUE)
2907 		return false;
2908 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2909 		return false;
2910 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2911 	if (r != X86EMUL_CONTINUE)
2912 		return false;
2913 	if ((perm >> bit_idx) & mask)
2914 		return false;
2915 	return true;
2916 }
2917 
2918 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2919 				 u16 port, u16 len)
2920 {
2921 	if (ctxt->perm_ok)
2922 		return true;
2923 
2924 	if (emulator_bad_iopl(ctxt))
2925 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2926 			return false;
2927 
2928 	ctxt->perm_ok = true;
2929 
2930 	return true;
2931 }
2932 
2933 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2934 {
2935 	/*
2936 	 * Intel CPUs mask the counter and pointers in quite a strange
2937 	 * manner when ECX is zero, due to REP-string optimizations.
2938 	 */
2939 #ifdef CONFIG_X86_64
2940 	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2941 		return;
2942 
2943 	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2944 
2945 	switch (ctxt->b) {
2946 	case 0xa4:	/* movsb */
2947 	case 0xa5:	/* movsd/w */
2948 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2949 		/* fall through */
2950 	case 0xaa:	/* stosb */
2951 	case 0xab:	/* stosd/w */
2952 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2953 	}
2954 #endif
2955 }
2956 
2957 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2958 				struct tss_segment_16 *tss)
2959 {
2960 	tss->ip = ctxt->_eip;
2961 	tss->flag = ctxt->eflags;
2962 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2963 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2964 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2965 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2966 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2967 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2968 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2969 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2970 
2971 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2972 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2973 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2974 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2975 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2976 }
2977 
2978 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2979 				 struct tss_segment_16 *tss)
2980 {
2981 	int ret;
2982 	u8 cpl;
2983 
2984 	ctxt->_eip = tss->ip;
2985 	ctxt->eflags = tss->flag | 2;
2986 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2987 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2988 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2989 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2990 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2991 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2992 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2993 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2994 
2995 	/*
2996 	 * SDM says that segment selectors are loaded before segment
2997 	 * descriptors.
2998 	 */
2999 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3000 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3001 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3002 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3003 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3004 
3005 	cpl = tss->cs & 3;
3006 
3007 	/*
3008 	 * Now load segment descriptors. If a fault happens at this stage,
3009 	 * it is handled in the context of the new task.
3010 	 */
3011 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3012 					X86_TRANSFER_TASK_SWITCH, NULL);
3013 	if (ret != X86EMUL_CONTINUE)
3014 		return ret;
3015 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3016 					X86_TRANSFER_TASK_SWITCH, NULL);
3017 	if (ret != X86EMUL_CONTINUE)
3018 		return ret;
3019 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3020 					X86_TRANSFER_TASK_SWITCH, NULL);
3021 	if (ret != X86EMUL_CONTINUE)
3022 		return ret;
3023 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3024 					X86_TRANSFER_TASK_SWITCH, NULL);
3025 	if (ret != X86EMUL_CONTINUE)
3026 		return ret;
3027 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3028 					X86_TRANSFER_TASK_SWITCH, NULL);
3029 	if (ret != X86EMUL_CONTINUE)
3030 		return ret;
3031 
3032 	return X86EMUL_CONTINUE;
3033 }
3034 
3035 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3036 			  u16 tss_selector, u16 old_tss_sel,
3037 			  ulong old_tss_base, struct desc_struct *new_desc)
3038 {
3039 	const struct x86_emulate_ops *ops = ctxt->ops;
3040 	struct tss_segment_16 tss_seg;
3041 	int ret;
3042 	u32 new_tss_base = get_desc_base(new_desc);
3043 
3044 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3045 			    &ctxt->exception);
3046 	if (ret != X86EMUL_CONTINUE)
3047 		return ret;
3048 
3049 	save_state_to_tss16(ctxt, &tss_seg);
3050 
3051 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3052 			     &ctxt->exception);
3053 	if (ret != X86EMUL_CONTINUE)
3054 		return ret;
3055 
3056 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3057 			    &ctxt->exception);
3058 	if (ret != X86EMUL_CONTINUE)
3059 		return ret;
3060 
3061 	if (old_tss_sel != 0xffff) {
3062 		tss_seg.prev_task_link = old_tss_sel;
3063 
3064 		ret = ops->write_std(ctxt, new_tss_base,
3065 				     &tss_seg.prev_task_link,
3066 				     sizeof tss_seg.prev_task_link,
3067 				     &ctxt->exception);
3068 		if (ret != X86EMUL_CONTINUE)
3069 			return ret;
3070 	}
3071 
3072 	return load_state_from_tss16(ctxt, &tss_seg);
3073 }
3074 
3075 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3076 				struct tss_segment_32 *tss)
3077 {
3078 	/* CR3 and the LDT selector are intentionally not saved */
3079 	tss->eip = ctxt->_eip;
3080 	tss->eflags = ctxt->eflags;
3081 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3082 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3083 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3084 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3085 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3086 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3087 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3088 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3089 
3090 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3091 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3092 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3093 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3094 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3095 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3096 }
3097 
3098 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3099 				 struct tss_segment_32 *tss)
3100 {
3101 	int ret;
3102 	u8 cpl;
3103 
3104 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3105 		return emulate_gp(ctxt, 0);
3106 	ctxt->_eip = tss->eip;
3107 	ctxt->eflags = tss->eflags | 2;
3108 
3109 	/* General purpose registers */
3110 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3111 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3112 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3113 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3114 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3115 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3116 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3117 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3118 
3119 	/*
3120 	 * SDM says that segment selectors are loaded before segment
3121 	 * descriptors.  This is important because CPL checks will
3122 	 * use CS.RPL.
3123 	 */
3124 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3125 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3126 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3127 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3128 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3129 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3130 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3131 
3132 	/*
3133 	 * If we're switching between Protected Mode and VM86, we need to make
3134 	 * sure to update the mode before loading the segment descriptors so
3135 	 * that the selectors are interpreted correctly.
3136 	 */
3137 	if (ctxt->eflags & X86_EFLAGS_VM) {
3138 		ctxt->mode = X86EMUL_MODE_VM86;
3139 		cpl = 3;
3140 	} else {
3141 		ctxt->mode = X86EMUL_MODE_PROT32;
3142 		cpl = tss->cs & 3;
3143 	}
3144 
3145 	/*
3146 	 * Now load segment descriptors. If a fault happens at this stage,
3147 	 * it is handled in the context of the new task.
3148 	 */
3149 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3150 					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3151 	if (ret != X86EMUL_CONTINUE)
3152 		return ret;
3153 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3154 					X86_TRANSFER_TASK_SWITCH, NULL);
3155 	if (ret != X86EMUL_CONTINUE)
3156 		return ret;
3157 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3158 					X86_TRANSFER_TASK_SWITCH, NULL);
3159 	if (ret != X86EMUL_CONTINUE)
3160 		return ret;
3161 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3162 					X86_TRANSFER_TASK_SWITCH, NULL);
3163 	if (ret != X86EMUL_CONTINUE)
3164 		return ret;
3165 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3166 					X86_TRANSFER_TASK_SWITCH, NULL);
3167 	if (ret != X86EMUL_CONTINUE)
3168 		return ret;
3169 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3170 					X86_TRANSFER_TASK_SWITCH, NULL);
3171 	if (ret != X86EMUL_CONTINUE)
3172 		return ret;
3173 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3174 					X86_TRANSFER_TASK_SWITCH, NULL);
3175 
3176 	return ret;
3177 }
3178 
3179 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3180 			  u16 tss_selector, u16 old_tss_sel,
3181 			  ulong old_tss_base, struct desc_struct *new_desc)
3182 {
3183 	const struct x86_emulate_ops *ops = ctxt->ops;
3184 	struct tss_segment_32 tss_seg;
3185 	int ret;
3186 	u32 new_tss_base = get_desc_base(new_desc);
3187 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3188 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3189 
3190 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3191 			    &ctxt->exception);
3192 	if (ret != X86EMUL_CONTINUE)
3193 		return ret;
3194 
3195 	save_state_to_tss32(ctxt, &tss_seg);
3196 
3197 	/* Only GP registers and segment selectors are saved */
3198 	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3199 			     ldt_sel_offset - eip_offset, &ctxt->exception);
3200 	if (ret != X86EMUL_CONTINUE)
3201 		return ret;
3202 
3203 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3204 			    &ctxt->exception);
3205 	if (ret != X86EMUL_CONTINUE)
3206 		return ret;
3207 
3208 	if (old_tss_sel != 0xffff) {
3209 		tss_seg.prev_task_link = old_tss_sel;
3210 
3211 		ret = ops->write_std(ctxt, new_tss_base,
3212 				     &tss_seg.prev_task_link,
3213 				     sizeof tss_seg.prev_task_link,
3214 				     &ctxt->exception);
3215 		if (ret != X86EMUL_CONTINUE)
3216 			return ret;
3217 	}
3218 
3219 	return load_state_from_tss32(ctxt, &tss_seg);
3220 }
3221 
3222 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3223 				   u16 tss_selector, int idt_index, int reason,
3224 				   bool has_error_code, u32 error_code)
3225 {
3226 	const struct x86_emulate_ops *ops = ctxt->ops;
3227 	struct desc_struct curr_tss_desc, next_tss_desc;
3228 	int ret;
3229 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3230 	ulong old_tss_base =
3231 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3232 	u32 desc_limit;
3233 	ulong desc_addr, dr7;
3234 
3235 	/* FIXME: old_tss_base == ~0 ? */
3236 
3237 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3238 	if (ret != X86EMUL_CONTINUE)
3239 		return ret;
3240 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3241 	if (ret != X86EMUL_CONTINUE)
3242 		return ret;
3243 
3244 	/* FIXME: check that next_tss_desc is tss */
3245 
3246 	/*
3247 	 * Check privileges. The three cases are task switch caused by...
3248 	 *
3249 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3250 	 * 2. Exception/IRQ/iret: No check is performed
3251 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3252 	 *    hardware checks it before exiting.
3253 	 */
3254 	if (reason == TASK_SWITCH_GATE) {
3255 		if (idt_index != -1) {
3256 			/* Software interrupts */
3257 			struct desc_struct task_gate_desc;
3258 			int dpl;
3259 
3260 			ret = read_interrupt_descriptor(ctxt, idt_index,
3261 							&task_gate_desc);
3262 			if (ret != X86EMUL_CONTINUE)
3263 				return ret;
3264 
3265 			dpl = task_gate_desc.dpl;
3266 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3267 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3268 		}
3269 	}
3270 
3271 	desc_limit = desc_limit_scaled(&next_tss_desc);
3272 	if (!next_tss_desc.p ||
3273 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3274 	     desc_limit < 0x2b)) {
3275 		return emulate_ts(ctxt, tss_selector & 0xfffc);
3276 	}
3277 
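	/*
	 * The busy bit in a TSS descriptor tracks which task is live:
	 * IRET and JMP clear it on the outgoing TSS here, and anything
	 * other than IRET marks the incoming TSS busy further down.
	 */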
3278 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3279 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3280 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3281 	}
3282 
3283 	if (reason == TASK_SWITCH_IRET)
3284 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3285 
3286 	/* Set the back link to the previous task only if the NT bit is set
3287 	   in EFLAGS; note that old_tss_sel is not used after this point. */
3288 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3289 		old_tss_sel = 0xffff;
3290 
3291 	if (next_tss_desc.type & 8)
3292 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3293 				     old_tss_base, &next_tss_desc);
3294 	else
3295 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3296 				     old_tss_base, &next_tss_desc);
3297 	if (ret != X86EMUL_CONTINUE)
3298 		return ret;
3299 
3300 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3301 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3302 
3303 	if (reason != TASK_SWITCH_IRET) {
3304 		next_tss_desc.type |= (1 << 1); /* set busy flag */
3305 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3306 	}
3307 
3308 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3309 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3310 
3311 	if (has_error_code) {
3312 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3313 		ctxt->lock_prefix = 0;
3314 		ctxt->src.val = (unsigned long) error_code;
3315 		ret = em_push(ctxt);
3316 	}
3317 
3318 	ops->get_dr(ctxt, 7, &dr7);
3319 	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3320 
3321 	return ret;
3322 }
3323 
3324 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3325 			 u16 tss_selector, int idt_index, int reason,
3326 			 bool has_error_code, u32 error_code)
3327 {
3328 	int rc;
3329 
3330 	invalidate_registers(ctxt);
3331 	ctxt->_eip = ctxt->eip;
3332 	ctxt->dst.type = OP_NONE;
3333 
3334 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3335 				     has_error_code, error_code);
3336 
3337 	if (rc == X86EMUL_CONTINUE) {
3338 		ctxt->eip = ctxt->_eip;
3339 		writeback_registers(ctxt);
3340 	}
3341 
3342 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3343 }
3344 
3345 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3346 		struct operand *op)
3347 {
3348 	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3349 
3350 	register_address_increment(ctxt, reg, df * op->bytes);
3351 	op->addr.mem.ea = register_address(ctxt, reg);
3352 }
3353 
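/*
 * DAS adjusts AL after a packed-BCD subtraction.  Worked example with
 * AF and CF initially clear: 0x23 - 0x08 = 0x1b, whose low nibble
 * 0xb > 9 forces AL -= 6, giving the correct BCD result 0x15 with AF
 * set.
 */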
3354 static int em_das(struct x86_emulate_ctxt *ctxt)
3355 {
3356 	u8 al, old_al;
3357 	bool af, cf, old_cf;
3358 
3359 	cf = ctxt->eflags & X86_EFLAGS_CF;
3360 	al = ctxt->dst.val;
3361 
3362 	old_al = al;
3363 	old_cf = cf;
3364 	cf = false;
3365 	af = ctxt->eflags & X86_EFLAGS_AF;
3366 	if ((al & 0x0f) > 9 || af) {
3367 		al -= 6;
3368 		cf = old_cf | (al >= 250);
3369 		af = true;
3370 	} else {
3371 		af = false;
3372 	}
3373 	if (old_al > 0x99 || old_cf) {
3374 		al -= 0x60;
3375 		cf = true;
3376 	}
3377 
3378 	ctxt->dst.val = al;
3379 	/* Set PF, ZF, SF */
3380 	ctxt->src.type = OP_IMM;
3381 	ctxt->src.val = 0;
3382 	ctxt->src.bytes = 1;
3383 	fastop(ctxt, em_or);
3384 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3385 	if (cf)
3386 		ctxt->eflags |= X86_EFLAGS_CF;
3387 	if (af)
3388 		ctxt->eflags |= X86_EFLAGS_AF;
3389 	return X86EMUL_CONTINUE;
3390 }
3391 
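/*
 * AAM splits AL by the immediate base (10 unless overridden):
 * AH = AL / imm8, AL = AL % imm8.  E.g. multiplying two BCD digits to
 * get AL = 63 and executing AAM yields AH = 6, AL = 3.  A zero
 * immediate raises #DE, as checked below.
 */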
3392 static int em_aam(struct x86_emulate_ctxt *ctxt)
3393 {
3394 	u8 al, ah;
3395 
3396 	if (ctxt->src.val == 0)
3397 		return emulate_de(ctxt);
3398 
3399 	al = ctxt->dst.val & 0xff;
3400 	ah = al / ctxt->src.val;
3401 	al %= ctxt->src.val;
3402 
3403 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3404 
3405 	/* Set PF, ZF, SF */
3406 	ctxt->src.type = OP_IMM;
3407 	ctxt->src.val = 0;
3408 	ctxt->src.bytes = 1;
3409 	fastop(ctxt, em_or);
3410 
3411 	return X86EMUL_CONTINUE;
3412 }
3413 
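/*
 * AAD is the inverse of AAM: AL = (AL + AH * imm8) & 0xff, with AH
 * cleared by the 16-bit writeback.  E.g. AH = 6, AL = 3 becomes
 * AL = 63 ahead of a decimal divide.
 */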
3414 static int em_aad(struct x86_emulate_ctxt *ctxt)
3415 {
3416 	u8 al = ctxt->dst.val & 0xff;
3417 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3418 
3419 	al = (al + (ah * ctxt->src.val)) & 0xff;
3420 
3421 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3422 
3423 	/* Set PF, ZF, SF */
3424 	ctxt->src.type = OP_IMM;
3425 	ctxt->src.val = 0;
3426 	ctxt->src.bytes = 1;
3427 	fastop(ctxt, em_or);
3428 
3429 	return X86EMUL_CONTINUE;
3430 }
3431 
3432 static int em_call(struct x86_emulate_ctxt *ctxt)
3433 {
3434 	int rc;
3435 	long rel = ctxt->src.val;
3436 
3437 	ctxt->src.val = (unsigned long)ctxt->_eip;
3438 	rc = jmp_rel(ctxt, rel);
3439 	if (rc != X86EMUL_CONTINUE)
3440 		return rc;
3441 	return em_push(ctxt);
3442 }
3443 
3444 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3445 {
3446 	u16 sel, old_cs;
3447 	ulong old_eip;
3448 	int rc;
3449 	struct desc_struct old_desc, new_desc;
3450 	const struct x86_emulate_ops *ops = ctxt->ops;
3451 	int cpl = ctxt->ops->cpl(ctxt);
3452 	enum x86emul_mode prev_mode = ctxt->mode;
3453 
3454 	old_eip = ctxt->_eip;
3455 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3456 
3457 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3458 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3459 				       X86_TRANSFER_CALL_JMP, &new_desc);
3460 	if (rc != X86EMUL_CONTINUE)
3461 		return rc;
3462 
3463 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3464 	if (rc != X86EMUL_CONTINUE)
3465 		goto fail;
3466 
3467 	ctxt->src.val = old_cs;
3468 	rc = em_push(ctxt);
3469 	if (rc != X86EMUL_CONTINUE)
3470 		goto fail;
3471 
3472 	ctxt->src.val = old_eip;
3473 	rc = em_push(ctxt);
3474 	/* If we failed, we tainted the memory, but at the very least we
3475 	   should restore CS. */
3476 	if (rc != X86EMUL_CONTINUE) {
3477 		pr_warn_once("faulting far call emulation tainted memory\n");
3478 		goto fail;
3479 	}
3480 	return rc;
3481 fail:
3482 	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3483 	ctxt->mode = prev_mode;
3484 	return rc;
3485 
3486 }
3487 
3488 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3489 {
3490 	int rc;
3491 	unsigned long eip;
3492 
3493 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3494 	if (rc != X86EMUL_CONTINUE)
3495 		return rc;
3496 	rc = assign_eip_near(ctxt, eip);
3497 	if (rc != X86EMUL_CONTINUE)
3498 		return rc;
3499 	rsp_increment(ctxt, ctxt->src.val);
3500 	return X86EMUL_CONTINUE;
3501 }
3502 
3503 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3504 {
3505 	/* Write back the register source. */
3506 	ctxt->src.val = ctxt->dst.val;
3507 	write_register_operand(&ctxt->src);
3508 
3509 	/* Write back the memory destination with implicit LOCK prefix. */
3510 	ctxt->dst.val = ctxt->src.orig_val;
3511 	ctxt->lock_prefix = 1;
3512 	return X86EMUL_CONTINUE;
3513 }
3514 
3515 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3516 {
3517 	ctxt->dst.val = ctxt->src2.val;
3518 	return fastop(ctxt, em_imul);
3519 }
3520 
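/*
 * CWD/CDQ/CQO replicate the accumulator's sign bit throughout rDX.
 * The expression below does this branchlessly: shifting the sign bit
 * down to bit 0 and subtracting 1 gives 0 or ~0, which is then
 * inverted.  E.g. AX = 0x8000 produces DX = 0xffff.
 */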
3521 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3522 {
3523 	ctxt->dst.type = OP_REG;
3524 	ctxt->dst.bytes = ctxt->src.bytes;
3525 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3526 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3527 
3528 	return X86EMUL_CONTINUE;
3529 }
3530 
3531 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3532 {
3533 	u64 tsc = 0;
3534 
3535 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3536 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3537 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3538 	return X86EMUL_CONTINUE;
3539 }
3540 
3541 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3542 {
3543 	u64 pmc;
3544 
3545 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3546 		return emulate_gp(ctxt, 0);
3547 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3548 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3549 	return X86EMUL_CONTINUE;
3550 }
3551 
3552 static int em_mov(struct x86_emulate_ctxt *ctxt)
3553 {
3554 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3555 	return X86EMUL_CONTINUE;
3556 }
3557 
3558 #define FFL(x) bit(X86_FEATURE_##x)
3559 
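/*
 * MOVBE moves data with the byte order reversed; e.g. a 32-bit MOVBE
 * of 0x12345678 yields 0x78563412, equivalent to MOV plus swab32().
 */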
3560 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3561 {
3562 	u32 ebx, ecx, edx, eax = 1;
3563 	u16 tmp;
3564 
3565 	/*
3566 	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3567 	 */
3568 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3569 	if (!(ecx & FFL(MOVBE)))
3570 		return emulate_ud(ctxt);
3571 
3572 	switch (ctxt->op_bytes) {
3573 	case 2:
3574 		/*
3575 		 * From MOVBE definition: "...When the operand size is 16 bits,
3576 		 * the upper word of the destination register remains unchanged
3577 		 * ..."
3578 		 *
3579 		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3580 		 * rules, so we have to do the operation almost by hand.
3581 		 */
3582 		tmp = (u16)ctxt->src.val;
3583 		ctxt->dst.val &= ~0xffffUL;
3584 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3585 		break;
3586 	case 4:
3587 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3588 		break;
3589 	case 8:
3590 		ctxt->dst.val = swab64(ctxt->src.val);
3591 		break;
3592 	default:
3593 		BUG();
3594 	}
3595 	return X86EMUL_CONTINUE;
3596 }
3597 
3598 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3599 {
3600 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3601 		return emulate_gp(ctxt, 0);
3602 
3603 	/* Disable writeback. */
3604 	ctxt->dst.type = OP_NONE;
3605 	return X86EMUL_CONTINUE;
3606 }
3607 
3608 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3609 {
3610 	unsigned long val;
3611 
3612 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3613 		val = ctxt->src.val & ~0ULL;
3614 	else
3615 		val = ctxt->src.val & ~0U;
3616 
3617 	/* #UD condition is already handled. */
3618 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3619 		return emulate_gp(ctxt, 0);
3620 
3621 	/* Disable writeback. */
3622 	ctxt->dst.type = OP_NONE;
3623 	return X86EMUL_CONTINUE;
3624 }
3625 
3626 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3627 {
3628 	u64 msr_data;
3629 
3630 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3631 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3632 	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3633 		return emulate_gp(ctxt, 0);
3634 
3635 	return X86EMUL_CONTINUE;
3636 }
3637 
3638 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3639 {
3640 	u64 msr_data;
3641 
3642 	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3643 		return emulate_gp(ctxt, 0);
3644 
3645 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3646 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3647 	return X86EMUL_CONTINUE;
3648 }
3649 
3650 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3651 {
3652 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3653 		return emulate_ud(ctxt);
3654 
3655 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3656 	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3657 		ctxt->dst.bytes = 2;
3658 	return X86EMUL_CONTINUE;
3659 }
3660 
3661 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3662 {
3663 	u16 sel = ctxt->src.val;
3664 
3665 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3666 		return emulate_ud(ctxt);
3667 
3668 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3669 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3670 
3671 	/* Disable writeback. */
3672 	ctxt->dst.type = OP_NONE;
3673 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3674 }
3675 
3676 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3677 {
3678 	u16 sel = ctxt->src.val;
3679 
3680 	/* Disable writeback. */
3681 	ctxt->dst.type = OP_NONE;
3682 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3683 }
3684 
3685 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3686 {
3687 	u16 sel = ctxt->src.val;
3688 
3689 	/* Disable writeback. */
3690 	ctxt->dst.type = OP_NONE;
3691 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3692 }
3693 
3694 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3695 {
3696 	int rc;
3697 	ulong linear;
3698 
3699 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3700 	if (rc == X86EMUL_CONTINUE)
3701 		ctxt->ops->invlpg(ctxt, linear);
3702 	/* Disable writeback. */
3703 	ctxt->dst.type = OP_NONE;
3704 	return X86EMUL_CONTINUE;
3705 }
3706 
3707 static int em_clts(struct x86_emulate_ctxt *ctxt)
3708 {
3709 	ulong cr0;
3710 
3711 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3712 	cr0 &= ~X86_CR0_TS;
3713 	ctxt->ops->set_cr(ctxt, 0, cr0);
3714 	return X86EMUL_CONTINUE;
3715 }
3716 
3717 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3718 {
3719 	int rc = ctxt->ops->fix_hypercall(ctxt);
3720 
3721 	if (rc != X86EMUL_CONTINUE)
3722 		return rc;
3723 
3724 	/* Let the processor re-execute the fixed hypercall */
3725 	ctxt->_eip = ctxt->eip;
3726 	/* Disable writeback. */
3727 	ctxt->dst.type = OP_NONE;
3728 	return X86EMUL_CONTINUE;
3729 }
3730 
3731 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3732 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3733 					      struct desc_ptr *ptr))
3734 {
3735 	struct desc_ptr desc_ptr;
3736 
3737 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3738 		ctxt->op_bytes = 8;
3739 	get(ctxt, &desc_ptr);
3740 	if (ctxt->op_bytes == 2) {
3741 		ctxt->op_bytes = 4;
3742 		desc_ptr.address &= 0x00ffffff;
3743 	}
3744 	/* Disable writeback. */
3745 	ctxt->dst.type = OP_NONE;
3746 	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3747 				   &desc_ptr, 2 + ctxt->op_bytes);
3748 }
3749 
3750 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3751 {
3752 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3753 }
3754 
3755 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3756 {
3757 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3758 }
3759 
3760 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3761 {
3762 	struct desc_ptr desc_ptr;
3763 	int rc;
3764 
3765 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3766 		ctxt->op_bytes = 8;
3767 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3768 			     &desc_ptr.size, &desc_ptr.address,
3769 			     ctxt->op_bytes);
3770 	if (rc != X86EMUL_CONTINUE)
3771 		return rc;
3772 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3773 	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3774 		return emulate_gp(ctxt, 0);
3775 	if (lgdt)
3776 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3777 	else
3778 		ctxt->ops->set_idt(ctxt, &desc_ptr);
3779 	/* Disable writeback. */
3780 	ctxt->dst.type = OP_NONE;
3781 	return X86EMUL_CONTINUE;
3782 }
3783 
3784 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3785 {
3786 	return em_lgdt_lidt(ctxt, true);
3787 }
3788 
3789 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3790 {
3791 	return em_lgdt_lidt(ctxt, false);
3792 }
3793 
3794 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3795 {
3796 	if (ctxt->dst.type == OP_MEM)
3797 		ctxt->dst.bytes = 2;
3798 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3799 	return X86EMUL_CONTINUE;
3800 }
3801 
3802 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3803 {
3804 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3805 			  | (ctxt->src.val & 0x0f));
3806 	ctxt->dst.type = OP_NONE;
3807 	return X86EMUL_CONTINUE;
3808 }
3809 
3810 static int em_loop(struct x86_emulate_ctxt *ctxt)
3811 {
3812 	int rc = X86EMUL_CONTINUE;
3813 
3814 	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
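	/*
	 * 0xe2 is plain LOOP.  For LOOPNE (0xe0) and LOOPE (0xe1), b ^ 0x5
	 * yields 0xe5/0xe4, whose low nibble is the JNE/JE condition code
	 * that test_cc() expects.
	 */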
3815 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3816 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3817 		rc = jmp_rel(ctxt, ctxt->src.val);
3818 
3819 	return rc;
3820 }
3821 
3822 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3823 {
3824 	int rc = X86EMUL_CONTINUE;
3825 
3826 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3827 		rc = jmp_rel(ctxt, ctxt->src.val);
3828 
3829 	return rc;
3830 }
3831 
3832 static int em_in(struct x86_emulate_ctxt *ctxt)
3833 {
3834 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3835 			     &ctxt->dst.val))
3836 		return X86EMUL_IO_NEEDED;
3837 
3838 	return X86EMUL_CONTINUE;
3839 }
3840 
3841 static int em_out(struct x86_emulate_ctxt *ctxt)
3842 {
3843 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3844 				    &ctxt->src.val, 1);
3845 	/* Disable writeback. */
3846 	ctxt->dst.type = OP_NONE;
3847 	return X86EMUL_CONTINUE;
3848 }
3849 
3850 static int em_cli(struct x86_emulate_ctxt *ctxt)
3851 {
3852 	if (emulator_bad_iopl(ctxt))
3853 		return emulate_gp(ctxt, 0);
3854 
3855 	ctxt->eflags &= ~X86_EFLAGS_IF;
3856 	return X86EMUL_CONTINUE;
3857 }
3858 
3859 static int em_sti(struct x86_emulate_ctxt *ctxt)
3860 {
3861 	if (emulator_bad_iopl(ctxt))
3862 		return emulate_gp(ctxt, 0);
3863 
3864 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3865 	ctxt->eflags |= X86_EFLAGS_IF;
3866 	return X86EMUL_CONTINUE;
3867 }
3868 
3869 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3870 {
3871 	u32 eax, ebx, ecx, edx;
3872 	u64 msr = 0;
3873 
3874 	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3875 	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3876 	    ctxt->ops->cpl(ctxt)) {
3877 		return emulate_gp(ctxt, 0);
3878 	}
3879 
3880 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3881 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3882 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3883 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3884 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3885 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3886 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3887 	return X86EMUL_CONTINUE;
3888 }
3889 
3890 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3891 {
3892 	u32 flags;
3893 
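	/* SAHF copies AH into the five status flags; EFLAGS bit 1 stays fixed. */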
3894 	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3895 		X86_EFLAGS_SF;
3896 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3897 
3898 	ctxt->eflags &= ~0xffUL;
3899 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3900 	return X86EMUL_CONTINUE;
3901 }
3902 
3903 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3904 {
3905 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3906 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3907 	return X86EMUL_CONTINUE;
3908 }
3909 
3910 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3911 {
3912 	switch (ctxt->op_bytes) {
3913 #ifdef CONFIG_X86_64
3914 	case 8:
3915 		asm("bswap %0" : "+r"(ctxt->dst.val));
3916 		break;
3917 #endif
3918 	default:
3919 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3920 		break;
3921 	}
3922 	return X86EMUL_CONTINUE;
3923 }
3924 
3925 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3926 {
3927 	/* Emulate clflush as a nop, regardless of the guest's CPUID. */
3928 	return X86EMUL_CONTINUE;
3929 }
3930 
3931 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3932 {
3933 	ctxt->dst.val = (s32) ctxt->src.val;
3934 	return X86EMUL_CONTINUE;
3935 }
3936 
3937 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3938 {
3939 	u32 eax = 1, ebx, ecx = 0, edx;
3940 
3941 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3942 	if (!(edx & FFL(FXSR)))
3943 		return emulate_ud(ctxt);
3944 
3945 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3946 		return emulate_nm(ctxt);
3947 
3948 	/*
3949 	 * Rather than work around the lack of fxsave64/fxrstor64 on old
3950 	 * compilers, simply don't emulate a case that should never be hit.
3951 	 */
3952 	if (ctxt->mode >= X86EMUL_MODE_PROT64)
3953 		return X86EMUL_UNHANDLEABLE;
3954 
3955 	return X86EMUL_CONTINUE;
3956 }
3957 
3958 /*
3959  * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3960  * and restore MXCSR.
3961  */
3962 static size_t __fxstate_size(int nregs)
3963 {
3964 	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3965 }
3966 
3967 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3968 {
3969 	bool cr4_osfxsr;
3970 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3971 		return __fxstate_size(16);
3972 
3973 	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3974 	return __fxstate_size(cr4_osfxsr ? 8 : 0);
3975 }
3976 
3977 /*
3978  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3979  *  1) 16 bit mode
3980  *  2) 32 bit mode
3981  *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
3982  *       preserve the whole 32 bit values, though, so (1) and (2) are the
3983  *       same with respect to save and restore.
3984  *  3) 64-bit mode with REX.W prefix
3985  *     - like (2), but XMM 8-15 are being saved and restored
3986  *  4) 64-bit mode without REX.W prefix
3987  *     - like (3), but FIP and FDP are 64 bit
3988  *
3989  * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3990  * desired result.  (4) is not emulated.
3991  *
3992  * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3993  * and FPU DS) should match.
3994  */
3995 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3996 {
3997 	struct fxregs_state fx_state;
3998 	int rc;
3999 
4000 	rc = check_fxsr(ctxt);
4001 	if (rc != X86EMUL_CONTINUE)
4002 		return rc;
4003 
4004 	ctxt->ops->get_fpu(ctxt);
4005 
4006 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4007 
4008 	ctxt->ops->put_fpu(ctxt);
4009 
4010 	if (rc != X86EMUL_CONTINUE)
4011 		return rc;
4012 
4013 	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4014 		                   fxstate_size(ctxt));
4015 }
4016 
4017 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4018 {
4019 	struct fxregs_state fx_state;
4020 	int rc;
4021 	size_t size;
4022 
4023 	rc = check_fxsr(ctxt);
4024 	if (rc != X86EMUL_CONTINUE)
4025 		return rc;
4026 
4027 	ctxt->ops->get_fpu(ctxt);
4028 
4029 	size = fxstate_size(ctxt);
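	/*
	 * If the guest's saved image doesn't cover XMM8-15 (or the XMM
	 * registers at all), save the current state first so the fields
	 * that won't be loaded from memory keep their present values.
	 */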
4030 	if (size < __fxstate_size(16)) {
4031 		rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4032 		if (rc != X86EMUL_CONTINUE)
4033 			goto out;
4034 	}
4035 
4036 	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4037 	if (rc != X86EMUL_CONTINUE)
4038 		goto out;
4039 
4040 	if (fx_state.mxcsr >> 16) {
4041 		rc = emulate_gp(ctxt, 0);
4042 		goto out;
4043 	}
4044 
4045 	/* rc is necessarily X86EMUL_CONTINUE at this point. */
4046 	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4047 
4048 out:
4049 	ctxt->ops->put_fpu(ctxt);
4050 
4051 	return rc;
4052 }
4053 
4054 static bool valid_cr(int nr)
4055 {
4056 	switch (nr) {
4057 	case 0:
4058 	case 2 ... 4:
4059 	case 8:
4060 		return true;
4061 	default:
4062 		return false;
4063 	}
4064 }
4065 
4066 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4067 {
4068 	if (!valid_cr(ctxt->modrm_reg))
4069 		return emulate_ud(ctxt);
4070 
4071 	return X86EMUL_CONTINUE;
4072 }
4073 
4074 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4075 {
4076 	u64 new_val = ctxt->src.val64;
4077 	int cr = ctxt->modrm_reg;
4078 	u64 efer = 0;
4079 
4080 	static u64 cr_reserved_bits[] = {
4081 		0xffffffff00000000ULL,
4082 		0, 0, 0, /* CR3 checked later */
4083 		CR4_RESERVED_BITS,
4084 		0, 0, 0,
4085 		CR8_RESERVED_BITS,
4086 	};
4087 
4088 	if (!valid_cr(cr))
4089 		return emulate_ud(ctxt);
4090 
4091 	if (new_val & cr_reserved_bits[cr])
4092 		return emulate_gp(ctxt, 0);
4093 
4094 	switch (cr) {
4095 	case 0: {
4096 		u64 cr4;
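		/*
		 * CR0.PG requires CR0.PE, and CR0.NW requires CR0.CD.
		 * Enabling paging while EFER.LME is set additionally
		 * requires CR4.PAE, as long mode cannot be entered
		 * without PAE.
		 */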
4097 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4098 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4099 			return emulate_gp(ctxt, 0);
4100 
4101 		cr4 = ctxt->ops->get_cr(ctxt, 4);
4102 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4103 
4104 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4105 		    !(cr4 & X86_CR4_PAE))
4106 			return emulate_gp(ctxt, 0);
4107 
4108 		break;
4109 		}
4110 	case 3: {
4111 		u64 rsvd = 0;
4112 
4113 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4114 		if (efer & EFER_LMA) {
4115 			u64 maxphyaddr;
4116 			u32 eax, ebx, ecx, edx;
4117 
4118 			eax = 0x80000008;
4119 			ecx = 0;
4120 			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4121 						 &edx, false))
4122 				maxphyaddr = eax & 0xff;
4123 			else
4124 				maxphyaddr = 36;
4125 			rsvd = rsvd_bits(maxphyaddr, 62);
4126 		}
4127 
4128 		if (new_val & rsvd)
4129 			return emulate_gp(ctxt, 0);
4130 
4131 		break;
4132 		}
4133 	case 4: {
4134 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4135 
4136 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4137 			return emulate_gp(ctxt, 0);
4138 
4139 		break;
4140 		}
4141 	}
4142 
4143 	return X86EMUL_CONTINUE;
4144 }
4145 
4146 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4147 {
4148 	unsigned long dr7;
4149 
4150 	ctxt->ops->get_dr(ctxt, 7, &dr7);
4151 
4152 	/* Check if DR7.GD (general detect enable) is set */
4153 	return dr7 & (1 << 13);
4154 }
4155 
4156 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4157 {
4158 	int dr = ctxt->modrm_reg;
4159 	u64 cr4;
4160 
4161 	if (dr > 7)
4162 		return emulate_ud(ctxt);
4163 
4164 	cr4 = ctxt->ops->get_cr(ctxt, 4);
4165 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4166 		return emulate_ud(ctxt);
4167 
4168 	if (check_dr7_gd(ctxt)) {
4169 		ulong dr6;
4170 
4171 		ctxt->ops->get_dr(ctxt, 6, &dr6);
4172 		dr6 &= ~15;
4173 		dr6 |= DR6_BD | DR6_RTM;
4174 		ctxt->ops->set_dr(ctxt, 6, dr6);
4175 		return emulate_db(ctxt);
4176 	}
4177 
4178 	return X86EMUL_CONTINUE;
4179 }
4180 
4181 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4182 {
4183 	u64 new_val = ctxt->src.val64;
4184 	int dr = ctxt->modrm_reg;
4185 
4186 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4187 		return emulate_gp(ctxt, 0);
4188 
4189 	return check_dr_read(ctxt);
4190 }
4191 
4192 static int check_svme(struct x86_emulate_ctxt *ctxt)
4193 {
4194 	u64 efer = 0;
4195 
4196 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4197 
4198 	if (!(efer & EFER_SVME))
4199 		return emulate_ud(ctxt);
4200 
4201 	return X86EMUL_CONTINUE;
4202 }
4203 
4204 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4205 {
4206 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4207 
4208 	/* Valid physical address? */
4209 	if (rax & 0xffff000000000000ULL)
4210 		return emulate_gp(ctxt, 0);
4211 
4212 	return check_svme(ctxt);
4213 }
4214 
4215 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4216 {
4217 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4218 
4219 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4220 		return emulate_ud(ctxt);
4221 
4222 	return X86EMUL_CONTINUE;
4223 }
4224 
4225 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4226 {
4227 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4228 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4229 
4230 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4231 	    ctxt->ops->check_pmc(ctxt, rcx))
4232 		return emulate_gp(ctxt, 0);
4233 
4234 	return X86EMUL_CONTINUE;
4235 }
4236 
4237 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4238 {
4239 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4240 	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4241 		return emulate_gp(ctxt, 0);
4242 
4243 	return X86EMUL_CONTINUE;
4244 }
4245 
4246 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4247 {
4248 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4249 	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4250 		return emulate_gp(ctxt, 0);
4251 
4252 	return X86EMUL_CONTINUE;
4253 }
4254 
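/*
 * Opcode-table entry constructors: D() carries decode flags only, I() adds
 * an ->execute handler and F() a fastop; DI/II/IIP additionally attach an
 * intercept (and, for the *P variants, a permission check).  G/GD/ID/MD/E/GP
 * wrap a second-level table (group, group-dual, instruction-dual, mode-dual,
 * escape and mandatory-prefix, respectively), and EXT() extends by the ModRM
 * r/m field.
 */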
4255 #define D(_y) { .flags = (_y) }
4256 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4257 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4258 		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4259 #define N    D(NotImpl)
4260 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4261 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4262 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4263 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4264 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4265 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4266 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4267 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4268 #define II(_f, _e, _i) \
4269 	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4270 #define IIP(_f, _e, _i, _p) \
4271 	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4272 	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4273 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4274 
4275 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
4276 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4277 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4278 #define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4279 #define I2bvIP(_f, _e, _i, _p) \
4280 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4281 
4282 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4283 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4284 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
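
/*
 * Illustration (not a table entry itself): F6ALU(Lock, em_add) expands to
 * the six classic ALU entries, e.g. for opcodes 0x00-0x05: the r/m,reg and
 * reg,r/m forms in byte and full width plus the AL,imm8 and eAX,imm forms,
 * with Lock dropped from the forms that cannot take a LOCK prefix.
 */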
4285 
4286 static const struct opcode group7_rm0[] = {
4287 	N,
4288 	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4289 	N, N, N, N, N, N,
4290 };
4291 
4292 static const struct opcode group7_rm1[] = {
4293 	DI(SrcNone | Priv, monitor),
4294 	DI(SrcNone | Priv, mwait),
4295 	N, N, N, N, N, N,
4296 };
4297 
4298 static const struct opcode group7_rm3[] = {
4299 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4300 	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4301 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4302 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4303 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4304 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4305 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4306 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4307 };
4308 
4309 static const struct opcode group7_rm7[] = {
4310 	N,
4311 	DIP(SrcNone, rdtscp, check_rdtsc),
4312 	N, N, N, N, N, N,
4313 };
4314 
4315 static const struct opcode group1[] = {
4316 	F(Lock, em_add),
4317 	F(Lock | PageTable, em_or),
4318 	F(Lock, em_adc),
4319 	F(Lock, em_sbb),
4320 	F(Lock | PageTable, em_and),
4321 	F(Lock, em_sub),
4322 	F(Lock, em_xor),
4323 	F(NoWrite, em_cmp),
4324 };
4325 
4326 static const struct opcode group1A[] = {
4327 	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4328 };
4329 
4330 static const struct opcode group2[] = {
4331 	F(DstMem | ModRM, em_rol),
4332 	F(DstMem | ModRM, em_ror),
4333 	F(DstMem | ModRM, em_rcl),
4334 	F(DstMem | ModRM, em_rcr),
4335 	F(DstMem | ModRM, em_shl),
4336 	F(DstMem | ModRM, em_shr),
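	/* ModRM /6 is SAL, an alias of SHL, hence em_shl appears twice. */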
4337 	F(DstMem | ModRM, em_shl),
4338 	F(DstMem | ModRM, em_sar),
4339 };
4340 
4341 static const struct opcode group3[] = {
4342 	F(DstMem | SrcImm | NoWrite, em_test),
4343 	F(DstMem | SrcImm | NoWrite, em_test),
4344 	F(DstMem | SrcNone | Lock, em_not),
4345 	F(DstMem | SrcNone | Lock, em_neg),
4346 	F(DstXacc | Src2Mem, em_mul_ex),
4347 	F(DstXacc | Src2Mem, em_imul_ex),
4348 	F(DstXacc | Src2Mem, em_div_ex),
4349 	F(DstXacc | Src2Mem, em_idiv_ex),
4350 };
4351 
4352 static const struct opcode group4[] = {
4353 	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4354 	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4355 	N, N, N, N, N, N,
4356 };
4357 
4358 static const struct opcode group5[] = {
4359 	F(DstMem | SrcNone | Lock,		em_inc),
4360 	F(DstMem | SrcNone | Lock,		em_dec),
4361 	I(SrcMem | NearBranch,			em_call_near_abs),
4362 	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4363 	I(SrcMem | NearBranch,			em_jmp_abs),
4364 	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4365 	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4366 };
4367 
4368 static const struct opcode group6[] = {
4369 	DI(Prot | DstMem,	sldt),
4370 	DI(Prot | DstMem,	str),
4371 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4372 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4373 	N, N, N, N,
4374 };
4375 
4376 static const struct group_dual group7 = { {
4377 	II(Mov | DstMem,			em_sgdt, sgdt),
4378 	II(Mov | DstMem,			em_sidt, sidt),
4379 	II(SrcMem | Priv,			em_lgdt, lgdt),
4380 	II(SrcMem | Priv,			em_lidt, lidt),
4381 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4382 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4383 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4384 }, {
4385 	EXT(0, group7_rm0),
4386 	EXT(0, group7_rm1),
4387 	N, EXT(0, group7_rm3),
4388 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4389 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4390 	EXT(0, group7_rm7),
4391 } };
4392 
4393 static const struct opcode group8[] = {
4394 	N, N, N, N,
4395 	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4396 	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4397 	F(DstMem | SrcImmByte | Lock,			em_btr),
4398 	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4399 };
4400 
4401 static const struct group_dual group9 = { {
4402 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4403 }, {
4404 	N, N, N, N, N, N, N, N,
4405 } };
4406 
4407 static const struct opcode group11[] = {
4408 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4409 	X7(D(Undefined)),
4410 };
4411 
4412 static const struct gprefix pfx_0f_ae_7 = {
4413 	I(SrcMem | ByteOp, em_clflush), N, N, N,
4414 };
4415 
4416 static const struct group_dual group15 = { {
4417 	I(ModRM | Aligned16, em_fxsave),
4418 	I(ModRM | Aligned16, em_fxrstor),
4419 	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4420 }, {
4421 	N, N, N, N, N, N, N, N,
4422 } };
4423 
4424 static const struct gprefix pfx_0f_6f_0f_7f = {
4425 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4426 };
4427 
4428 static const struct instr_dual instr_dual_0f_2b = {
4429 	I(0, em_mov), N
4430 };
4431 
4432 static const struct gprefix pfx_0f_2b = {
4433 	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4434 };
4435 
4436 static const struct gprefix pfx_0f_28_0f_29 = {
4437 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4438 };
4439 
4440 static const struct gprefix pfx_0f_e7 = {
4441 	N, I(Sse, em_mov), N, N,
4442 };
4443 
4444 static const struct escape escape_d9 = { {
4445 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4446 }, {
4447 	/* 0xC0 - 0xC7 */
4448 	N, N, N, N, N, N, N, N,
4449 	/* 0xC8 - 0xCF */
4450 	N, N, N, N, N, N, N, N,
4451 	/* 0xD0 - 0xD7 */
4452 	N, N, N, N, N, N, N, N,
4453 	/* 0xD8 - 0xDF */
4454 	N, N, N, N, N, N, N, N,
4455 	/* 0xE0 - 0xE7 */
4456 	N, N, N, N, N, N, N, N,
4457 	/* 0xE8 - 0xEF */
4458 	N, N, N, N, N, N, N, N,
4459 	/* 0xF0 - 0xF7 */
4460 	N, N, N, N, N, N, N, N,
4461 	/* 0xF8 - 0xFF */
4462 	N, N, N, N, N, N, N, N,
4463 } };
4464 
4465 static const struct escape escape_db = { {
4466 	N, N, N, N, N, N, N, N,
4467 }, {
4468 	/* 0xC0 - 0xC7 */
4469 	N, N, N, N, N, N, N, N,
4470 	/* 0xC8 - 0xCF */
4471 	N, N, N, N, N, N, N, N,
4472 	/* 0xD0 - 0xD7 */
4473 	N, N, N, N, N, N, N, N,
4474 	/* 0xD8 - 0xDF */
4475 	N, N, N, N, N, N, N, N,
4476 	/* 0xE0 - 0xE7 */
4477 	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4478 	/* 0xE8 - 0xEF */
4479 	N, N, N, N, N, N, N, N,
4480 	/* 0xF0 - 0xF7 */
4481 	N, N, N, N, N, N, N, N,
4482 	/* 0xF8 - 0xFF */
4483 	N, N, N, N, N, N, N, N,
4484 } };
4485 
4486 static const struct escape escape_dd = { {
4487 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4488 }, {
4489 	/* 0xC0 - 0xC7 */
4490 	N, N, N, N, N, N, N, N,
4491 	/* 0xC8 - 0xCF */
4492 	N, N, N, N, N, N, N, N,
4493 	/* 0xD0 - 0xD7 */
4494 	N, N, N, N, N, N, N, N,
4495 	/* 0xD8 - 0xDF */
4496 	N, N, N, N, N, N, N, N,
4497 	/* 0xE0 - 0xE7 */
4498 	N, N, N, N, N, N, N, N,
4499 	/* 0xE8 - 0xEF */
4500 	N, N, N, N, N, N, N, N,
4501 	/* 0xF0 - 0xF7 */
4502 	N, N, N, N, N, N, N, N,
4503 	/* 0xF8 - 0xFF */
4504 	N, N, N, N, N, N, N, N,
4505 } };
4506 
4507 static const struct instr_dual instr_dual_0f_c3 = {
4508 	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4509 };
4510 
4511 static const struct mode_dual mode_dual_63 = {
4512 	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4513 };
4514 
4515 static const struct opcode opcode_table[256] = {
4516 	/* 0x00 - 0x07 */
4517 	F6ALU(Lock, em_add),
4518 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4519 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4520 	/* 0x08 - 0x0F */
4521 	F6ALU(Lock | PageTable, em_or),
4522 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4523 	N,
4524 	/* 0x10 - 0x17 */
4525 	F6ALU(Lock, em_adc),
4526 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4527 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4528 	/* 0x18 - 0x1F */
4529 	F6ALU(Lock, em_sbb),
4530 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4531 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4532 	/* 0x20 - 0x27 */
4533 	F6ALU(Lock | PageTable, em_and), N, N,
4534 	/* 0x28 - 0x2F */
4535 	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4536 	/* 0x30 - 0x37 */
4537 	F6ALU(Lock, em_xor), N, N,
4538 	/* 0x38 - 0x3F */
4539 	F6ALU(NoWrite, em_cmp), N, N,
4540 	/* 0x40 - 0x4F */
4541 	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4542 	/* 0x50 - 0x57 */
4543 	X8(I(SrcReg | Stack, em_push)),
4544 	/* 0x58 - 0x5F */
4545 	X8(I(DstReg | Stack, em_pop)),
4546 	/* 0x60 - 0x67 */
4547 	I(ImplicitOps | Stack | No64, em_pusha),
4548 	I(ImplicitOps | Stack | No64, em_popa),
4549 	N, MD(ModRM, &mode_dual_63),
4550 	N, N, N, N,
4551 	/* 0x68 - 0x6F */
4552 	I(SrcImm | Mov | Stack, em_push),
4553 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4554 	I(SrcImmByte | Mov | Stack, em_push),
4555 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4556 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4557 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4558 	/* 0x70 - 0x7F */
4559 	X16(D(SrcImmByte | NearBranch)),
4560 	/* 0x80 - 0x87 */
4561 	G(ByteOp | DstMem | SrcImm, group1),
4562 	G(DstMem | SrcImm, group1),
4563 	G(ByteOp | DstMem | SrcImm | No64, group1),
4564 	G(DstMem | SrcImmByte, group1),
4565 	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4566 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4567 	/* 0x88 - 0x8F */
4568 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4569 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4570 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4571 	D(ModRM | SrcMem | NoAccess | DstReg),
4572 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4573 	G(0, group1A),
4574 	/* 0x90 - 0x97 */
4575 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4576 	/* 0x98 - 0x9F */
4577 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4578 	I(SrcImmFAddr | No64, em_call_far), N,
4579 	II(ImplicitOps | Stack, em_pushf, pushf),
4580 	II(ImplicitOps | Stack, em_popf, popf),
4581 	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4582 	/* 0xA0 - 0xA7 */
4583 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4584 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4585 	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4586 	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4587 	/* 0xA8 - 0xAF */
4588 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4589 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4590 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4591 	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4592 	/* 0xB0 - 0xB7 */
4593 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4594 	/* 0xB8 - 0xBF */
4595 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4596 	/* 0xC0 - 0xC7 */
4597 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4598 	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4599 	I(ImplicitOps | NearBranch, em_ret),
4600 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4601 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4602 	G(ByteOp, group11), G(0, group11),
4603 	/* 0xC8 - 0xCF */
4604 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4605 	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4606 	I(ImplicitOps, em_ret_far),
4607 	D(ImplicitOps), DI(SrcImmByte, intn),
4608 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4609 	/* 0xD0 - 0xD7 */
4610 	G(Src2One | ByteOp, group2), G(Src2One, group2),
4611 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4612 	I(DstAcc | SrcImmUByte | No64, em_aam),
4613 	I(DstAcc | SrcImmUByte | No64, em_aad),
4614 	F(DstAcc | ByteOp | No64, em_salc),
4615 	I(DstAcc | SrcXLat | ByteOp, em_mov),
4616 	/* 0xD8 - 0xDF */
4617 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4618 	/* 0xE0 - 0xE7 */
4619 	X3(I(SrcImmByte | NearBranch, em_loop)),
4620 	I(SrcImmByte | NearBranch, em_jcxz),
4621 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4622 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4623 	/* 0xE8 - 0xEF */
4624 	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4625 	I(SrcImmFAddr | No64, em_jmp_far),
4626 	D(SrcImmByte | ImplicitOps | NearBranch),
4627 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4628 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4629 	/* 0xF0 - 0xF7 */
4630 	N, DI(ImplicitOps, icebp), N, N,
4631 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4632 	G(ByteOp, group3), G(0, group3),
4633 	/* 0xF8 - 0xFF */
4634 	D(ImplicitOps), D(ImplicitOps),
4635 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4636 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4637 };
4638 
4639 static const struct opcode twobyte_table[256] = {
4640 	/* 0x00 - 0x0F */
4641 	G(0, group6), GD(0, &group7), N, N,
4642 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4643 	II(ImplicitOps | Priv, em_clts, clts), N,
4644 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4645 	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4646 	/* 0x10 - 0x1F */
4647 	N, N, N, N, N, N, N, N,
4648 	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4649 	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4650 	/* 0x20 - 0x2F */
4651 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4652 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4653 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4654 						check_cr_write),
4655 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4656 						check_dr_write),
4657 	N, N, N, N,
4658 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4659 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4660 	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4661 	N, N, N, N,
4662 	/* 0x30 - 0x3F */
4663 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4664 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4665 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4666 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4667 	I(ImplicitOps | EmulateOnUD, em_sysenter),
4668 	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4669 	N, N,
4670 	N, N, N, N, N, N, N, N,
4671 	/* 0x40 - 0x4F */
4672 	X16(D(DstReg | SrcMem | ModRM)),
4673 	/* 0x50 - 0x5F */
4674 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4675 	/* 0x60 - 0x6F */
4676 	N, N, N, N,
4677 	N, N, N, N,
4678 	N, N, N, N,
4679 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4680 	/* 0x70 - 0x7F */
4681 	N, N, N, N,
4682 	N, N, N, N,
4683 	N, N, N, N,
4684 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4685 	/* 0x80 - 0x8F */
4686 	X16(D(SrcImm | NearBranch)),
4687 	/* 0x90 - 0x9F */
4688 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4689 	/* 0xA0 - 0xA7 */
4690 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4691 	II(ImplicitOps, em_cpuid, cpuid),
4692 	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4693 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4694 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4695 	/* 0xA8 - 0xAF */
4696 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4697 	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4698 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4699 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4700 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4701 	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4702 	/* 0xB0 - 0xB7 */
4703 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4704 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4705 	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4706 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4707 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4708 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4709 	/* 0xB8 - 0xBF */
4710 	N, N,
4711 	G(BitOp, group8),
4712 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4713 	I(DstReg | SrcMem | ModRM, em_bsf_c),
4714 	I(DstReg | SrcMem | ModRM, em_bsr_c),
4715 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4716 	/* 0xC0 - 0xC7 */
4717 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4718 	N, ID(0, &instr_dual_0f_c3),
4719 	N, N, N, GD(0, &group9),
4720 	/* 0xC8 - 0xCF */
4721 	X8(I(DstReg, em_bswap)),
4722 	/* 0xD0 - 0xDF */
4723 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4724 	/* 0xE0 - 0xEF */
4725 	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4726 	N, N, N, N, N, N, N, N,
4727 	/* 0xF0 - 0xFF */
4728 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4729 };
4730 
4731 static const struct instr_dual instr_dual_0f_38_f0 = {
4732 	I(DstReg | SrcMem | Mov, em_movbe), N
4733 };
4734 
4735 static const struct instr_dual instr_dual_0f_38_f1 = {
4736 	I(DstMem | SrcReg | Mov, em_movbe), N
4737 };
4738 
4739 static const struct gprefix three_byte_0f_38_f0 = {
4740 	ID(0, &instr_dual_0f_38_f0), N, N, N
4741 };
4742 
4743 static const struct gprefix three_byte_0f_38_f1 = {
4744 	ID(0, &instr_dual_0f_38_f1), N, N, N
4745 };
4746 
4747 /*
4748  * The instructions below are indexed by the third opcode byte and further
4749  * selected by the mandatory (SIMD) prefix.
4750  */
4751 static const struct opcode opcode_map_0f_38[256] = {
4752 	/* 0x00 - 0x7f */
4753 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4754 	/* 0x80 - 0xef */
4755 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4756 	/* 0xf0 - 0xf1 */
4757 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4758 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4759 	/* 0xf2 - 0xff */
4760 	N, N, X4(N), X8(N)
4761 };
4762 
4763 #undef D
4764 #undef N
4765 #undef G
4766 #undef GD
4767 #undef I
4768 #undef GP
4769 #undef EXT
4770 #undef MD
4771 #undef ID
4772 
4773 #undef D2bv
4774 #undef D2bvIP
4775 #undef I2bv
4776 #undef I2bvIP
4777 #undef F6ALU
4778 
4779 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4780 {
4781 	unsigned size;
4782 
4783 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
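	/*
	 * Even with a 64-bit operand size, immediates are at most 4 bytes
	 * (extended to 64 bits by decode_imm()); only OpImm64, e.g.
	 * mov reg64, imm64, fetches a full 8-byte immediate.
	 */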
4784 	if (size == 8)
4785 		size = 4;
4786 	return size;
4787 }
4788 
4789 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4790 		      unsigned size, bool sign_extension)
4791 {
4792 	int rc = X86EMUL_CONTINUE;
4793 
4794 	op->type = OP_IMM;
4795 	op->bytes = size;
4796 	op->addr.mem.ea = ctxt->_eip;
4797 	/* NB. Immediates are sign-extended as necessary. */
4798 	switch (op->bytes) {
4799 	case 1:
4800 		op->val = insn_fetch(s8, ctxt);
4801 		break;
4802 	case 2:
4803 		op->val = insn_fetch(s16, ctxt);
4804 		break;
4805 	case 4:
4806 		op->val = insn_fetch(s32, ctxt);
4807 		break;
4808 	case 8:
4809 		op->val = insn_fetch(s64, ctxt);
4810 		break;
4811 	}
4812 	if (!sign_extension) {
4813 		switch (op->bytes) {
4814 		case 1:
4815 			op->val &= 0xff;
4816 			break;
4817 		case 2:
4818 			op->val &= 0xffff;
4819 			break;
4820 		case 4:
4821 			op->val &= 0xffffffff;
4822 			break;
4823 		}
4824 	}
4825 done:
4826 	return rc;
4827 }
4828 
4829 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4830 			  unsigned d)
4831 {
4832 	int rc = X86EMUL_CONTINUE;
4833 
4834 	switch (d) {
4835 	case OpReg:
4836 		decode_register_operand(ctxt, op);
4837 		break;
4838 	case OpImmUByte:
4839 		rc = decode_imm(ctxt, op, 1, false);
4840 		break;
4841 	case OpMem:
4842 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4843 	mem_common:
4844 		*op = ctxt->memop;
4845 		ctxt->memopp = op;
4846 		if (ctxt->d & BitOp)
4847 			fetch_bit_operand(ctxt);
4848 		op->orig_val = op->val;
4849 		break;
4850 	case OpMem64:
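		/* 8 bytes for CMPXCHG8B, 16 for CMPXCHG16B (REX.W). */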
4851 		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4852 		goto mem_common;
4853 	case OpAcc:
4854 		op->type = OP_REG;
4855 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4856 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4857 		fetch_register_operand(op);
4858 		op->orig_val = op->val;
4859 		break;
4860 	case OpAccLo:
4861 		op->type = OP_REG;
4862 		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4863 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4864 		fetch_register_operand(op);
4865 		op->orig_val = op->val;
4866 		break;
4867 	case OpAccHi:
4868 		if (ctxt->d & ByteOp) {
4869 			op->type = OP_NONE;
4870 			break;
4871 		}
4872 		op->type = OP_REG;
4873 		op->bytes = ctxt->op_bytes;
4874 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4875 		fetch_register_operand(op);
4876 		op->orig_val = op->val;
4877 		break;
4878 	case OpDI:
4879 		op->type = OP_MEM;
4880 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4881 		op->addr.mem.ea =
4882 			register_address(ctxt, VCPU_REGS_RDI);
4883 		op->addr.mem.seg = VCPU_SREG_ES;
4884 		op->val = 0;
4885 		op->count = 1;
4886 		break;
4887 	case OpDX:
4888 		op->type = OP_REG;
4889 		op->bytes = 2;
4890 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4891 		fetch_register_operand(op);
4892 		break;
4893 	case OpCL:
4894 		op->type = OP_IMM;
4895 		op->bytes = 1;
4896 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4897 		break;
4898 	case OpImmByte:
4899 		rc = decode_imm(ctxt, op, 1, true);
4900 		break;
4901 	case OpOne:
4902 		op->type = OP_IMM;
4903 		op->bytes = 1;
4904 		op->val = 1;
4905 		break;
4906 	case OpImm:
4907 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4908 		break;
4909 	case OpImm64:
4910 		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4911 		break;
4912 	case OpMem8:
4913 		ctxt->memop.bytes = 1;
4914 		if (ctxt->memop.type == OP_REG) {
4915 			ctxt->memop.addr.reg = decode_register(ctxt,
4916 					ctxt->modrm_rm, true);
4917 			fetch_register_operand(&ctxt->memop);
4918 		}
4919 		goto mem_common;
4920 	case OpMem16:
4921 		ctxt->memop.bytes = 2;
4922 		goto mem_common;
4923 	case OpMem32:
4924 		ctxt->memop.bytes = 4;
4925 		goto mem_common;
4926 	case OpImmU16:
4927 		rc = decode_imm(ctxt, op, 2, false);
4928 		break;
4929 	case OpImmU:
4930 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4931 		break;
4932 	case OpSI:
4933 		op->type = OP_MEM;
4934 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4935 		op->addr.mem.ea =
4936 			register_address(ctxt, VCPU_REGS_RSI);
4937 		op->addr.mem.seg = ctxt->seg_override;
4938 		op->val = 0;
4939 		op->count = 1;
4940 		break;
4941 	case OpXLat:
4942 		op->type = OP_MEM;
4943 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4944 		op->addr.mem.ea =
4945 			address_mask(ctxt,
4946 				reg_read(ctxt, VCPU_REGS_RBX) +
4947 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4948 		op->addr.mem.seg = ctxt->seg_override;
4949 		op->val = 0;
4950 		break;
4951 	case OpImmFAddr:
4952 		op->type = OP_IMM;
4953 		op->addr.mem.ea = ctxt->_eip;
4954 		op->bytes = ctxt->op_bytes + 2;
4955 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4956 		break;
4957 	case OpMemFAddr:
4958 		ctxt->memop.bytes = ctxt->op_bytes + 2;
4959 		goto mem_common;
4960 	case OpES:
4961 		op->type = OP_IMM;
4962 		op->val = VCPU_SREG_ES;
4963 		break;
4964 	case OpCS:
4965 		op->type = OP_IMM;
4966 		op->val = VCPU_SREG_CS;
4967 		break;
4968 	case OpSS:
4969 		op->type = OP_IMM;
4970 		op->val = VCPU_SREG_SS;
4971 		break;
4972 	case OpDS:
4973 		op->type = OP_IMM;
4974 		op->val = VCPU_SREG_DS;
4975 		break;
4976 	case OpFS:
4977 		op->type = OP_IMM;
4978 		op->val = VCPU_SREG_FS;
4979 		break;
4980 	case OpGS:
4981 		op->type = OP_IMM;
4982 		op->val = VCPU_SREG_GS;
4983 		break;
4984 	case OpImplicit:
4985 		/* Special instructions do their own operand decoding. */
4986 	default:
4987 		op->type = OP_NONE; /* Disable writeback. */
4988 		break;
4989 	}
4990 
4991 done:
4992 	return rc;
4993 }
4994 
4995 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4996 {
4997 	int rc = X86EMUL_CONTINUE;
4998 	int mode = ctxt->mode;
4999 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5000 	bool op_prefix = false;
5001 	bool has_seg_override = false;
5002 	struct opcode opcode;
5003 
5004 	ctxt->memop.type = OP_NONE;
5005 	ctxt->memopp = NULL;
5006 	ctxt->_eip = ctxt->eip;
5007 	ctxt->fetch.ptr = ctxt->fetch.data;
5008 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
5009 	ctxt->opcode_len = 1;
5010 	if (insn_len > 0)
5011 		memcpy(ctxt->fetch.data, insn, insn_len);
5012 	else {
5013 		rc = __do_insn_fetch_bytes(ctxt, 1);
5014 		if (rc != X86EMUL_CONTINUE)
5015 			return rc;
5016 	}
5017 
5018 	switch (mode) {
5019 	case X86EMUL_MODE_REAL:
5020 	case X86EMUL_MODE_VM86:
5021 	case X86EMUL_MODE_PROT16:
5022 		def_op_bytes = def_ad_bytes = 2;
5023 		break;
5024 	case X86EMUL_MODE_PROT32:
5025 		def_op_bytes = def_ad_bytes = 4;
5026 		break;
5027 #ifdef CONFIG_X86_64
5028 	case X86EMUL_MODE_PROT64:
5029 		def_op_bytes = 4;
5030 		def_ad_bytes = 8;
5031 		break;
5032 #endif
5033 	default:
5034 		return EMULATION_FAILED;
5035 	}
5036 
5037 	ctxt->op_bytes = def_op_bytes;
5038 	ctxt->ad_bytes = def_ad_bytes;
5039 
5040 	/* Legacy prefixes. */
5041 	for (;;) {
5042 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
5043 		case 0x66:	/* operand-size override */
5044 			op_prefix = true;
5045 			/* switch between 2/4 bytes */
5046 			ctxt->op_bytes = def_op_bytes ^ 6;
5047 			break;
5048 		case 0x67:	/* address-size override */
5049 			if (mode == X86EMUL_MODE_PROT64)
5050 				/* switch between 4/8 bytes */
5051 				ctxt->ad_bytes = def_ad_bytes ^ 12;
5052 			else
5053 				/* switch between 2/4 bytes */
5054 				ctxt->ad_bytes = def_ad_bytes ^ 6;
5055 			break;
5056 		case 0x26:	/* ES override */
5057 		case 0x2e:	/* CS override */
5058 		case 0x36:	/* SS override */
5059 		case 0x3e:	/* DS override */
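			/* (b >> 3) & 3 maps 0x26/0x2e/0x36/0x3e to ES/CS/SS/DS (0-3). */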
5060 			has_seg_override = true;
5061 			ctxt->seg_override = (ctxt->b >> 3) & 3;
5062 			break;
5063 		case 0x64:	/* FS override */
5064 		case 0x65:	/* GS override */
5065 			has_seg_override = true;
5066 			ctxt->seg_override = ctxt->b & 7;
5067 			break;
5068 		case 0x40 ... 0x4f: /* REX */
5069 			if (mode != X86EMUL_MODE_PROT64)
5070 				goto done_prefixes;
5071 			ctxt->rex_prefix = ctxt->b;
5072 			continue;
5073 		case 0xf0:	/* LOCK */
5074 			ctxt->lock_prefix = 1;
5075 			break;
5076 		case 0xf2:	/* REPNE/REPNZ */
5077 		case 0xf3:	/* REP/REPE/REPZ */
5078 			ctxt->rep_prefix = ctxt->b;
5079 			break;
5080 		default:
5081 			goto done_prefixes;
5082 		}
5083 
5084 		/* Any legacy prefix after a REX prefix nullifies its effect. */
5085 
5086 		ctxt->rex_prefix = 0;
5087 	}
5088 
5089 done_prefixes:
5090 
5091 	/* REX prefix. */
5092 	if (ctxt->rex_prefix & 8)
5093 		ctxt->op_bytes = 8;	/* REX.W */
5094 
5095 	/* Opcode byte(s). */
5096 	opcode = opcode_table[ctxt->b];
5097 	/* Two-byte opcode? */
5098 	if (ctxt->b == 0x0f) {
5099 		ctxt->opcode_len = 2;
5100 		ctxt->b = insn_fetch(u8, ctxt);
5101 		opcode = twobyte_table[ctxt->b];
5102 
5103 		/* 0F_38 opcode map */
5104 		if (ctxt->b == 0x38) {
5105 			ctxt->opcode_len = 3;
5106 			ctxt->b = insn_fetch(u8, ctxt);
5107 			opcode = opcode_map_0f_38[ctxt->b];
5108 		}
5109 	}
5110 	ctxt->d = opcode.flags;
5111 
5112 	if (ctxt->d & ModRM)
5113 		ctxt->modrm = insn_fetch(u8, ctxt);
5114 
5115 	/* vex-prefix instructions are not implemented */
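	/*
	 * In 32-bit modes, 0xc4/0xc5 are LES/LDS, which require a memory
	 * operand (ModRM.mod != 3); mod == 3 there can only begin a VEX
	 * prefix.
	 */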
5116 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5117 	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5118 		ctxt->d = NotImpl;
5119 	}
5120 
5121 	while (ctxt->d & GroupMask) {
5122 		switch (ctxt->d & GroupMask) {
5123 		case Group:
5124 			goffset = (ctxt->modrm >> 3) & 7;
5125 			opcode = opcode.u.group[goffset];
5126 			break;
5127 		case GroupDual:
5128 			goffset = (ctxt->modrm >> 3) & 7;
5129 			if ((ctxt->modrm >> 6) == 3)
5130 				opcode = opcode.u.gdual->mod3[goffset];
5131 			else
5132 				opcode = opcode.u.gdual->mod012[goffset];
5133 			break;
5134 		case RMExt:
5135 			goffset = ctxt->modrm & 7;
5136 			opcode = opcode.u.group[goffset];
5137 			break;
5138 		case Prefix:
5139 			if (ctxt->rep_prefix && op_prefix)
5140 				return EMULATION_FAILED;
5141 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5142 			switch (simd_prefix) {
5143 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5144 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5145 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5146 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5147 			}
5148 			break;
5149 		case Escape:
5150 			if (ctxt->modrm > 0xbf)
5151 				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5152 			else
5153 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5154 			break;
5155 		case InstrDual:
5156 			if ((ctxt->modrm >> 6) == 3)
5157 				opcode = opcode.u.idual->mod3;
5158 			else
5159 				opcode = opcode.u.idual->mod012;
5160 			break;
5161 		case ModeDual:
5162 			if (ctxt->mode == X86EMUL_MODE_PROT64)
5163 				opcode = opcode.u.mdual->mode64;
5164 			else
5165 				opcode = opcode.u.mdual->mode32;
5166 			break;
5167 		default:
5168 			return EMULATION_FAILED;
5169 		}
5170 
5171 		ctxt->d &= ~(u64)GroupMask;
5172 		ctxt->d |= opcode.flags;
5173 	}
5174 
5175 	/* Unrecognised? */
5176 	if (ctxt->d == 0)
5177 		return EMULATION_FAILED;
5178 
5179 	ctxt->execute = opcode.u.execute;
5180 
5181 	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5182 		return EMULATION_FAILED;
5183 
5184 	if (unlikely(ctxt->d &
5185 	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5186 	     No16))) {
5187 		/*
5188 		 * These are copied unconditionally here, and checked unconditionally
5189 		 * in x86_emulate_insn.
5190 		 */
5191 		ctxt->check_perm = opcode.check_perm;
5192 		ctxt->intercept = opcode.intercept;
5193 
5194 		if (ctxt->d & NotImpl)
5195 			return EMULATION_FAILED;
5196 
5197 		if (mode == X86EMUL_MODE_PROT64) {
5198 			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5199 				ctxt->op_bytes = 8;
5200 			else if (ctxt->d & NearBranch)
5201 				ctxt->op_bytes = 8;
5202 		}
5203 
5204 		if (ctxt->d & Op3264) {
5205 			if (mode == X86EMUL_MODE_PROT64)
5206 				ctxt->op_bytes = 8;
5207 			else
5208 				ctxt->op_bytes = 4;
5209 		}
5210 
5211 		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5212 			ctxt->op_bytes = 4;
5213 
5214 		if (ctxt->d & Sse)
5215 			ctxt->op_bytes = 16;
5216 		else if (ctxt->d & Mmx)
5217 			ctxt->op_bytes = 8;
5218 	}
5219 
5220 	/* ModRM and SIB bytes. */
5221 	if (ctxt->d & ModRM) {
5222 		rc = decode_modrm(ctxt, &ctxt->memop);
5223 		if (!has_seg_override) {
5224 			has_seg_override = true;
5225 			ctxt->seg_override = ctxt->modrm_seg;
5226 		}
5227 	} else if (ctxt->d & MemAbs)
5228 		rc = decode_abs(ctxt, &ctxt->memop);
5229 	if (rc != X86EMUL_CONTINUE)
5230 		goto done;
5231 
5232 	if (!has_seg_override)
5233 		ctxt->seg_override = VCPU_SREG_DS;
5234 
5235 	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5236 
5237 	/*
5238 	 * Decode and fetch the source operand: register, memory
5239 	 * or immediate.
5240 	 */
5241 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5242 	if (rc != X86EMUL_CONTINUE)
5243 		goto done;
5244 
5245 	/*
5246 	 * Decode and fetch the second source operand: register, memory
5247 	 * or immediate.
5248 	 */
5249 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5250 	if (rc != X86EMUL_CONTINUE)
5251 		goto done;
5252 
5253 	/* Decode and fetch the destination operand: register or memory. */
5254 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5255 
5256 	if (ctxt->rip_relative && likely(ctxt->memopp))
5257 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5258 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5259 
5260 done:
5261 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5262 }
5263 
5264 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5265 {
5266 	return ctxt->d & PageTable;
5267 }
5268 
5269 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5270 {
5271 	/* The second termination condition only applies to REPE
5272 	 * and REPNE.  Test whether the repeat string operation prefix is
5273 	 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
5274 	 * termination condition:
5275 	 * 	- if REPE/REPZ and ZF = 0 then done
5276 	 * 	- if REPNE/REPNZ and ZF = 1 then done
5277 	 */
5278 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5279 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5280 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5281 		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5282 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5283 		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5284 		return true;
5285 
5286 	return false;
5287 }
5288 
5289 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5290 {
5291 	int rc;
5292 
5293 	ctxt->ops->get_fpu(ctxt);
5294 	rc = asm_safe("fwait");
5295 	ctxt->ops->put_fpu(ctxt);
5296 
5297 	if (unlikely(rc != X86EMUL_CONTINUE))
5298 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5299 
5300 	return X86EMUL_CONTINUE;
5301 }
5302 
5303 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5304 				       struct operand *op)
5305 {
5306 	if (op->type == OP_MM)
5307 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5308 }
5309 
5310 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5311 {
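	/*
	 * A fastop block holds one handler per operand size, FASTOP_SIZE
	 * bytes apart: entry 0 is the byte op, and __ffs(dst.bytes), i.e.
	 * log2 of the operand size, selects the 2-/4-/8-byte variant.
	 */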
5312 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5313 
5314 	if (!(ctxt->d & ByteOp))
5315 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5316 
5317 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5318 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5319 	      [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
5320 	    : "c"(ctxt->src2.val));
5321 
5322 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5323 	if (!fop) /* exception is returned in fop variable */
5324 		return emulate_de(ctxt);
5325 	return X86EMUL_CONTINUE;
5326 }
5327 
5328 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5329 {
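	/*
	 * Zero the decode cache with one memset; this relies on the fields
	 * from ->rip_relative up to (but not including) ->modrm being
	 * contiguous in struct x86_emulate_ctxt.
	 */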
5330 	memset(&ctxt->rip_relative, 0,
5331 	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5332 
5333 	ctxt->io_read.pos = 0;
5334 	ctxt->io_read.end = 0;
5335 	ctxt->mem_read.end = 0;
5336 }
5337 
5338 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5339 {
5340 	const struct x86_emulate_ops *ops = ctxt->ops;
5341 	int rc = X86EMUL_CONTINUE;
5342 	int saved_dst_type = ctxt->dst.type;
5343 	unsigned emul_flags;
5344 
5345 	ctxt->mem_read.pos = 0;
5346 
5347 	/* LOCK prefix is allowed only with some instructions */
5348 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5349 		rc = emulate_ud(ctxt);
5350 		goto done;
5351 	}
5352 
5353 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5354 		rc = emulate_ud(ctxt);
5355 		goto done;
5356 	}
5357 
5358 	emul_flags = ctxt->ops->get_hflags(ctxt);
5359 	if (unlikely(ctxt->d &
5360 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5361 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5362 				(ctxt->d & Undefined)) {
5363 			rc = emulate_ud(ctxt);
5364 			goto done;
5365 		}
5366 
5367 		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5368 		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5369 			rc = emulate_ud(ctxt);
5370 			goto done;
5371 		}
5372 
5373 		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5374 			rc = emulate_nm(ctxt);
5375 			goto done;
5376 		}
5377 
5378 		if (ctxt->d & Mmx) {
5379 			rc = flush_pending_x87_faults(ctxt);
5380 			if (rc != X86EMUL_CONTINUE)
5381 				goto done;
5382 			/*
5383 			 * Now that we know the fpu is exception safe, we can fetch
5384 			 * operands from it.
5385 			 */
5386 			fetch_possible_mmx_operand(ctxt, &ctxt->src);
5387 			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5388 			if (!(ctxt->d & Mov))
5389 				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5390 		}
5391 
5392 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5393 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5394 						      X86_ICPT_PRE_EXCEPT);
5395 			if (rc != X86EMUL_CONTINUE)
5396 				goto done;
5397 		}
5398 
5399 		/* Instruction can only be executed in protected mode */
5400 		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5401 			rc = emulate_ud(ctxt);
5402 			goto done;
5403 		}
5404 
5405 		/* Privileged instruction can be executed only in CPL=0 */
5406 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5407 			if (ctxt->d & PrivUD)
5408 				rc = emulate_ud(ctxt);
5409 			else
5410 				rc = emulate_gp(ctxt, 0);
5411 			goto done;
5412 		}
5413 
5414 		/* Do instruction specific permission checks */
5415 		if (ctxt->d & CheckPerm) {
5416 			rc = ctxt->check_perm(ctxt);
5417 			if (rc != X86EMUL_CONTINUE)
5418 				goto done;
5419 		}
5420 
5421 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5422 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5423 						      X86_ICPT_POST_EXCEPT);
5424 			if (rc != X86EMUL_CONTINUE)
5425 				goto done;
5426 		}
5427 
5428 		if (ctxt->rep_prefix && (ctxt->d & String)) {
5429 			/* All REP prefixes have the same first termination condition */
5430 			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5431 				string_registers_quirk(ctxt);
5432 				ctxt->eip = ctxt->_eip;
5433 				ctxt->eflags &= ~X86_EFLAGS_RF;
5434 				goto done;
5435 			}
5436 		}
5437 	}
5438 
5439 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5440 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5441 				    ctxt->src.valptr, ctxt->src.bytes);
5442 		if (rc != X86EMUL_CONTINUE)
5443 			goto done;
5444 		ctxt->src.orig_val64 = ctxt->src.val64;
5445 	}
5446 
5447 	if (ctxt->src2.type == OP_MEM) {
5448 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5449 				    &ctxt->src2.val, ctxt->src2.bytes);
5450 		if (rc != X86EMUL_CONTINUE)
5451 			goto done;
5452 	}
5453 
5454 	if ((ctxt->d & DstMask) == ImplicitOps)
5455 		goto special_insn;
5456 
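	/*
	 * A read-modify-write destination needs its current memory
	 * value.  If fetching it faults with #PF, report a write fault:
	 * architecturally this operand access is a write (unless
	 * writeback is suppressed by NoWrite).
	 */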
5458 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5459 		/* optimisation - avoid slow emulated read if Mov */
5460 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5461 				   &ctxt->dst.val, ctxt->dst.bytes);
5462 		if (rc != X86EMUL_CONTINUE) {
5463 			if (!(ctxt->d & NoWrite) &&
5464 			    rc == X86EMUL_PROPAGATE_FAULT &&
5465 			    ctxt->exception.vector == PF_VECTOR)
5466 				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5467 			goto done;
5468 		}
5469 	}
5470 	/* Copy full 64-bit value for CMPXCHG8B.  */
5471 	ctxt->dst.orig_val64 = ctxt->dst.val64;
5472 
5473 special_insn:
5474 
5475 	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5476 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5477 					      X86_ICPT_POST_MEMACCESS);
5478 		if (rc != X86EMUL_CONTINUE)
5479 			goto done;
5480 	}
5481 
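	/*
	 * Keep RF set while a REP string instruction is still iterating
	 * so that instruction breakpoints do not re-trigger on restart;
	 * it is cleared again once the string op completes.
	 */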
5482 	if (ctxt->rep_prefix && (ctxt->d & String))
5483 		ctxt->eflags |= X86_EFLAGS_RF;
5484 	else
5485 		ctxt->eflags &= ~X86_EFLAGS_RF;
5486 
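	/*
	 * ->execute is either a regular emulation callback or, when
	 * Fastop is set, a small assembly stub that fastop() invokes
	 * directly on the guest's operand and flag state.
	 */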
5487 	if (ctxt->execute) {
5488 		if (ctxt->d & Fastop) {
5489 			void (*fop)(struct fastop *) = (void *)ctxt->execute;
5490 			rc = fastop(ctxt, fop);
5491 			if (rc != X86EMUL_CONTINUE)
5492 				goto done;
5493 			goto writeback;
5494 		}
5495 		rc = ctxt->execute(ctxt);
5496 		if (rc != X86EMUL_CONTINUE)
5497 			goto done;
5498 		goto writeback;
5499 	}
5500 
5501 	if (ctxt->opcode_len == 2)
5502 		goto twobyte_insn;
5503 	else if (ctxt->opcode_len == 3)
5504 		goto threebyte_insn;
5505 
5506 	switch (ctxt->b) {
5507 	case 0x70 ... 0x7f: /* jcc (short) */
5508 		if (test_cc(ctxt->b, ctxt->eflags))
5509 			rc = jmp_rel(ctxt, ctxt->src.val);
5510 		break;
5511 	case 0x8d: /* lea r16/r32, m */
5512 		ctxt->dst.val = ctxt->src.addr.mem.ea;
5513 		break;
5514 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5515 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5516 			ctxt->dst.type = OP_NONE;
5517 		else
5518 			rc = em_xchg(ctxt);
5519 		break;
5520 	case 0x98: /* cbw/cwde/cdqe */
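		/*
		 * Sign-extend the lower half of rAX in place: AL->AX
		 * (cbw), AX->EAX (cwde) or EAX->RAX (cdqe), chosen by the
		 * effective operand size.
		 */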
5521 		switch (ctxt->op_bytes) {
5522 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5523 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5524 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5525 		}
5526 		break;
5527 	case 0xcc:		/* int3 */
5528 		rc = emulate_int(ctxt, 3);
5529 		break;
5530 	case 0xcd:		/* int n */
5531 		rc = emulate_int(ctxt, ctxt->src.val);
5532 		break;
5533 	case 0xce:		/* into */
5534 		if (ctxt->eflags & X86_EFLAGS_OF)
5535 			rc = emulate_int(ctxt, 4);
5536 		break;
5537 	case 0xe9: /* jmp rel */
5538 	case 0xeb: /* jmp rel short */
5539 		rc = jmp_rel(ctxt, ctxt->src.val);
5540 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5541 		break;
5542 	case 0xf4:              /* hlt */
5543 		ctxt->ops->halt(ctxt);
5544 		break;
5545 	case 0xf5:	/* cmc */
5546 		/* complement the carry flag in EFLAGS */
5547 		ctxt->eflags ^= X86_EFLAGS_CF;
5548 		break;
5549 	case 0xf8: /* clc */
5550 		ctxt->eflags &= ~X86_EFLAGS_CF;
5551 		break;
5552 	case 0xf9: /* stc */
5553 		ctxt->eflags |= X86_EFLAGS_CF;
5554 		break;
5555 	case 0xfc: /* cld */
5556 		ctxt->eflags &= ~X86_EFLAGS_DF;
5557 		break;
5558 	case 0xfd: /* std */
5559 		ctxt->eflags |= X86_EFLAGS_DF;
5560 		break;
5561 	default:
5562 		goto cannot_emulate;
5563 	}
5564 
5565 	if (rc != X86EMUL_CONTINUE)
5566 		goto done;
5567 
5568 writeback:
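	/*
	 * A few instructions also write back their source operand (xadd
	 * is the classic example); such a source is always a register,
	 * never memory.
	 */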
5569 	if (ctxt->d & SrcWrite) {
5570 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5571 		rc = writeback(ctxt, &ctxt->src);
5572 		if (rc != X86EMUL_CONTINUE)
5573 			goto done;
5574 	}
5575 	if (!(ctxt->d & NoWrite)) {
5576 		rc = writeback(ctxt, &ctxt->dst);
5577 		if (rc != X86EMUL_CONTINUE)
5578 			goto done;
5579 	}
5580 
5581 	/*
5582 	 * Restore dst type in case the decode results are reused
5583 	 * (happens for string instructions).
5584 	 */
5585 	ctxt->dst.type = saved_dst_type;
5586 
5587 	if ((ctxt->d & SrcMask) == SrcSI)
5588 		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5589 
5590 	if ((ctxt->d & DstMask) == DstDI)
5591 		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5592 
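	/*
	 * A completed REP iteration debits RCX by the number of elements
	 * actually processed; string ops may batch several iterations in
	 * one pass through the emulator.
	 */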
5593 	if (ctxt->rep_prefix && (ctxt->d & String)) {
5594 		unsigned int count;
5595 		struct read_cache *r = &ctxt->io_read;
5596 		if ((ctxt->d & SrcMask) == SrcSI)
5597 			count = ctxt->src.count;
5598 		else
5599 			count = ctxt->dst.count;
5600 		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5601 
5602 		if (!string_insn_completed(ctxt)) {
5603 			/*
5604 			 * Re-enter the guest when the PIO read-ahead buffer is
5605 			 * empty or, if it is not used, after every 1024 iterations.
5606 			 */
5607 			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5608 			    (r->end == 0 || r->end != r->pos)) {
5609 				/*
5610 				 * Reset read cache. Usually happens before
5611 				 * decode, but since instruction is restarted
5612 				 * we have to do it here.
5613 				 */
5614 				ctxt->mem_read.end = 0;
5615 				writeback_registers(ctxt);
5616 				return EMULATION_RESTART;
5617 			}
5618 			goto done; /* skip rip writeback */
5619 		}
5620 		ctxt->eflags &= ~X86_EFLAGS_RF;
5621 	}
5622 
5623 	ctxt->eip = ctxt->_eip;
5624 
5625 done:
5626 	if (rc == X86EMUL_PROPAGATE_FAULT) {
5627 		WARN_ON(ctxt->exception.vector > 0x1f);
5628 		ctxt->have_exception = true;
5629 	}
5630 	if (rc == X86EMUL_INTERCEPTED)
5631 		return EMULATION_INTERCEPTED;
5632 
5633 	if (rc == X86EMUL_CONTINUE)
5634 		writeback_registers(ctxt);
5635 
5636 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5637 
5638 twobyte_insn:
5639 	switch (ctxt->b) {
5640 	case 0x09:		/* wbinvd */
5641 		(ctxt->ops->wbinvd)(ctxt);
5642 		break;
5643 	case 0x08:		/* invd */
5644 	case 0x0d:		/* GrpP (prefetch) */
5645 	case 0x18:		/* Grp16 (prefetch/nop) */
5646 	case 0x1f:		/* nop */
5647 		break;
5648 	case 0x20: /* mov cr, reg */
5649 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5650 		break;
5651 	case 0x21: /* mov from dr to reg */
5652 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5653 		break;
5654 	case 0x40 ... 0x4f:	/* cmov */
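		/*
		 * Even when the condition is false, a 32-bit cmov in
		 * 64-bit mode still zero-extends the destination
		 * register, so writeback is suppressed only for the
		 * other operand sizes.
		 */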
5655 		if (test_cc(ctxt->b, ctxt->eflags))
5656 			ctxt->dst.val = ctxt->src.val;
5657 		else if (ctxt->op_bytes != 4)
5658 			ctxt->dst.type = OP_NONE; /* no writeback */
5659 		break;
5660 	case 0x80 ... 0x8f: /* jcc (near) */
5661 		if (test_cc(ctxt->b, ctxt->eflags))
5662 			rc = jmp_rel(ctxt, ctxt->src.val);
5663 		break;
5664 	case 0x90 ... 0x9f:     /* setcc r/m8 */
5665 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5666 		break;
5667 	case 0xb6 ... 0xb7:	/* movzx */
5668 		ctxt->dst.bytes = ctxt->op_bytes;
5669 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5670 						       : (u16) ctxt->src.val;
5671 		break;
5672 	case 0xbe ... 0xbf:	/* movsx */
5673 		ctxt->dst.bytes = ctxt->op_bytes;
5674 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5675 							(s16) ctxt->src.val;
5676 		break;
5677 	default:
5678 		goto cannot_emulate;
5679 	}
5680 
5681 threebyte_insn:
5682 
5683 	if (rc != X86EMUL_CONTINUE)
5684 		goto done;
5685 
5686 	goto writeback;
5687 
5688 cannot_emulate:
5689 	return EMULATION_FAILED;
5690 }
5691 
5692 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5693 {
5694 	invalidate_registers(ctxt);
5695 }
5696 
5697 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5698 {
5699 	writeback_registers(ctxt);
5700 }
5701 
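/*
 * A cached guest physical address may be reused only if the instruction
 * accesses a single, fixed memory location: REP string ops and
 * two-memory-operand instructions (e.g. movs) can touch multiple
 * locations, so force a fresh translation for those.
 */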
5702 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5703 {
5704 	if (ctxt->rep_prefix && (ctxt->d & String))
5705 		return false;
5706 
5707 	if (ctxt->d & TwoMemOp)
5708 		return false;
5709 
5710 	return true;
5711 }
5712