/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
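
/*
 * For illustration: operand types pack into the per-opcode flags word
 * as 5-bit fields, e.g.
 *
 *	DstReg | SrcMem == (OpReg << DstShift) | (OpMem << SrcShift) == 0x0c4
 *
 * and the decoder recovers them as (flags >> DstShift) & OpMask and
 * (flags >> SrcShift) & OpMask.
 */
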
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
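
/*
 * For illustration: with FASTOP_SIZE == 8, the b/w/l/q variants emitted
 * by the FASTOP*() macros below occupy consecutive 8-byte slots, so e.g.
 * the 32-bit handler of em_add lives at em_add + 2 * FASTOP_SIZE.  The
 * dispatcher (fastop(), defined later in this file) derives that offset
 * from the operand size.
 */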

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
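
/*
 * Each SETcc stub above is aligned to 4 bytes, which is what lets
 * test_cc() below reach the stub for condition code cc at
 * em_setcc + 4 * cc.
 */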

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
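	/* e.g. ad_bytes == 2 yields 0xffff, ad_bytes == 4 yields 0xffffffff */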
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

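	/* e.g. a 4K-granular limit of 0xfffff scales to 0xffffffff */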
	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

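	/*
	 * e.g. a 16-byte MOVDQA (Aligned) to a linear address that is not
	 * 16-byte aligned will take a #GP in __linearize() below.
	 */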
	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

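	/* 15 ^ cur_size == 15 - cur_size here, as cur_size never exceeds 15 */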
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, return a pointer into the
 * register block that addresses the relevant register.
 * When @byteop is set and there is no REX prefix, encodings 4-7 select
 * the legacy high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

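	/* a 16-bit operand size still loads a 24-bit base (lgdt/lidt) */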
	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
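		/*
		 * Worked example: modrm 0x46 plus a disp8 (mod=1, rm=6)
		 * decodes below to bp + disp8 with an implicit SS segment.
		 */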
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

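	/*
	 * Worked example: "bt %cx, mem" with %cx == 100 and a 16-bit
	 * destination: mask == ~15 and sv == 96, so the effective address
	 * advances by 96 >> 3 == 12 bytes and bit 100 & 15 == 4 is tested
	 * within the new word.
	 */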
	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

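	/* the #GP error code is the vector index shifted, with the IDT bit set */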
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
1849 		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
1850 						reg_read(ctxt, reg);
1851 
1852 		rc = em_push(ctxt);
1853 		if (rc != X86EMUL_CONTINUE)
1854 			return rc;
1855 
1856 		++reg;
1857 	}
1858 
1859 	return rc;
1860 }
1861 
1862 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1863 {
1864 	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
1865 	return em_push(ctxt);
1866 }
1867 
1868 static int em_popa(struct x86_emulate_ctxt *ctxt)
1869 {
1870 	int rc = X86EMUL_CONTINUE;
1871 	int reg = VCPU_REGS_RDI;
1872 
1873 	while (reg >= VCPU_REGS_RAX) {
1874 		if (reg == VCPU_REGS_RSP) {
1875 			rsp_increment(ctxt, ctxt->op_bytes);
1876 			--reg;
1877 		}
1878 
1879 		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1880 		if (rc != X86EMUL_CONTINUE)
1881 			break;
1882 		--reg;
1883 	}
1884 	return rc;
1885 }
1886 
1887 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1888 {
1889 	const struct x86_emulate_ops *ops = ctxt->ops;
1890 	int rc;
1891 	struct desc_ptr dt;
1892 	gva_t cs_addr;
1893 	gva_t eip_addr;
1894 	u16 cs, eip;
1895 
1896 	/* TODO: Add limit checks */
1897 	ctxt->src.val = ctxt->eflags;
1898 	rc = em_push(ctxt);
1899 	if (rc != X86EMUL_CONTINUE)
1900 		return rc;
1901 
1902 	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1903 
1904 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1905 	rc = em_push(ctxt);
1906 	if (rc != X86EMUL_CONTINUE)
1907 		return rc;
1908 
1909 	ctxt->src.val = ctxt->_eip;
1910 	rc = em_push(ctxt);
1911 	if (rc != X86EMUL_CONTINUE)
1912 		return rc;
1913 
1914 	ops->get_idt(ctxt, &dt);
1915 
1916 	eip_addr = dt.address + (irq << 2);
1917 	cs_addr = dt.address + (irq << 2) + 2;
1918 
1919 	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1920 	if (rc != X86EMUL_CONTINUE)
1921 		return rc;
1922 
1923 	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1924 	if (rc != X86EMUL_CONTINUE)
1925 		return rc;
1926 
1927 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1928 	if (rc != X86EMUL_CONTINUE)
1929 		return rc;
1930 
1931 	ctxt->_eip = eip;
1932 
1933 	return rc;
1934 }
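
/*
 * Worked example of the real-mode IVT lookup above (vector number is
 * illustrative): for irq 0x10 the entry lives at dt.address + 0x40,
 * so the new IP is read from offset 0x40 and the new CS from offset
 * 0x42, matching the 16-bit offset:segment layout of each 4-byte IVT
 * slot.
 */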
1935 
1936 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1937 {
1938 	int rc;
1939 
1940 	invalidate_registers(ctxt);
1941 	rc = __emulate_int_real(ctxt, irq);
1942 	if (rc == X86EMUL_CONTINUE)
1943 		writeback_registers(ctxt);
1944 	return rc;
1945 }
1946 
1947 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1948 {
1949 	switch (ctxt->mode) {
1950 	case X86EMUL_MODE_REAL:
1951 		return __emulate_int_real(ctxt, irq);
1952 	case X86EMUL_MODE_VM86:
1953 	case X86EMUL_MODE_PROT16:
1954 	case X86EMUL_MODE_PROT32:
1955 	case X86EMUL_MODE_PROT64:
1956 	default:
1957 		/* Protected-mode interrupts are not implemented yet */
1958 		return X86EMUL_UNHANDLEABLE;
1959 	}
1960 }
1961 
1962 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1963 {
1964 	int rc = X86EMUL_CONTINUE;
1965 	unsigned long temp_eip = 0;
1966 	unsigned long temp_eflags = 0;
1967 	unsigned long cs = 0;
1968 	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1969 			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1970 			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1971 	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1972 
1973 	/* TODO: Add stack limit check */
1974 
1975 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1976 
1977 	if (rc != X86EMUL_CONTINUE)
1978 		return rc;
1979 
1980 	if (temp_eip & ~0xffff)
1981 		return emulate_gp(ctxt, 0);
1982 
1983 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1984 
1985 	if (rc != X86EMUL_CONTINUE)
1986 		return rc;
1987 
1988 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1989 
1990 	if (rc != X86EMUL_CONTINUE)
1991 		return rc;
1992 
1993 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1994 
1995 	if (rc != X86EMUL_CONTINUE)
1996 		return rc;
1997 
1998 	ctxt->_eip = temp_eip;
1999 
2001 	if (ctxt->op_bytes == 4)
2002 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2003 	else if (ctxt->op_bytes == 2) {
2004 		ctxt->eflags &= ~0xffff;
2005 		ctxt->eflags |= temp_eflags;
2006 	}
2007 
2008 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2009 	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2010 
2011 	return rc;
2012 }
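
/*
 * Example of the flag restore above (popped value is illustrative): a
 * 16-bit IRET with popped flags 0x0002 replaces only the low word of
 * EFLAGS, while a 32-bit IRET filters the popped value through 'mask'
 * and keeps the VM/VIF/VIP bits already in ctxt->eflags; either way
 * the fixed bit 1 is forced back on afterwards.
 */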
2013 
2014 static int em_iret(struct x86_emulate_ctxt *ctxt)
2015 {
2016 	switch (ctxt->mode) {
2017 	case X86EMUL_MODE_REAL:
2018 		return emulate_iret_real(ctxt);
2019 	case X86EMUL_MODE_VM86:
2020 	case X86EMUL_MODE_PROT16:
2021 	case X86EMUL_MODE_PROT32:
2022 	case X86EMUL_MODE_PROT64:
2023 	default:
2024 		/* IRET from protected mode is not implemented yet */
2025 		return X86EMUL_UNHANDLEABLE;
2026 	}
2027 }
2028 
2029 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2030 {
2031 	int rc;
2032 	unsigned short sel, old_sel;
2033 	struct desc_struct old_desc, new_desc;
2034 	const struct x86_emulate_ops *ops = ctxt->ops;
2035 	u8 cpl = ctxt->ops->cpl(ctxt);
2036 
2037 	/* Assignment of RIP may only fail in 64-bit mode */
2038 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2039 		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2040 				 VCPU_SREG_CS);
2041 
2042 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2043 
2044 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2045 				       &new_desc);
2046 	if (rc != X86EMUL_CONTINUE)
2047 		return rc;
2048 
2049 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2050 	if (rc != X86EMUL_CONTINUE) {
2051 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2052 		/* assigning eip failed; restore the old cs */
2053 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2054 		return rc;
2055 	}
2056 	return rc;
2057 }
2058 
2059 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2060 {
2061 	return assign_eip_near(ctxt, ctxt->src.val);
2062 }
2063 
2064 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2065 {
2066 	int rc;
2067 	long int old_eip;
2068 
2069 	old_eip = ctxt->_eip;
2070 	rc = assign_eip_near(ctxt, ctxt->src.val);
2071 	if (rc != X86EMUL_CONTINUE)
2072 		return rc;
2073 	ctxt->src.val = old_eip;
2074 	rc = em_push(ctxt);
2075 	return rc;
2076 }
2077 
2078 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2079 {
2080 	u64 old = ctxt->dst.orig_val64;
2081 
2082 	if (ctxt->dst.bytes == 16)
2083 		return X86EMUL_UNHANDLEABLE;
2084 
2085 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2086 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2087 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2088 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2089 		ctxt->eflags &= ~EFLG_ZF;
2090 	} else {
2091 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2092 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2093 
2094 		ctxt->eflags |= EFLG_ZF;
2095 	}
2096 	return X86EMUL_CONTINUE;
2097 }
2098 
2099 static int em_ret(struct x86_emulate_ctxt *ctxt)
2100 {
2101 	int rc;
2102 	unsigned long eip;
2103 
2104 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2105 	if (rc != X86EMUL_CONTINUE)
2106 		return rc;
2107 
2108 	return assign_eip_near(ctxt, eip);
2109 }
2110 
2111 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2112 {
2113 	int rc;
2114 	unsigned long eip, cs;
2115 	u16 old_cs;
2116 	int cpl = ctxt->ops->cpl(ctxt);
2117 	struct desc_struct old_desc, new_desc;
2118 	const struct x86_emulate_ops *ops = ctxt->ops;
2119 
2120 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2121 		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2122 				 VCPU_SREG_CS);
2123 
2124 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2125 	if (rc != X86EMUL_CONTINUE)
2126 		return rc;
2127 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2128 	if (rc != X86EMUL_CONTINUE)
2129 		return rc;
2130 	/* Outer-privilege level return is not implemented */
2131 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2132 		return X86EMUL_UNHANDLEABLE;
2133 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
2134 				       &new_desc);
2135 	if (rc != X86EMUL_CONTINUE)
2136 		return rc;
2137 	rc = assign_eip_far(ctxt, eip, &new_desc);
2138 	if (rc != X86EMUL_CONTINUE) {
2139 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2140 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2141 	}
2142 	return rc;
2143 }
2144 
2145 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2146 {
2147 	int rc;
2148 
2149 	rc = em_ret_far(ctxt);
2150 	if (rc != X86EMUL_CONTINUE)
2151 		return rc;
2152 	rsp_increment(ctxt, ctxt->src.val);
2153 	return X86EMUL_CONTINUE;
2154 }
2155 
2156 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2157 {
2158 	/* Save real source value, then compare EAX against destination. */
2159 	ctxt->dst.orig_val = ctxt->dst.val;
2160 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2161 	ctxt->src.orig_val = ctxt->src.val;
2162 	ctxt->src.val = ctxt->dst.orig_val;
2163 	fastop(ctxt, em_cmp);
2164 
2165 	if (ctxt->eflags & EFLG_ZF) {
2166 		/* Success: write back to memory. */
2167 		ctxt->dst.val = ctxt->src.orig_val;
2168 	} else {
2169 		/* Failure: write the value we saw to EAX. */
2170 		ctxt->dst.type = OP_REG;
2171 		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2172 		ctxt->dst.val = ctxt->dst.orig_val;
2173 	}
2174 	return X86EMUL_CONTINUE;
2175 }
2176 
2177 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2178 {
2179 	int seg = ctxt->src2.val;
2180 	unsigned short sel;
2181 	int rc;
2182 
2183 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2184 
2185 	rc = load_segment_descriptor(ctxt, sel, seg);
2186 	if (rc != X86EMUL_CONTINUE)
2187 		return rc;
2188 
2189 	ctxt->dst.val = ctxt->src.val;
2190 	return rc;
2191 }
2192 
2193 static void
2194 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2195 			struct desc_struct *cs, struct desc_struct *ss)
2196 {
2197 	cs->l = 0;		/* will be adjusted later */
2198 	set_desc_base(cs, 0);	/* flat segment */
2199 	cs->g = 1;		/* 4kb granularity */
2200 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2201 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2202 	cs->s = 1;
2203 	cs->dpl = 0;		/* will be adjusted later */
2204 	cs->p = 1;
2205 	cs->d = 1;
2206 	cs->avl = 0;
2207 
2208 	set_desc_base(ss, 0);	/* flat segment */
2209 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2210 	ss->g = 1;		/* 4kb granularity */
2211 	ss->s = 1;
2212 	ss->type = 0x03;	/* Read/Write, Accessed */
2213 	ss->d = 1;		/* 32bit stack segment */
2214 	ss->dpl = 0;
2215 	ss->p = 1;
2216 	ss->l = 0;
2217 	ss->avl = 0;
2218 }
2219 
2220 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2221 {
2222 	u32 eax, ebx, ecx, edx;
2223 
2224 	eax = ecx = 0;
2225 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2226 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2227 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2228 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2229 }
2230 
2231 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2232 {
2233 	const struct x86_emulate_ops *ops = ctxt->ops;
2234 	u32 eax, ebx, ecx, edx;
2235 
2236 	/*
2237 	 * syscall is always enabled in long mode, so the check only needs
2238 	 * to become vendor-specific (via cpuid) when another mode is active.
2239 	 */
2240 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2241 		return true;
2242 
2243 	eax = 0x00000000;
2244 	ecx = 0x00000000;
2245 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2246 	/*
2247 	 * Intel ("GenuineIntel")
2248 	 * Remark: Intel CPUs only support "syscall" in 64-bit long
2249 	 * mode, so a 32-bit compat application running in a 64-bit
2250 	 * guest will #UD. While that behaviour could be fixed up (by
2251 	 * emulating the AMD response), real AMD CPUs can't be made to
2252 	 * behave like Intel ones.
2253 	 */
2254 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2255 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2256 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2257 		return false;
2258 
2259 	/* AMD ("AuthenticAMD") */
2260 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2261 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2262 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2263 		return true;
2264 
2265 	/* AMD ("AMDisbetter!") */
2266 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2267 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2268 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2269 		return true;
2270 
2271 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2272 	return false;
2273 }
2274 
2275 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2276 {
2277 	const struct x86_emulate_ops *ops = ctxt->ops;
2278 	struct desc_struct cs, ss;
2279 	u64 msr_data;
2280 	u16 cs_sel, ss_sel;
2281 	u64 efer = 0;
2282 
2283 	/* syscall is not available in real mode */
2284 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2285 	    ctxt->mode == X86EMUL_MODE_VM86)
2286 		return emulate_ud(ctxt);
2287 
2288 	if (!(em_syscall_is_enabled(ctxt)))
2289 	if (!em_syscall_is_enabled(ctxt))
2290 
2291 	ops->get_msr(ctxt, MSR_EFER, &efer);
2292 	setup_syscalls_segments(ctxt, &cs, &ss);
2293 
2294 	if (!(efer & EFER_SCE))
2295 		return emulate_ud(ctxt);
2296 
2297 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2298 	msr_data >>= 32;
2299 	cs_sel = (u16)(msr_data & 0xfffc);
2300 	ss_sel = (u16)(msr_data + 8);
2301 
2302 	if (efer & EFER_LMA) {
2303 		cs.d = 0;
2304 		cs.l = 1;
2305 	}
2306 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2307 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2308 
2309 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2310 	if (efer & EFER_LMA) {
2311 #ifdef CONFIG_X86_64
2312 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2313 
2314 		ops->get_msr(ctxt,
2315 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2316 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2317 		ctxt->_eip = msr_data;
2318 
2319 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2320 		ctxt->eflags &= ~msr_data;
2321 		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2322 #endif
2323 	} else {
2324 		/* legacy mode */
2325 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2326 		ctxt->_eip = (u32)msr_data;
2327 
2328 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2329 	}
2330 
2331 	return X86EMUL_CONTINUE;
2332 }
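
/*
 * Sketch of the MSR_STAR selector math above, with an illustrative
 * STAR[47:32] value of 0x0010 (SYSCALL defines bits 47:32 as the
 * kernel CS/SS base):
 *
 *	cs_sel = 0x0010 & 0xfffc = 0x0010;	RPL forced to 0
 *	ss_sel = 0x0010 + 8	 = 0x0018;	SS follows CS in the GDT
 */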
2333 
2334 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2335 {
2336 	const struct x86_emulate_ops *ops = ctxt->ops;
2337 	struct desc_struct cs, ss;
2338 	u64 msr_data;
2339 	u16 cs_sel, ss_sel;
2340 	u64 efer = 0;
2341 
2342 	ops->get_msr(ctxt, MSR_EFER, &efer);
2343 	/* inject #GP if in real mode */
2344 	if (ctxt->mode == X86EMUL_MODE_REAL)
2345 		return emulate_gp(ctxt, 0);
2346 
2347 	/*
2348 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2349 	 * mode).
2350 	 */
2351 	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2352 	    && !vendor_intel(ctxt))
2353 		return emulate_ud(ctxt);
2354 
2355 	/* sysenter/sysexit have not been tested in 64bit mode. */
2356 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2357 		return X86EMUL_UNHANDLEABLE;
2358 
2359 	setup_syscalls_segments(ctxt, &cs, &ss);
2360 
2361 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2362 	switch (ctxt->mode) {
2363 	case X86EMUL_MODE_PROT32:
2364 		if ((msr_data & 0xfffc) == 0x0)
2365 			return emulate_gp(ctxt, 0);
2366 		break;
2367 	case X86EMUL_MODE_PROT64:
2368 		if (msr_data == 0x0)
2369 			return emulate_gp(ctxt, 0);
2370 		break;
2371 	default:
2372 		break;
2373 	}
2374 
2375 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2376 	cs_sel = (u16)msr_data;
2377 	cs_sel &= ~SELECTOR_RPL_MASK;
2378 	ss_sel = cs_sel + 8;
2379 	ss_sel &= ~SELECTOR_RPL_MASK;
2380 	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2381 		cs.d = 0;
2382 		cs.l = 1;
2383 	}
2384 
2385 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2386 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2387 
2388 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2389 	ctxt->_eip = msr_data;
2390 
2391 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2392 	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2393 
2394 	return X86EMUL_CONTINUE;
2395 }
2396 
2397 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2398 {
2399 	const struct x86_emulate_ops *ops = ctxt->ops;
2400 	struct desc_struct cs, ss;
2401 	u64 msr_data, rcx, rdx;
2402 	int usermode;
2403 	u16 cs_sel = 0, ss_sel = 0;
2404 
2405 	/* inject #GP if in real mode or Virtual 8086 mode */
2406 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2407 	    ctxt->mode == X86EMUL_MODE_VM86)
2408 		return emulate_gp(ctxt, 0);
2409 
2410 	setup_syscalls_segments(ctxt, &cs, &ss);
2411 
2412 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2413 		usermode = X86EMUL_MODE_PROT64;
2414 	else
2415 		usermode = X86EMUL_MODE_PROT32;
2416 
2417 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2418 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2419 
2420 	cs.dpl = 3;
2421 	ss.dpl = 3;
2422 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2423 	switch (usermode) {
2424 	case X86EMUL_MODE_PROT32:
2425 		cs_sel = (u16)(msr_data + 16);
2426 		if ((msr_data & 0xfffc) == 0x0)
2427 			return emulate_gp(ctxt, 0);
2428 		ss_sel = (u16)(msr_data + 24);
2429 		rcx = (u32)rcx;
2430 		rdx = (u32)rdx;
2431 		break;
2432 	case X86EMUL_MODE_PROT64:
2433 		cs_sel = (u16)(msr_data + 32);
2434 		if (msr_data == 0x0)
2435 			return emulate_gp(ctxt, 0);
2436 		ss_sel = cs_sel + 8;
2437 		cs.d = 0;
2438 		cs.l = 1;
2439 		if (is_noncanonical_address(rcx) ||
2440 		    is_noncanonical_address(rdx))
2441 			return emulate_gp(ctxt, 0);
2442 		break;
2443 	}
2444 	cs_sel |= SELECTOR_RPL_MASK;
2445 	ss_sel |= SELECTOR_RPL_MASK;
2446 
2447 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2448 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2449 
2450 	ctxt->_eip = rdx;
2451 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2452 
2453 	return X86EMUL_CONTINUE;
2454 }
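
/*
 * The SYSEXIT selector derivation above, traced with an illustrative
 * IA32_SYSENTER_CS value of 0x0008:
 *
 *	32-bit return: cs_sel = 0x0008 + 16 = 0x0018, ss_sel = 0x0020
 *	64-bit return: cs_sel = 0x0008 + 32 = 0x0028, ss_sel = 0x0030
 *
 * Both selectors then get RPL 3 or'ed in via SELECTOR_RPL_MASK.
 */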
2455 
2456 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2457 {
2458 	int iopl;
2459 	if (ctxt->mode == X86EMUL_MODE_REAL)
2460 		return false;
2461 	if (ctxt->mode == X86EMUL_MODE_VM86)
2462 		return true;
2463 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2464 	return ctxt->ops->cpl(ctxt) > iopl;
2465 }
2466 
2467 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2468 					    u16 port, u16 len)
2469 {
2470 	const struct x86_emulate_ops *ops = ctxt->ops;
2471 	struct desc_struct tr_seg;
2472 	u32 base3;
2473 	int r;
2474 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2475 	unsigned mask = (1 << len) - 1;
2476 	unsigned long base;
2477 
2478 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2479 	if (!tr_seg.p)
2480 		return false;
2481 	if (desc_limit_scaled(&tr_seg) < 103)
2482 		return false;
2483 	base = get_desc_base(&tr_seg);
2484 #ifdef CONFIG_X86_64
2485 	base |= ((u64)base3) << 32;
2486 #endif
2487 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2488 	if (r != X86EMUL_CONTINUE)
2489 		return false;
2490 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2491 		return false;
2492 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2493 	if (r != X86EMUL_CONTINUE)
2494 		return false;
2495 	if ((perm >> bit_idx) & mask)
2496 		return false;
2497 	return true;
2498 }
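
/*
 * Worked example of the TSS I/O bitmap probe above (port number is
 * illustrative): for a 2-byte access to port 0x3f9, bit_idx is
 * 0x3f9 & 7 = 1 and mask is (1 << 2) - 1 = 3, so the access is
 * allowed only if bits 1-2 of the permission word fetched from
 * base + io_bitmap_ptr + 0x3f9/8 are both clear.  Reading 16 bits at
 * a time is what lets a single check straddle a byte boundary in the
 * bitmap.
 */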
2499 
2500 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2501 				 u16 port, u16 len)
2502 {
2503 	if (ctxt->perm_ok)
2504 		return true;
2505 
2506 	if (emulator_bad_iopl(ctxt))
2507 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2508 			return false;
2509 
2510 	ctxt->perm_ok = true;
2511 
2512 	return true;
2513 }
2514 
2515 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2516 				struct tss_segment_16 *tss)
2517 {
2518 	tss->ip = ctxt->_eip;
2519 	tss->flag = ctxt->eflags;
2520 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2521 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2522 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2523 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2524 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2525 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2526 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2527 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2528 
2529 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2530 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2531 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2532 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2533 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2534 }
2535 
2536 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2537 				 struct tss_segment_16 *tss)
2538 {
2539 	int ret;
2540 	u8 cpl;
2541 
2542 	ctxt->_eip = tss->ip;
2543 	ctxt->eflags = tss->flag | 2;
2544 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2545 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2546 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2547 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2548 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2549 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2550 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2551 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2552 
2553 	/*
2554 	 * SDM says that segment selectors are loaded before segment
2555 	 * descriptors
2556 	 */
2557 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2558 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2559 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2560 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2561 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2562 
2563 	cpl = tss->cs & 3;
2564 
2565 	/*
2566 	 * Now load segment descriptors. If a fault happens at this stage,
2567 	 * it is handled in the context of the new task.
2568 	 */
2569 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2570 					true, NULL);
2571 	if (ret != X86EMUL_CONTINUE)
2572 		return ret;
2573 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2574 					true, NULL);
2575 	if (ret != X86EMUL_CONTINUE)
2576 		return ret;
2577 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2578 					true, NULL);
2579 	if (ret != X86EMUL_CONTINUE)
2580 		return ret;
2581 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2582 					true, NULL);
2583 	if (ret != X86EMUL_CONTINUE)
2584 		return ret;
2585 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2586 					true, NULL);
2587 	if (ret != X86EMUL_CONTINUE)
2588 		return ret;
2589 
2590 	return X86EMUL_CONTINUE;
2591 }
2592 
2593 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2594 			  u16 tss_selector, u16 old_tss_sel,
2595 			  ulong old_tss_base, struct desc_struct *new_desc)
2596 {
2597 	const struct x86_emulate_ops *ops = ctxt->ops;
2598 	struct tss_segment_16 tss_seg;
2599 	int ret;
2600 	u32 new_tss_base = get_desc_base(new_desc);
2601 
2602 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2603 			    &ctxt->exception);
2604 	if (ret != X86EMUL_CONTINUE)
2605 		return ret;
2606 
2607 	save_state_to_tss16(ctxt, &tss_seg);
2608 
2609 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2610 			     &ctxt->exception);
2611 	if (ret != X86EMUL_CONTINUE)
2612 		return ret;
2613 
2614 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2615 			    &ctxt->exception);
2616 	if (ret != X86EMUL_CONTINUE)
2617 		return ret;
2618 
2619 	if (old_tss_sel != 0xffff) {
2620 		tss_seg.prev_task_link = old_tss_sel;
2621 
2622 		ret = ops->write_std(ctxt, new_tss_base,
2623 				     &tss_seg.prev_task_link,
2624 				     sizeof tss_seg.prev_task_link,
2625 				     &ctxt->exception);
2626 		if (ret != X86EMUL_CONTINUE)
2627 			return ret;
2628 	}
2629 
2630 	return load_state_from_tss16(ctxt, &tss_seg);
2631 }
2632 
2633 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2634 				struct tss_segment_32 *tss)
2635 {
2636 	/* CR3 and the LDT selector are intentionally not saved */
2637 	tss->eip = ctxt->_eip;
2638 	tss->eflags = ctxt->eflags;
2639 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2640 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2641 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2642 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2643 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2644 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2645 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2646 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2647 
2648 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2649 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2650 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2651 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2652 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2653 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2654 }
2655 
2656 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2657 				 struct tss_segment_32 *tss)
2658 {
2659 	int ret;
2660 	u8 cpl;
2661 
2662 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2663 		return emulate_gp(ctxt, 0);
2664 	ctxt->_eip = tss->eip;
2665 	ctxt->eflags = tss->eflags | 2;
2666 
2667 	/* General purpose registers */
2668 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2669 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2670 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2671 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2672 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2673 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2674 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2675 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2676 
2677 	/*
2678 	 * SDM says that segment selectors are loaded before segment
2679 	 * descriptors.  This is important because CPL checks will
2680 	 * use CS.RPL.
2681 	 */
2682 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2683 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2684 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2685 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2686 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2687 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2688 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2689 
2690 	/*
2691 	 * If we're switching between Protected Mode and VM86, we need to make
2692 	 * sure to update the mode before loading the segment descriptors so
2693 	 * that the selectors are interpreted correctly.
2694 	 */
2695 	if (ctxt->eflags & X86_EFLAGS_VM) {
2696 		ctxt->mode = X86EMUL_MODE_VM86;
2697 		cpl = 3;
2698 	} else {
2699 		ctxt->mode = X86EMUL_MODE_PROT32;
2700 		cpl = tss->cs & 3;
2701 	}
2702 
2703 	/*
2704 	 * Now load segment descriptors. If a fault happens at this stage,
2705 	 * it is handled in the context of the new task.
2706 	 */
2707 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2708 					cpl, true, NULL);
2709 	if (ret != X86EMUL_CONTINUE)
2710 		return ret;
2711 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2712 					true, NULL);
2713 	if (ret != X86EMUL_CONTINUE)
2714 		return ret;
2715 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2716 					true, NULL);
2717 	if (ret != X86EMUL_CONTINUE)
2718 		return ret;
2719 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2720 					true, NULL);
2721 	if (ret != X86EMUL_CONTINUE)
2722 		return ret;
2723 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2724 					true, NULL);
2725 	if (ret != X86EMUL_CONTINUE)
2726 		return ret;
2727 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2728 					true, NULL);
2729 	if (ret != X86EMUL_CONTINUE)
2730 		return ret;
2731 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2732 					true, NULL);
2733 	if (ret != X86EMUL_CONTINUE)
2734 		return ret;
2735 
2736 	return X86EMUL_CONTINUE;
2737 }
2738 
2739 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2740 			  u16 tss_selector, u16 old_tss_sel,
2741 			  ulong old_tss_base, struct desc_struct *new_desc)
2742 {
2743 	const struct x86_emulate_ops *ops = ctxt->ops;
2744 	struct tss_segment_32 tss_seg;
2745 	int ret;
2746 	u32 new_tss_base = get_desc_base(new_desc);
2747 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
2748 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2749 
2750 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2751 			    &ctxt->exception);
2752 	if (ret != X86EMUL_CONTINUE)
2753 		/* FIXME: need to provide precise fault address */
2754 		return ret;
2755 
2756 	save_state_to_tss32(ctxt, &tss_seg);
2757 
2758 	/* Only GP registers and segment selectors are saved */
2759 	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2760 			     ldt_sel_offset - eip_offset, &ctxt->exception);
2761 	if (ret != X86EMUL_CONTINUE)
2762 		/* FIXME: need to provide precise fault address */
2763 		return ret;
2764 
2765 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2766 			    &ctxt->exception);
2767 	if (ret != X86EMUL_CONTINUE)
2768 		/* FIXME: need to provide precise fault address */
2769 		return ret;
2770 
2771 	if (old_tss_sel != 0xffff) {
2772 		tss_seg.prev_task_link = old_tss_sel;
2773 
2774 		ret = ops->write_std(ctxt, new_tss_base,
2775 				     &tss_seg.prev_task_link,
2776 				     sizeof tss_seg.prev_task_link,
2777 				     &ctxt->exception);
2778 		if (ret != X86EMUL_CONTINUE)
2779 			/* FIXME: need to provide precise fault address */
2780 			return ret;
2781 	}
2782 
2783 	return load_state_from_tss32(ctxt, &tss_seg);
2784 }
2785 
2786 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2787 				   u16 tss_selector, int idt_index, int reason,
2788 				   bool has_error_code, u32 error_code)
2789 {
2790 	const struct x86_emulate_ops *ops = ctxt->ops;
2791 	struct desc_struct curr_tss_desc, next_tss_desc;
2792 	int ret;
2793 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2794 	ulong old_tss_base =
2795 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2796 	u32 desc_limit;
2797 	ulong desc_addr;
2798 
2799 	/* FIXME: old_tss_base == ~0 ? */
2800 
2801 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2802 	if (ret != X86EMUL_CONTINUE)
2803 		return ret;
2804 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2805 	if (ret != X86EMUL_CONTINUE)
2806 		return ret;
2807 
2808 	/* FIXME: check that next_tss_desc is tss */
2809 
2810 	/*
2811 	 * Check privileges. The three cases are task switch caused by...
2812 	 *
2813 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2814 	 * 2. Exception/IRQ/iret: No check is performed
2815 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
2816 	 *    hardware checks it before exiting.
2817 	 */
2818 	if (reason == TASK_SWITCH_GATE) {
2819 		if (idt_index != -1) {
2820 			/* Software interrupts */
2821 			struct desc_struct task_gate_desc;
2822 			int dpl;
2823 
2824 			ret = read_interrupt_descriptor(ctxt, idt_index,
2825 							&task_gate_desc);
2826 			if (ret != X86EMUL_CONTINUE)
2827 				return ret;
2828 
2829 			dpl = task_gate_desc.dpl;
2830 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2831 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2832 		}
2833 	}
2834 
2835 	desc_limit = desc_limit_scaled(&next_tss_desc);
2836 	if (!next_tss_desc.p ||
2837 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2838 	     desc_limit < 0x2b)) {
2839 		return emulate_ts(ctxt, tss_selector & 0xfffc);
2840 	}
2841 
2842 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2843 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2844 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2845 	}
2846 
2847 	if (reason == TASK_SWITCH_IRET)
2848 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2849 
2850 	/* Set the back link to the previous task only if the NT bit is
2851 	   set in eflags; note that old_tss_sel is not used after this point. */
2852 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2853 		old_tss_sel = 0xffff;
2854 
2855 	if (next_tss_desc.type & 8)
2856 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2857 				     old_tss_base, &next_tss_desc);
2858 	else
2859 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2860 				     old_tss_base, &next_tss_desc);
2861 	if (ret != X86EMUL_CONTINUE)
2862 		return ret;
2863 
2864 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2865 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2866 
2867 	if (reason != TASK_SWITCH_IRET) {
2868 		next_tss_desc.type |= (1 << 1); /* set busy flag */
2869 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2870 	}
2871 
2872 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2873 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2874 
2875 	if (has_error_code) {
2876 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2877 		ctxt->lock_prefix = 0;
2878 		ctxt->src.val = (unsigned long) error_code;
2879 		ret = em_push(ctxt);
2880 	}
2881 
2882 	return ret;
2883 }
2884 
2885 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2886 			 u16 tss_selector, int idt_index, int reason,
2887 			 bool has_error_code, u32 error_code)
2888 {
2889 	int rc;
2890 
2891 	invalidate_registers(ctxt);
2892 	ctxt->_eip = ctxt->eip;
2893 	ctxt->dst.type = OP_NONE;
2894 
2895 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2896 				     has_error_code, error_code);
2897 
2898 	if (rc == X86EMUL_CONTINUE) {
2899 		ctxt->eip = ctxt->_eip;
2900 		writeback_registers(ctxt);
2901 	}
2902 
2903 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2904 }
2905 
2906 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2907 		struct operand *op)
2908 {
2909 	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2910 
2911 	register_address_increment(ctxt, reg, df * op->bytes);
2912 	op->addr.mem.ea = register_address(ctxt, reg);
2913 }
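
/*
 * Example of the DF-scaled stride above: a 4-byte string op with
 * EFLAGS.DF clear and op->count == 1 advances the register by +4 per
 * iteration, while the same op with DF set walks backwards by -4.
 */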
2914 
2915 static int em_das(struct x86_emulate_ctxt *ctxt)
2916 {
2917 	u8 al, old_al;
2918 	bool af, cf, old_cf;
2919 
2920 	cf = ctxt->eflags & X86_EFLAGS_CF;
2921 	al = ctxt->dst.val;
2922 
2923 	old_al = al;
2924 	old_cf = cf;
2925 	cf = false;
2926 	af = ctxt->eflags & X86_EFLAGS_AF;
2927 	if ((al & 0x0f) > 9 || af) {
2928 		al -= 6;
2929 		cf = old_cf | (al >= 250);
2930 		af = true;
2931 	} else {
2932 		af = false;
2933 	}
2934 	if (old_al > 0x99 || old_cf) {
2935 		al -= 0x60;
2936 		cf = true;
2937 	}
2938 
2939 	ctxt->dst.val = al;
2940 	/* Set PF, ZF, SF */
2941 	ctxt->src.type = OP_IMM;
2942 	ctxt->src.val = 0;
2943 	ctxt->src.bytes = 1;
2944 	fastop(ctxt, em_or);
2945 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2946 	if (cf)
2947 		ctxt->eflags |= X86_EFLAGS_CF;
2948 	if (af)
2949 		ctxt->eflags |= X86_EFLAGS_AF;
2950 	return X86EMUL_CONTINUE;
2951 }
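
/*
 * DAS worked example (input is illustrative): AL = 0xae with
 * AF = CF = 0.  The low nibble 0xe > 9, so AL -= 6 -> 0xa8 and
 * AF = 1; the original AL 0xae > 0x99, so AL -= 0x60 -> 0x48 and
 * CF = 1.  The 'al >= 250' test catches the borrow when an AL below
 * 6 wraps around during the first subtraction.
 */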
2952 
2953 static int em_aam(struct x86_emulate_ctxt *ctxt)
2954 {
2955 	u8 al, ah;
2956 
2957 	if (ctxt->src.val == 0)
2958 		return emulate_de(ctxt);
2959 
2960 	al = ctxt->dst.val & 0xff;
2961 	ah = al / ctxt->src.val;
2962 	al %= ctxt->src.val;
2963 
2964 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2965 
2966 	/* Set PF, ZF, SF */
2967 	ctxt->src.type = OP_IMM;
2968 	ctxt->src.val = 0;
2969 	ctxt->src.bytes = 1;
2970 	fastop(ctxt, em_or);
2971 
2972 	return X86EMUL_CONTINUE;
2973 }
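
/*
 * AAM example with the default divisor 10 (0x0a): AL = 0x35 (53)
 * yields AH = 53 / 10 = 5 and AL = 53 % 10 = 3, i.e. AX = 0x0503.
 * A zero divisor raises #DE before any register is touched.
 */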
2974 
2975 static int em_aad(struct x86_emulate_ctxt *ctxt)
2976 {
2977 	u8 al = ctxt->dst.val & 0xff;
2978 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
2979 
2980 	al = (al + (ah * ctxt->src.val)) & 0xff;
2981 
2982 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2983 
2984 	/* Set PF, ZF, SF */
2985 	ctxt->src.type = OP_IMM;
2986 	ctxt->src.val = 0;
2987 	ctxt->src.bytes = 1;
2988 	fastop(ctxt, em_or);
2989 
2990 	return X86EMUL_CONTINUE;
2991 }
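
/*
 * AAD is the inverse of the AAM example above: with AX = 0x0503 and
 * the default base 10, AL = 5 * 10 + 3 = 53 (0x35) and AH is cleared,
 * so AX ends up as 0x0035.
 */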
2992 
2993 static int em_call(struct x86_emulate_ctxt *ctxt)
2994 {
2995 	int rc;
2996 	long rel = ctxt->src.val;
2997 
2998 	ctxt->src.val = (unsigned long)ctxt->_eip;
2999 	rc = jmp_rel(ctxt, rel);
3000 	if (rc != X86EMUL_CONTINUE)
3001 		return rc;
3002 	return em_push(ctxt);
3003 }
3004 
3005 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3006 {
3007 	u16 sel, old_cs;
3008 	ulong old_eip;
3009 	int rc;
3010 	struct desc_struct old_desc, new_desc;
3011 	const struct x86_emulate_ops *ops = ctxt->ops;
3012 	int cpl = ctxt->ops->cpl(ctxt);
3013 
3014 	old_eip = ctxt->_eip;
3015 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3016 
3017 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3018 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3019 				       &new_desc);
3020 	if (rc != X86EMUL_CONTINUE)
3021 		return rc;
3022 
3023 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3024 	if (rc != X86EMUL_CONTINUE)
3025 		goto fail;
3026 
3027 	ctxt->src.val = old_cs;
3028 	rc = em_push(ctxt);
3029 	if (rc != X86EMUL_CONTINUE)
3030 		goto fail;
3031 
3032 	ctxt->src.val = old_eip;
3033 	rc = em_push(ctxt);
3034 	/* If we failed, we may have tainted memory, but at the very least
3035 	   we should restore cs. */
3036 	if (rc != X86EMUL_CONTINUE)
3037 		goto fail;
3038 	return rc;
3039 fail:
3040 	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3041 	return rc;
3042 
3043 }
3044 
3045 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3046 {
3047 	int rc;
3048 	unsigned long eip;
3049 
3050 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3051 	if (rc != X86EMUL_CONTINUE)
3052 		return rc;
3053 	rc = assign_eip_near(ctxt, eip);
3054 	if (rc != X86EMUL_CONTINUE)
3055 		return rc;
3056 	rsp_increment(ctxt, ctxt->src.val);
3057 	return X86EMUL_CONTINUE;
3058 }
3059 
3060 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3061 {
3062 	/* Write back the register source. */
3063 	ctxt->src.val = ctxt->dst.val;
3064 	write_register_operand(&ctxt->src);
3065 
3066 	/* Write back the memory destination with implicit LOCK prefix. */
3067 	ctxt->dst.val = ctxt->src.orig_val;
3068 	ctxt->lock_prefix = 1;
3069 	return X86EMUL_CONTINUE;
3070 }
3071 
3072 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3073 {
3074 	ctxt->dst.val = ctxt->src2.val;
3075 	return fastop(ctxt, em_imul);
3076 }
3077 
3078 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3079 {
3080 	ctxt->dst.type = OP_REG;
3081 	ctxt->dst.bytes = ctxt->src.bytes;
3082 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3083 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3084 
3085 	return X86EMUL_CONTINUE;
3086 }
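
/*
 * The branch-free sign extension above, traced for 16-bit CWD: with
 * AX = 0x8000, src.val >> 15 = 1, so ~(1 - 1) is all ones and
 * DX = 0xffff; with AX = 0x1234 the shift gives 0 and ~(0 - 1) = 0,
 * so DX = 0.
 */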
3087 
3088 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3089 {
3090 	u64 tsc = 0;
3091 
3092 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3093 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3094 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3095 	return X86EMUL_CONTINUE;
3096 }
3097 
3098 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3099 {
3100 	u64 pmc;
3101 
3102 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3103 		return emulate_gp(ctxt, 0);
3104 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3105 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3106 	return X86EMUL_CONTINUE;
3107 }
3108 
3109 static int em_mov(struct x86_emulate_ctxt *ctxt)
3110 {
3111 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3112 	return X86EMUL_CONTINUE;
3113 }
3114 
3115 #define FFL(x) bit(X86_FEATURE_##x)
3116 
3117 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3118 {
3119 	u32 ebx, ecx, edx, eax = 1;
3120 	u16 tmp;
3121 
3122 	/*
3123 	 * Check MOVBE is set in the guest-visible CPUID leaf.
3124 	 */
3125 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3126 	if (!(ecx & FFL(MOVBE)))
3127 		return emulate_ud(ctxt);
3128 
3129 	switch (ctxt->op_bytes) {
3130 	case 2:
3131 		/*
3132 		 * From MOVBE definition: "...When the operand size is 16 bits,
3133 		 * the upper word of the destination register remains unchanged
3134 		 * ..."
3135 		 *
3136 		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3137 		 * rules, so we have to do the operation almost by hand.
3138 		 */
3139 		tmp = (u16)ctxt->src.val;
3140 		ctxt->dst.val &= ~0xffffUL;
3141 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3142 		break;
3143 	case 4:
3144 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3145 		break;
3146 	case 8:
3147 		ctxt->dst.val = swab64(ctxt->src.val);
3148 		break;
3149 	default:
3150 		BUG();
3151 	}
3152 	return X86EMUL_CONTINUE;
3153 }
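
/*
 * MOVBE byte-swap example (value is illustrative): a 4-byte load of
 * 0x11223344 stores 0x44332211, while the 2-byte form swaps only the
 * low word and, per the comment above, leaves the destination's upper
 * bits alone.
 */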
3154 
3155 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3156 {
3157 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3158 		return emulate_gp(ctxt, 0);
3159 
3160 	/* Disable writeback. */
3161 	ctxt->dst.type = OP_NONE;
3162 	return X86EMUL_CONTINUE;
3163 }
3164 
3165 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3166 {
3167 	unsigned long val;
3168 
3169 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3170 		val = ctxt->src.val & ~0ULL;
3171 	else
3172 		val = ctxt->src.val & ~0U;
3173 
3174 	/* #UD condition is already handled. */
3175 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3176 		return emulate_gp(ctxt, 0);
3177 
3178 	/* Disable writeback. */
3179 	ctxt->dst.type = OP_NONE;
3180 	return X86EMUL_CONTINUE;
3181 }
3182 
3183 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3184 {
3185 	u64 msr_data;
3186 
3187 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3188 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3189 	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3190 		return emulate_gp(ctxt, 0);
3191 
3192 	return X86EMUL_CONTINUE;
3193 }
3194 
3195 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3196 {
3197 	u64 msr_data;
3198 
3199 	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3200 		return emulate_gp(ctxt, 0);
3201 
3202 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3203 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3204 	return X86EMUL_CONTINUE;
3205 }
3206 
3207 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3208 {
3209 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3210 		return emulate_ud(ctxt);
3211 
3212 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3213 	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3214 		ctxt->dst.bytes = 2;
3215 	return X86EMUL_CONTINUE;
3216 }
3217 
3218 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3219 {
3220 	u16 sel = ctxt->src.val;
3221 
3222 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3223 		return emulate_ud(ctxt);
3224 
3225 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3226 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3227 
3228 	/* Disable writeback. */
3229 	ctxt->dst.type = OP_NONE;
3230 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3231 }
3232 
3233 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3234 {
3235 	u16 sel = ctxt->src.val;
3236 
3237 	/* Disable writeback. */
3238 	ctxt->dst.type = OP_NONE;
3239 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3240 }
3241 
3242 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3243 {
3244 	u16 sel = ctxt->src.val;
3245 
3246 	/* Disable writeback. */
3247 	ctxt->dst.type = OP_NONE;
3248 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3249 }
3250 
3251 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3252 {
3253 	int rc;
3254 	ulong linear;
3255 
3256 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3257 	if (rc == X86EMUL_CONTINUE)
3258 		ctxt->ops->invlpg(ctxt, linear);
3259 	/* Disable writeback. */
3260 	ctxt->dst.type = OP_NONE;
3261 	return X86EMUL_CONTINUE;
3262 }
3263 
3264 static int em_clts(struct x86_emulate_ctxt *ctxt)
3265 {
3266 	ulong cr0;
3267 
3268 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3269 	cr0 &= ~X86_CR0_TS;
3270 	ctxt->ops->set_cr(ctxt, 0, cr0);
3271 	return X86EMUL_CONTINUE;
3272 }
3273 
3274 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3275 {
3276 	int rc = ctxt->ops->fix_hypercall(ctxt);
3277 
3278 	if (rc != X86EMUL_CONTINUE)
3279 		return rc;
3280 
3281 	/* Let the processor re-execute the fixed hypercall */
3282 	ctxt->_eip = ctxt->eip;
3283 	/* Disable writeback. */
3284 	ctxt->dst.type = OP_NONE;
3285 	return X86EMUL_CONTINUE;
3286 }
3287 
3288 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3289 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3290 					      struct desc_ptr *ptr))
3291 {
3292 	struct desc_ptr desc_ptr;
3293 
3294 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3295 		ctxt->op_bytes = 8;
3296 	get(ctxt, &desc_ptr);
3297 	if (ctxt->op_bytes == 2) {
3298 		ctxt->op_bytes = 4;
3299 		desc_ptr.address &= 0x00ffffff;
3300 	}
3301 	/* Disable writeback. */
3302 	ctxt->dst.type = OP_NONE;
3303 	return segmented_write(ctxt, ctxt->dst.addr.mem,
3304 			       &desc_ptr, 2 + ctxt->op_bytes);
3305 }
3306 
3307 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3308 {
3309 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3310 }
3311 
3312 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3313 {
3314 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3315 }
3316 
3317 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3318 {
3319 	struct desc_ptr desc_ptr;
3320 	int rc;
3321 
3322 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3323 		ctxt->op_bytes = 8;
3324 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3325 			     &desc_ptr.size, &desc_ptr.address,
3326 			     ctxt->op_bytes);
3327 	if (rc != X86EMUL_CONTINUE)
3328 		return rc;
3329 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3330 	    is_noncanonical_address(desc_ptr.address))
3331 		return emulate_gp(ctxt, 0);
3332 	if (lgdt)
3333 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3334 	else
3335 		ctxt->ops->set_idt(ctxt, &desc_ptr);
3336 	/* Disable writeback. */
3337 	ctxt->dst.type = OP_NONE;
3338 	return X86EMUL_CONTINUE;
3339 }
3340 
3341 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3342 {
3343 	return em_lgdt_lidt(ctxt, true);
3344 }
3345 
3346 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3347 {
3348 	int rc;
3349 
3350 	rc = ctxt->ops->fix_hypercall(ctxt);
3351 
3352 	/* Disable writeback. */
3353 	ctxt->dst.type = OP_NONE;
3354 	return rc;
3355 }
3356 
3357 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3358 {
3359 	return em_lgdt_lidt(ctxt, false);
3360 }
3361 
3362 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3363 {
3364 	if (ctxt->dst.type == OP_MEM)
3365 		ctxt->dst.bytes = 2;
3366 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3367 	return X86EMUL_CONTINUE;
3368 }
3369 
3370 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3371 {
3372 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3373 			  | (ctxt->src.val & 0x0f));
3374 	ctxt->dst.type = OP_NONE;
3375 	return X86EMUL_CONTINUE;
3376 }
3377 
3378 static int em_loop(struct x86_emulate_ctxt *ctxt)
3379 {
3380 	int rc = X86EMUL_CONTINUE;
3381 
3382 	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3383 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3384 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3385 		rc = jmp_rel(ctxt, ctxt->src.val);
3386 
3387 	return rc;
3388 }
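
/*
 * The 'ctxt->b ^ 0x5' trick above maps the LOOP opcodes onto the
 * generic condition tester: LOOPNE (0xe0) ^ 0x5 = 0xe5 and
 * LOOPE (0xe1) ^ 0x5 = 0xe4, whose low nibbles select the NZ and Z
 * conditions that test_cc() already implements, while plain LOOP
 * (0xe2) is special-cased to branch on RCX alone.
 */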
3389 
3390 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3391 {
3392 	int rc = X86EMUL_CONTINUE;
3393 
3394 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3395 		rc = jmp_rel(ctxt, ctxt->src.val);
3396 
3397 	return rc;
3398 }
3399 
3400 static int em_in(struct x86_emulate_ctxt *ctxt)
3401 {
3402 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3403 			     &ctxt->dst.val))
3404 		return X86EMUL_IO_NEEDED;
3405 
3406 	return X86EMUL_CONTINUE;
3407 }
3408 
3409 static int em_out(struct x86_emulate_ctxt *ctxt)
3410 {
3411 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3412 				    &ctxt->src.val, 1);
3413 	/* Disable writeback. */
3414 	ctxt->dst.type = OP_NONE;
3415 	return X86EMUL_CONTINUE;
3416 }
3417 
3418 static int em_cli(struct x86_emulate_ctxt *ctxt)
3419 {
3420 	if (emulator_bad_iopl(ctxt))
3421 		return emulate_gp(ctxt, 0);
3422 
3423 	ctxt->eflags &= ~X86_EFLAGS_IF;
3424 	return X86EMUL_CONTINUE;
3425 }
3426 
3427 static int em_sti(struct x86_emulate_ctxt *ctxt)
3428 {
3429 	if (emulator_bad_iopl(ctxt))
3430 		return emulate_gp(ctxt, 0);
3431 
3432 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3433 	ctxt->eflags |= X86_EFLAGS_IF;
3434 	return X86EMUL_CONTINUE;
3435 }
3436 
3437 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3438 {
3439 	u32 eax, ebx, ecx, edx;
3440 
3441 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3442 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3443 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3444 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3445 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3446 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3447 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3448 	return X86EMUL_CONTINUE;
3449 }
3450 
3451 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3452 {
3453 	u32 flags;
3454 
3455 	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3456 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3457 
3458 	ctxt->eflags &= ~0xffUL;
3459 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3460 	return X86EMUL_CONTINUE;
3461 }
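
/*
 * SAHF example: with AH = 0xff, only the five status flags masked
 * into 'flags' (CF, PF, AF, ZF, SF) are copied into the low byte of
 * EFLAGS; the fixed bit 1 is re-set via X86_EFLAGS_FIXED and the
 * reserved low-byte bits come out as zero.
 */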
3462 
3463 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3464 {
3465 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3466 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3467 	return X86EMUL_CONTINUE;
3468 }
3469 
3470 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3471 {
3472 	switch (ctxt->op_bytes) {
3473 #ifdef CONFIG_X86_64
3474 	case 8:
3475 		asm("bswap %0" : "+r"(ctxt->dst.val));
3476 		break;
3477 #endif
3478 	default:
3479 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3480 		break;
3481 	}
3482 	return X86EMUL_CONTINUE;
3483 }
3484 
3485 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3486 {
3487 	/* emulating clflush regardless of cpuid */
3488 	return X86EMUL_CONTINUE;
3489 }
3490 
3491 static bool valid_cr(int nr)
3492 {
3493 	switch (nr) {
3494 	case 0:
3495 	case 2 ... 4:
3496 	case 8:
3497 		return true;
3498 	default:
3499 		return false;
3500 	}
3501 }
3502 
3503 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3504 {
3505 	if (!valid_cr(ctxt->modrm_reg))
3506 		return emulate_ud(ctxt);
3507 
3508 	return X86EMUL_CONTINUE;
3509 }
3510 
3511 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3512 {
3513 	u64 new_val = ctxt->src.val64;
3514 	int cr = ctxt->modrm_reg;
3515 	u64 efer = 0;
3516 
3517 	static u64 cr_reserved_bits[] = {
3518 		0xffffffff00000000ULL,
3519 		0, 0, 0, /* CR3 checked later */
3520 		CR4_RESERVED_BITS,
3521 		0, 0, 0,
3522 		CR8_RESERVED_BITS,
3523 	};
3524 
3525 	if (!valid_cr(cr))
3526 		return emulate_ud(ctxt);
3527 
3528 	if (new_val & cr_reserved_bits[cr])
3529 		return emulate_gp(ctxt, 0);
3530 
3531 	switch (cr) {
3532 	case 0: {
3533 		u64 cr4;
3534 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3535 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3536 			return emulate_gp(ctxt, 0);
3537 
3538 		cr4 = ctxt->ops->get_cr(ctxt, 4);
3539 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3540 
3541 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3542 		    !(cr4 & X86_CR4_PAE))
3543 			return emulate_gp(ctxt, 0);
3544 
3545 		break;
3546 		}
3547 	case 3: {
3548 		u64 rsvd = 0;
3549 
3550 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3551 		if (efer & EFER_LMA)
3552 			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3553 
3554 		if (new_val & rsvd)
3555 			return emulate_gp(ctxt, 0);
3556 
3557 		break;
3558 		}
3559 	case 4: {
3560 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3561 
3562 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3563 			return emulate_gp(ctxt, 0);
3564 
3565 		break;
3566 		}
3567 	}
3568 
3569 	return X86EMUL_CONTINUE;
3570 }
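
/*
 * Example of the CR0 consistency rules enforced above: setting
 * CR0.PG without CR0.PE, or CR0.NW without CR0.CD, raises #GP(0);
 * likewise, enabling paging while EFER.LME is set but CR4.PAE is
 * clear is rejected, since long mode requires PAE page tables.
 */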
3571 
3572 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3573 {
3574 	unsigned long dr7;
3575 
3576 	ctxt->ops->get_dr(ctxt, 7, &dr7);
3577 
3578 	/* Check if DR7.Global_Enable is set */
3579 	return dr7 & (1 << 13);
3580 }
3581 
3582 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3583 {
3584 	int dr = ctxt->modrm_reg;
3585 	u64 cr4;
3586 
3587 	if (dr > 7)
3588 		return emulate_ud(ctxt);
3589 
3590 	cr4 = ctxt->ops->get_cr(ctxt, 4);
3591 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3592 		return emulate_ud(ctxt);
3593 
3594 	if (check_dr7_gd(ctxt)) {
3595 		ulong dr6;
3596 
3597 		ctxt->ops->get_dr(ctxt, 6, &dr6);
3598 		dr6 &= ~15;
3599 		dr6 |= DR6_BD | DR6_RTM;
3600 		ctxt->ops->set_dr(ctxt, 6, dr6);
3601 		return emulate_db(ctxt);
3602 	}
3603 
3604 	return X86EMUL_CONTINUE;
3605 }
3606 
3607 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3608 {
3609 	u64 new_val = ctxt->src.val64;
3610 	int dr = ctxt->modrm_reg;
3611 
3612 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3613 		return emulate_gp(ctxt, 0);
3614 
3615 	return check_dr_read(ctxt);
3616 }
3617 
3618 static int check_svme(struct x86_emulate_ctxt *ctxt)
3619 {
3620 	u64 efer;
3621 
3622 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3623 
3624 	if (!(efer & EFER_SVME))
3625 		return emulate_ud(ctxt);
3626 
3627 	return X86EMUL_CONTINUE;
3628 }
3629 
3630 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3631 {
3632 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3633 
3634 	/* Valid physical address? */
3635 	if (rax & 0xffff000000000000ULL)
3636 		return emulate_gp(ctxt, 0);
3637 
3638 	return check_svme(ctxt);
3639 }
3640 
3641 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3642 {
3643 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3644 
3645 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3646 		return emulate_ud(ctxt);
3647 
3648 	return X86EMUL_CONTINUE;
3649 }
3650 
3651 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3652 {
3653 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3654 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3655 
3656 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3657 	    ctxt->ops->check_pmc(ctxt, rcx))
3658 		return emulate_gp(ctxt, 0);
3659 
3660 	return X86EMUL_CONTINUE;
3661 }
3662 
3663 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3664 {
3665 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3666 	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3667 		return emulate_gp(ctxt, 0);
3668 
3669 	return X86EMUL_CONTINUE;
3670 }
3671 
3672 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3673 {
3674 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3675 	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3676 		return emulate_gp(ctxt, 0);
3677 
3678 	return X86EMUL_CONTINUE;
3679 }
3680 
3681 #define D(_y) { .flags = (_y) }
3682 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3683 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3684 		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
3685 #define N    D(NotImpl)
3686 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3687 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3688 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3689 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3690 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3691 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3692 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3693 #define II(_f, _e, _i) \
3694 	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3695 #define IIP(_f, _e, _i, _p) \
3696 	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3697 	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
3698 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3699 
3700 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
3701 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3702 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
3703 #define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
3704 #define I2bvIP(_f, _e, _i, _p) \
3705 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3706 
3707 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
3708 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
3709 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3710 
3711 static const struct opcode group7_rm0[] = {
3712 	N,
3713 	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
3714 	N, N, N, N, N, N,
3715 };
3716 
3717 static const struct opcode group7_rm1[] = {
3718 	DI(SrcNone | Priv, monitor),
3719 	DI(SrcNone | Priv, mwait),
3720 	N, N, N, N, N, N,
3721 };
3722 
3723 static const struct opcode group7_rm3[] = {
3724 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
3725 	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
3726 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
3727 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
3728 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
3729 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
3730 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
3731 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
3732 };
3733 
3734 static const struct opcode group7_rm7[] = {
3735 	N,
3736 	DIP(SrcNone, rdtscp, check_rdtsc),
3737 	N, N, N, N, N, N,
3738 };
3739 
3740 static const struct opcode group1[] = {
3741 	F(Lock, em_add),
3742 	F(Lock | PageTable, em_or),
3743 	F(Lock, em_adc),
3744 	F(Lock, em_sbb),
3745 	F(Lock | PageTable, em_and),
3746 	F(Lock, em_sub),
3747 	F(Lock, em_xor),
3748 	F(NoWrite, em_cmp),
3749 };
3750 
3751 static const struct opcode group1A[] = {
3752 	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3753 };
3754 
3755 static const struct opcode group2[] = {
3756 	F(DstMem | ModRM, em_rol),
3757 	F(DstMem | ModRM, em_ror),
3758 	F(DstMem | ModRM, em_rcl),
3759 	F(DstMem | ModRM, em_rcr),
3760 	F(DstMem | ModRM, em_shl),
3761 	F(DstMem | ModRM, em_shr),
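	/* /6 (SAL) behaves identically to /4 (SHL), hence the repeat: */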
3762 	F(DstMem | ModRM, em_shl),
3763 	F(DstMem | ModRM, em_sar),
3764 };
3765 
3766 static const struct opcode group3[] = {
3767 	F(DstMem | SrcImm | NoWrite, em_test),
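	/* /1 is an undocumented alias of /0 (TEST), hence the repeat: */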
3768 	F(DstMem | SrcImm | NoWrite, em_test),
3769 	F(DstMem | SrcNone | Lock, em_not),
3770 	F(DstMem | SrcNone | Lock, em_neg),
3771 	F(DstXacc | Src2Mem, em_mul_ex),
3772 	F(DstXacc | Src2Mem, em_imul_ex),
3773 	F(DstXacc | Src2Mem, em_div_ex),
3774 	F(DstXacc | Src2Mem, em_idiv_ex),
3775 };
3776 
3777 static const struct opcode group4[] = {
3778 	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3779 	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3780 	N, N, N, N, N, N,
3781 };
3782 
3783 static const struct opcode group5[] = {
3784 	F(DstMem | SrcNone | Lock,		em_inc),
3785 	F(DstMem | SrcNone | Lock,		em_dec),
3786 	I(SrcMem | NearBranch,			em_call_near_abs),
3787 	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
3788 	I(SrcMem | NearBranch,			em_jmp_abs),
3789 	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
3790 	I(SrcMem | Stack,			em_push), D(Undefined),
3791 };
3792 
3793 static const struct opcode group6[] = {
3794 	DI(Prot,	sldt),
3795 	DI(Prot,	str),
3796 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
3797 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
3798 	N, N, N, N,
3799 };
3800 
3801 static const struct group_dual group7 = { {
3802 	II(Mov | DstMem,			em_sgdt, sgdt),
3803 	II(Mov | DstMem,			em_sidt, sidt),
3804 	II(SrcMem | Priv,			em_lgdt, lgdt),
3805 	II(SrcMem | Priv,			em_lidt, lidt),
3806 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
3807 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
3808 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
3809 }, {
3810 	EXT(0, group7_rm0),
3811 	EXT(0, group7_rm1),
3812 	N, EXT(0, group7_rm3),
3813 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
3814 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
3815 	EXT(0, group7_rm7),
3816 } };
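
/*
 * Decode illustration: VMRUN encodes as 0f 01 d8.  ModRM 0xd8 has mod == 3,
 * so the mod3 half of group7 applies; reg == 3 selects EXT(0, group7_rm3)
 * and rm == 0 then picks the vmrun entry.  With mod != 3 the same reg == 3
 * slot decodes as lidt from the mod012 half instead.
 */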
3817 
3818 static const struct opcode group8[] = {
3819 	N, N, N, N,
3820 	F(DstMem | SrcImmByte | NoWrite,		em_bt),
3821 	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
3822 	F(DstMem | SrcImmByte | Lock,			em_btr),
3823 	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
3824 };
3825 
3826 static const struct group_dual group9 = { {
3827 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3828 }, {
3829 	N, N, N, N, N, N, N, N,
3830 } };
3831 
3832 static const struct opcode group11[] = {
3833 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
3834 	X7(D(Undefined)),
3835 };
3836 
3837 static const struct gprefix pfx_0f_ae_7 = {
3838 	I(SrcMem | ByteOp, em_clflush), N, N, N,
3839 };
3840 
3841 static const struct group_dual group15 = { {
3842 	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3843 }, {
3844 	N, N, N, N, N, N, N, N,
3845 } };
3846 
3847 static const struct gprefix pfx_0f_6f_0f_7f = {
3848 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3849 };
3850 
3851 static const struct instr_dual instr_dual_0f_2b = {
3852 	I(0, em_mov), N
3853 };
3854 
3855 static const struct gprefix pfx_0f_2b = {
3856 	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3857 };
3858 
3859 static const struct gprefix pfx_0f_28_0f_29 = {
3860 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3861 };
3862 
3863 static const struct gprefix pfx_0f_e7 = {
3864 	N, I(Sse, em_mov), N, N,
3865 };
3866 
3867 static const struct escape escape_d9 = { {
3868 	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3869 }, {
3870 	/* 0xC0 - 0xC7 */
3871 	N, N, N, N, N, N, N, N,
3872 	/* 0xC8 - 0xCF */
3873 	N, N, N, N, N, N, N, N,
3874 	/* 0xD0 - 0xD7 */
3875 	N, N, N, N, N, N, N, N,
3876 	/* 0xD8 - 0xDF */
3877 	N, N, N, N, N, N, N, N,
3878 	/* 0xE0 - 0xE7 */
3879 	N, N, N, N, N, N, N, N,
3880 	/* 0xE8 - 0xEF */
3881 	N, N, N, N, N, N, N, N,
3882 	/* 0xF0 - 0xF7 */
3883 	N, N, N, N, N, N, N, N,
3884 	/* 0xF8 - 0xFF */
3885 	N, N, N, N, N, N, N, N,
3886 } };
3887 
3888 static const struct escape escape_db = { {
3889 	N, N, N, N, N, N, N, N,
3890 }, {
3891 	/* 0xC0 - 0xC7 */
3892 	N, N, N, N, N, N, N, N,
3893 	/* 0xC8 - 0xCF */
3894 	N, N, N, N, N, N, N, N,
3895 	/* 0xD0 - 0xD7 */
3896 	N, N, N, N, N, N, N, N,
3897 	/* 0xD8 - 0xDF */
3898 	N, N, N, N, N, N, N, N,
3899 	/* 0xE0 - 0xE7 */
3900 	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3901 	/* 0xE8 - 0xEF */
3902 	N, N, N, N, N, N, N, N,
3903 	/* 0xF0 - 0xF7 */
3904 	N, N, N, N, N, N, N, N,
3905 	/* 0xF8 - 0xFF */
3906 	N, N, N, N, N, N, N, N,
3907 } };
3908 
3909 static const struct escape escape_dd = { {
3910 	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3911 }, {
3912 	/* 0xC0 - 0xC7 */
3913 	N, N, N, N, N, N, N, N,
3914 	/* 0xC8 - 0xCF */
3915 	N, N, N, N, N, N, N, N,
3916 	/* 0xD0 - 0xD7 */
3917 	N, N, N, N, N, N, N, N,
3918 	/* 0xD8 - 0xDF */
3919 	N, N, N, N, N, N, N, N,
3920 	/* 0xE0 - 0xE7 */
3921 	N, N, N, N, N, N, N, N,
3922 	/* 0xE8 - 0xEF */
3923 	N, N, N, N, N, N, N, N,
3924 	/* 0xF0 - 0xF7 */
3925 	N, N, N, N, N, N, N, N,
3926 	/* 0xF8 - 0xFF */
3927 	N, N, N, N, N, N, N, N,
3928 } };
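
/*
 * Escape dispatch sketch: for d9 /7 (fnstcw m2byte) the ModRM byte is below
 * 0xc0, so the reg field indexes the first array and escape_d9.op[7] yields
 * em_fnstcw.  ModRM bytes 0xc0-0xff index the 64-entry second array directly
 * (modrm - 0xc0); nearly all of those register forms are left as N here.
 */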
3929 
3930 static const struct instr_dual instr_dual_0f_c3 = {
3931 	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3932 };
3933 
3934 static const struct opcode opcode_table[256] = {
3935 	/* 0x00 - 0x07 */
3936 	F6ALU(Lock, em_add),
3937 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3938 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3939 	/* 0x08 - 0x0F */
3940 	F6ALU(Lock | PageTable, em_or),
3941 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3942 	N,
3943 	/* 0x10 - 0x17 */
3944 	F6ALU(Lock, em_adc),
3945 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3946 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3947 	/* 0x18 - 0x1F */
3948 	F6ALU(Lock, em_sbb),
3949 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3950 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3951 	/* 0x20 - 0x27 */
3952 	F6ALU(Lock | PageTable, em_and), N, N,
3953 	/* 0x28 - 0x2F */
3954 	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3955 	/* 0x30 - 0x37 */
3956 	F6ALU(Lock, em_xor), N, N,
3957 	/* 0x38 - 0x3F */
3958 	F6ALU(NoWrite, em_cmp), N, N,
3959 	/* 0x40 - 0x4F */
3960 	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3961 	/* 0x50 - 0x57 */
3962 	X8(I(SrcReg | Stack, em_push)),
3963 	/* 0x58 - 0x5F */
3964 	X8(I(DstReg | Stack, em_pop)),
3965 	/* 0x60 - 0x67 */
3966 	I(ImplicitOps | Stack | No64, em_pusha),
3967 	I(ImplicitOps | Stack | No64, em_popa),
3968 	N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86-64 only) */
3969 	N, N, N, N,
3970 	/* 0x68 - 0x6F */
3971 	I(SrcImm | Mov | Stack, em_push),
3972 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3973 	I(SrcImmByte | Mov | Stack, em_push),
3974 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3975 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3976 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3977 	/* 0x70 - 0x7F */
3978 	X16(D(SrcImmByte | NearBranch)),
3979 	/* 0x80 - 0x87 */
3980 	G(ByteOp | DstMem | SrcImm, group1),
3981 	G(DstMem | SrcImm, group1),
3982 	G(ByteOp | DstMem | SrcImm | No64, group1),
3983 	G(DstMem | SrcImmByte, group1),
3984 	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3985 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3986 	/* 0x88 - 0x8F */
3987 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3988 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3989 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3990 	D(ModRM | SrcMem | NoAccess | DstReg),
3991 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3992 	G(0, group1A),
3993 	/* 0x90 - 0x97 */
3994 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3995 	/* 0x98 - 0x9F */
3996 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3997 	I(SrcImmFAddr | No64, em_call_far), N,
3998 	II(ImplicitOps | Stack, em_pushf, pushf),
3999 	II(ImplicitOps | Stack, em_popf, popf),
4000 	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4001 	/* 0xA0 - 0xA7 */
4002 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4003 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4004 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
4005 	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4006 	/* 0xA8 - 0xAF */
4007 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4008 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4009 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4010 	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4011 	/* 0xB0 - 0xB7 */
4012 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4013 	/* 0xB8 - 0xBF */
4014 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4015 	/* 0xC0 - 0xC7 */
4016 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4017 	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4018 	I(ImplicitOps | NearBranch, em_ret),
4019 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4020 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4021 	G(ByteOp, group11), G(0, group11),
4022 	/* 0xC8 - 0xCF */
4023 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4024 	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4025 	I(ImplicitOps | Stack, em_ret_far),
4026 	D(ImplicitOps), DI(SrcImmByte, intn),
4027 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4028 	/* 0xD0 - 0xD7 */
4029 	G(Src2One | ByteOp, group2), G(Src2One, group2),
4030 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4031 	I(DstAcc | SrcImmUByte | No64, em_aam),
4032 	I(DstAcc | SrcImmUByte | No64, em_aad),
4033 	F(DstAcc | ByteOp | No64, em_salc),
4034 	I(DstAcc | SrcXLat | ByteOp, em_mov),
4035 	/* 0xD8 - 0xDF */
4036 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4037 	/* 0xE0 - 0xE7 */
4038 	X3(I(SrcImmByte | NearBranch, em_loop)),
4039 	I(SrcImmByte | NearBranch, em_jcxz),
4040 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4041 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4042 	/* 0xE8 - 0xEF */
4043 	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4044 	I(SrcImmFAddr | No64, em_jmp_far),
4045 	D(SrcImmByte | ImplicitOps | NearBranch),
4046 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4047 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4048 	/* 0xF0 - 0xF7 */
4049 	N, DI(ImplicitOps, icebp), N, N,
4050 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4051 	G(ByteOp, group3), G(0, group3),
4052 	/* 0xF8 - 0xFF */
4053 	D(ImplicitOps), D(ImplicitOps),
4054 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4055 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4056 };
4057 
4058 static const struct opcode twobyte_table[256] = {
4059 	/* 0x00 - 0x0F */
4060 	G(0, group6), GD(0, &group7), N, N,
4061 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4062 	II(ImplicitOps | Priv, em_clts, clts), N,
4063 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4064 	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4065 	/* 0x10 - 0x1F */
4066 	N, N, N, N, N, N, N, N,
4067 	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4068 	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4069 	/* 0x20 - 0x2F */
4070 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4071 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4072 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4073 						check_cr_write),
4074 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4075 						check_dr_write),
4076 	N, N, N, N,
4077 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4078 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4079 	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4080 	N, N, N, N,
4081 	/* 0x30 - 0x3F */
4082 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4083 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4084 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4085 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4086 	I(ImplicitOps | EmulateOnUD, em_sysenter),
4087 	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4088 	N, N,
4089 	N, N, N, N, N, N, N, N,
4090 	/* 0x40 - 0x4F */
4091 	X16(D(DstReg | SrcMem | ModRM)),
4092 	/* 0x50 - 0x5F */
4093 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4094 	/* 0x60 - 0x6F */
4095 	N, N, N, N,
4096 	N, N, N, N,
4097 	N, N, N, N,
4098 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4099 	/* 0x70 - 0x7F */
4100 	N, N, N, N,
4101 	N, N, N, N,
4102 	N, N, N, N,
4103 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4104 	/* 0x80 - 0x8F */
4105 	X16(D(SrcImm | NearBranch)),
4106 	/* 0x90 - 0x9F */
4107 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4108 	/* 0xA0 - 0xA7 */
4109 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4110 	II(ImplicitOps, em_cpuid, cpuid),
4111 	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4112 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4113 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4114 	/* 0xA8 - 0xAF */
4115 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4116 	DI(ImplicitOps, rsm),
4117 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4118 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4119 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4120 	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4121 	/* 0xB0 - 0xB7 */
4122 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4123 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4124 	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4125 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4126 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4127 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4128 	/* 0xB8 - 0xBF */
4129 	N, N,
4130 	G(BitOp, group8),
4131 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4132 	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4133 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4134 	/* 0xC0 - 0xC7 */
4135 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4136 	N, ID(0, &instr_dual_0f_c3),
4137 	N, N, N, GD(0, &group9),
4138 	/* 0xC8 - 0xCF */
4139 	X8(I(DstReg, em_bswap)),
4140 	/* 0xD0 - 0xDF */
4141 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4142 	/* 0xE0 - 0xEF */
4143 	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4144 	N, N, N, N, N, N, N, N,
4145 	/* 0xF0 - 0xFF */
4146 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4147 };
4148 
4149 static const struct instr_dual instr_dual_0f_38_f0 = {
4150 	I(DstReg | SrcMem | Mov, em_movbe), N
4151 };
4152 
4153 static const struct instr_dual instr_dual_0f_38_f1 = {
4154 	I(DstMem | SrcReg | Mov, em_movbe), N
4155 };
4156 
4157 static const struct gprefix three_byte_0f_38_f0 = {
4158 	ID(0, &instr_dual_0f_38_f0), N, N, N
4159 };
4160 
4161 static const struct gprefix three_byte_0f_38_f1 = {
4162 	ID(0, &instr_dual_0f_38_f1), N, N, N
4163 };
4164 
4165 /*
4166  * The instructions below are indexed by the third opcode byte and then
4167  * selected by the mandatory (SIMD) prefix.
4168  */
4169 static const struct opcode opcode_map_0f_38[256] = {
4170 	/* 0x00 - 0x7f */
4171 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4172 	/* 0x80 - 0xef */
4173 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4174 	/* 0xf0 - 0xf1 */
4175 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4176 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4177 	/* 0xf2 - 0xff */
4178 	N, N, X4(N), X8(N)
4179 };
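
/*
 * Example: movbe r32, m32 is 0f 38 f0 /r with no mandatory prefix, so the
 * GP() entry routes through pfx_no to instr_dual_0f_38_f0, whose mod != 3
 * form is em_movbe (the mod3 register form is left as N).  The 66-prefixed
 * variant and the f2-prefixed crc32 are not implemented in this table.
 */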
4180 
4181 #undef D
4182 #undef N
4183 #undef G
4184 #undef GD
4185 #undef I
4186 #undef GP
4187 #undef EXT
4188 
4189 #undef D2bv
4190 #undef D2bvIP
4191 #undef I2bv
4192 #undef I2bvIP
4193 #undef F6ALU
4194 
4195 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4196 {
4197 	unsigned size;
4198 
4199 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4200 	if (size == 8)
4201 		size = 4;
4202 	return size;
4203 }
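
/*
 * Example: with REX.W an ALU immediate such as "add rax, imm32" still
 * fetches only four bytes here and relies on sign extension to 64 bits;
 * only OpImm64 users (e.g. mov reg, imm64) decode a full 8-byte immediate.
 */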
4204 
4205 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4206 		      unsigned size, bool sign_extension)
4207 {
4208 	int rc = X86EMUL_CONTINUE;
4209 
4210 	op->type = OP_IMM;
4211 	op->bytes = size;
4212 	op->addr.mem.ea = ctxt->_eip;
4213 	/* NB. Immediates are sign-extended as necessary. */
4214 	switch (op->bytes) {
4215 	case 1:
4216 		op->val = insn_fetch(s8, ctxt);
4217 		break;
4218 	case 2:
4219 		op->val = insn_fetch(s16, ctxt);
4220 		break;
4221 	case 4:
4222 		op->val = insn_fetch(s32, ctxt);
4223 		break;
4224 	case 8:
4225 		op->val = insn_fetch(s64, ctxt);
4226 		break;
4227 	}
4228 	if (!sign_extension) {
4229 		switch (op->bytes) {
4230 		case 1:
4231 			op->val &= 0xff;
4232 			break;
4233 		case 2:
4234 			op->val &= 0xffff;
4235 			break;
4236 		case 4:
4237 			op->val &= 0xffffffff;
4238 			break;
4239 		}
4240 	}
4241 done:
4242 	return rc;
4243 }
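
/*
 * Example: an immediate byte of 0xff becomes op->val == -1 for OpImmByte
 * (sign extension) but 0xff for OpImmUByte, since the !sign_extension case
 * above masks the fetched value back down to its operand size.
 */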
4244 
4245 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4246 			  unsigned d)
4247 {
4248 	int rc = X86EMUL_CONTINUE;
4249 
4250 	switch (d) {
4251 	case OpReg:
4252 		decode_register_operand(ctxt, op);
4253 		break;
4254 	case OpImmUByte:
4255 		rc = decode_imm(ctxt, op, 1, false);
4256 		break;
4257 	case OpMem:
4258 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4259 	mem_common:
4260 		*op = ctxt->memop;
4261 		ctxt->memopp = op;
4262 		if (ctxt->d & BitOp)
4263 			fetch_bit_operand(ctxt);
4264 		op->orig_val = op->val;
4265 		break;
4266 	case OpMem64:
4267 		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4268 		goto mem_common;
4269 	case OpAcc:
4270 		op->type = OP_REG;
4271 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4272 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4273 		fetch_register_operand(op);
4274 		op->orig_val = op->val;
4275 		break;
4276 	case OpAccLo:
4277 		op->type = OP_REG;
4278 		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4279 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4280 		fetch_register_operand(op);
4281 		op->orig_val = op->val;
4282 		break;
4283 	case OpAccHi:
4284 		if (ctxt->d & ByteOp) {
4285 			op->type = OP_NONE;
4286 			break;
4287 		}
4288 		op->type = OP_REG;
4289 		op->bytes = ctxt->op_bytes;
4290 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4291 		fetch_register_operand(op);
4292 		op->orig_val = op->val;
4293 		break;
4294 	case OpDI:
4295 		op->type = OP_MEM;
4296 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4297 		op->addr.mem.ea =
4298 			register_address(ctxt, VCPU_REGS_RDI);
4299 		op->addr.mem.seg = VCPU_SREG_ES;
4300 		op->val = 0;
4301 		op->count = 1;
4302 		break;
4303 	case OpDX:
4304 		op->type = OP_REG;
4305 		op->bytes = 2;
4306 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4307 		fetch_register_operand(op);
4308 		break;
4309 	case OpCL:
4310 		op->type = OP_IMM;
4311 		op->bytes = 1;
4312 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4313 		break;
4314 	case OpImmByte:
4315 		rc = decode_imm(ctxt, op, 1, true);
4316 		break;
4317 	case OpOne:
4318 		op->type = OP_IMM;
4319 		op->bytes = 1;
4320 		op->val = 1;
4321 		break;
4322 	case OpImm:
4323 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4324 		break;
4325 	case OpImm64:
4326 		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4327 		break;
4328 	case OpMem8:
4329 		ctxt->memop.bytes = 1;
4330 		if (ctxt->memop.type == OP_REG) {
4331 			ctxt->memop.addr.reg = decode_register(ctxt,
4332 					ctxt->modrm_rm, true);
4333 			fetch_register_operand(&ctxt->memop);
4334 		}
4335 		goto mem_common;
4336 	case OpMem16:
4337 		ctxt->memop.bytes = 2;
4338 		goto mem_common;
4339 	case OpMem32:
4340 		ctxt->memop.bytes = 4;
4341 		goto mem_common;
4342 	case OpImmU16:
4343 		rc = decode_imm(ctxt, op, 2, false);
4344 		break;
4345 	case OpImmU:
4346 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4347 		break;
4348 	case OpSI:
4349 		op->type = OP_MEM;
4350 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4351 		op->addr.mem.ea =
4352 			register_address(ctxt, VCPU_REGS_RSI);
4353 		op->addr.mem.seg = ctxt->seg_override;
4354 		op->val = 0;
4355 		op->count = 1;
4356 		break;
4357 	case OpXLat:
4358 		op->type = OP_MEM;
4359 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4360 		op->addr.mem.ea =
4361 			address_mask(ctxt,
4362 				reg_read(ctxt, VCPU_REGS_RBX) +
4363 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4364 		op->addr.mem.seg = ctxt->seg_override;
4365 		op->val = 0;
4366 		break;
4367 	case OpImmFAddr:
4368 		op->type = OP_IMM;
4369 		op->addr.mem.ea = ctxt->_eip;
4370 		op->bytes = ctxt->op_bytes + 2;
4371 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4372 		break;
4373 	case OpMemFAddr:
4374 		ctxt->memop.bytes = ctxt->op_bytes + 2;
4375 		goto mem_common;
4376 	case OpES:
4377 		op->type = OP_IMM;
4378 		op->val = VCPU_SREG_ES;
4379 		break;
4380 	case OpCS:
4381 		op->type = OP_IMM;
4382 		op->val = VCPU_SREG_CS;
4383 		break;
4384 	case OpSS:
4385 		op->type = OP_IMM;
4386 		op->val = VCPU_SREG_SS;
4387 		break;
4388 	case OpDS:
4389 		op->type = OP_IMM;
4390 		op->val = VCPU_SREG_DS;
4391 		break;
4392 	case OpFS:
4393 		op->type = OP_IMM;
4394 		op->val = VCPU_SREG_FS;
4395 		break;
4396 	case OpGS:
4397 		op->type = OP_IMM;
4398 		op->val = VCPU_SREG_GS;
4399 		break;
4400 	case OpImplicit:
4401 		/* Special instructions do their own operand decoding. */
4402 	default:
4403 		op->type = OP_NONE; /* Disable writeback. */
4404 		break;
4405 	}
4406 
4407 done:
4408 	return rc;
4409 }
4410 
4411 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4412 {
4413 	int rc = X86EMUL_CONTINUE;
4414 	int mode = ctxt->mode;
4415 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4416 	bool op_prefix = false;
4417 	bool has_seg_override = false;
4418 	struct opcode opcode;
4419 
4420 	ctxt->memop.type = OP_NONE;
4421 	ctxt->memopp = NULL;
4422 	ctxt->_eip = ctxt->eip;
4423 	ctxt->fetch.ptr = ctxt->fetch.data;
4424 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4425 	ctxt->opcode_len = 1;
4426 	if (insn_len > 0)
4427 		memcpy(ctxt->fetch.data, insn, insn_len);
4428 	else {
4429 		rc = __do_insn_fetch_bytes(ctxt, 1);
4430 		if (rc != X86EMUL_CONTINUE)
4431 			return rc;
4432 	}
4433 
4434 	switch (mode) {
4435 	case X86EMUL_MODE_REAL:
4436 	case X86EMUL_MODE_VM86:
4437 	case X86EMUL_MODE_PROT16:
4438 		def_op_bytes = def_ad_bytes = 2;
4439 		break;
4440 	case X86EMUL_MODE_PROT32:
4441 		def_op_bytes = def_ad_bytes = 4;
4442 		break;
4443 #ifdef CONFIG_X86_64
4444 	case X86EMUL_MODE_PROT64:
4445 		def_op_bytes = 4;
4446 		def_ad_bytes = 8;
4447 		break;
4448 #endif
4449 	default:
4450 		return EMULATION_FAILED;
4451 	}
4452 
4453 	ctxt->op_bytes = def_op_bytes;
4454 	ctxt->ad_bytes = def_ad_bytes;
4455 
4456 	/* Legacy prefixes. */
4457 	for (;;) {
4458 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4459 		case 0x66:	/* operand-size override */
4460 			op_prefix = true;
4461 			/* switch between 2/4 bytes */
4462 			ctxt->op_bytes = def_op_bytes ^ 6;
4463 			break;
4464 		case 0x67:	/* address-size override */
4465 			if (mode == X86EMUL_MODE_PROT64)
4466 				/* switch between 4/8 bytes */
4467 				ctxt->ad_bytes = def_ad_bytes ^ 12;
4468 			else
4469 				/* switch between 2/4 bytes */
4470 				ctxt->ad_bytes = def_ad_bytes ^ 6;
4471 			break;
4472 		case 0x26:	/* ES override */
4473 		case 0x2e:	/* CS override */
4474 		case 0x36:	/* SS override */
4475 		case 0x3e:	/* DS override */
4476 			has_seg_override = true;
4477 			ctxt->seg_override = (ctxt->b >> 3) & 3;
4478 			break;
4479 		case 0x64:	/* FS override */
4480 		case 0x65:	/* GS override */
4481 			has_seg_override = true;
4482 			ctxt->seg_override = ctxt->b & 7;
4483 			break;
4484 		case 0x40 ... 0x4f: /* REX */
4485 			if (mode != X86EMUL_MODE_PROT64)
4486 				goto done_prefixes;
4487 			ctxt->rex_prefix = ctxt->b;
4488 			continue;
4489 		case 0xf0:	/* LOCK */
4490 			ctxt->lock_prefix = 1;
4491 			break;
4492 		case 0xf2:	/* REPNE/REPNZ */
4493 		case 0xf3:	/* REP/REPE/REPZ */
4494 			ctxt->rep_prefix = ctxt->b;
4495 			break;
4496 		default:
4497 			goto done_prefixes;
4498 		}
4499 
4500 		/* Any legacy prefix after a REX prefix nullifies its effect. */
4501 
4502 		ctxt->rex_prefix = 0;
4503 	}
4504 
4505 done_prefixes:
4506 
4507 	/* REX prefix. */
4508 	if (ctxt->rex_prefix & 8)
4509 		ctxt->op_bytes = 8;	/* REX.W */
4510 
4511 	/* Opcode byte(s). */
4512 	opcode = opcode_table[ctxt->b];
4513 	/* Two-byte opcode? */
4514 	if (ctxt->b == 0x0f) {
4515 		ctxt->opcode_len = 2;
4516 		ctxt->b = insn_fetch(u8, ctxt);
4517 		opcode = twobyte_table[ctxt->b];
4518 
4519 		/* 0F_38 opcode map */
4520 		if (ctxt->b == 0x38) {
4521 			ctxt->opcode_len = 3;
4522 			ctxt->b = insn_fetch(u8, ctxt);
4523 			opcode = opcode_map_0f_38[ctxt->b];
4524 		}
4525 	}
4526 	ctxt->d = opcode.flags;
4527 
4528 	if (ctxt->d & ModRM)
4529 		ctxt->modrm = insn_fetch(u8, ctxt);
4530 
4531 	/* vex-prefix instructions are not implemented */
4532 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4533 	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4534 		ctxt->d = NotImpl;
4535 	}
4536 
4537 	while (ctxt->d & GroupMask) {
4538 		switch (ctxt->d & GroupMask) {
4539 		case Group:
4540 			goffset = (ctxt->modrm >> 3) & 7;
4541 			opcode = opcode.u.group[goffset];
4542 			break;
4543 		case GroupDual:
4544 			goffset = (ctxt->modrm >> 3) & 7;
4545 			if ((ctxt->modrm >> 6) == 3)
4546 				opcode = opcode.u.gdual->mod3[goffset];
4547 			else
4548 				opcode = opcode.u.gdual->mod012[goffset];
4549 			break;
4550 		case RMExt:
4551 			goffset = ctxt->modrm & 7;
4552 			opcode = opcode.u.group[goffset];
4553 			break;
4554 		case Prefix:
4555 			if (ctxt->rep_prefix && op_prefix)
4556 				return EMULATION_FAILED;
4557 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4558 			switch (simd_prefix) {
4559 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4560 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4561 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4562 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4563 			}
4564 			break;
4565 		case Escape:
4566 			if (ctxt->modrm > 0xbf)
4567 				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4568 			else
4569 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4570 			break;
4571 		case InstrDual:
4572 			if ((ctxt->modrm >> 6) == 3)
4573 				opcode = opcode.u.idual->mod3;
4574 			else
4575 				opcode = opcode.u.idual->mod012;
4576 			break;
4577 		default:
4578 			return EMULATION_FAILED;
4579 		}
4580 
4581 		ctxt->d &= ~(u64)GroupMask;
4582 		ctxt->d |= opcode.flags;
4583 	}
4584 
4585 	/* Unrecognised? */
4586 	if (ctxt->d == 0)
4587 		return EMULATION_FAILED;
4588 
4589 	ctxt->execute = opcode.u.execute;
4590 
4591 	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4592 		return EMULATION_FAILED;
4593 
4594 	if (unlikely(ctxt->d &
4595 	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4596 	     No16))) {
4597 		/*
4598 		 * These are copied unconditionally here, and checked unconditionally
4599 		 * in x86_emulate_insn.
4600 		 */
4601 		ctxt->check_perm = opcode.check_perm;
4602 		ctxt->intercept = opcode.intercept;
4603 
4604 		if (ctxt->d & NotImpl)
4605 			return EMULATION_FAILED;
4606 
4607 		if (mode == X86EMUL_MODE_PROT64) {
4608 			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4609 				ctxt->op_bytes = 8;
4610 			else if (ctxt->d & NearBranch)
4611 				ctxt->op_bytes = 8;
4612 		}
4613 
4614 		if (ctxt->d & Op3264) {
4615 			if (mode == X86EMUL_MODE_PROT64)
4616 				ctxt->op_bytes = 8;
4617 			else
4618 				ctxt->op_bytes = 4;
4619 		}
4620 
4621 		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4622 			ctxt->op_bytes = 4;
4623 
4624 		if (ctxt->d & Sse)
4625 			ctxt->op_bytes = 16;
4626 		else if (ctxt->d & Mmx)
4627 			ctxt->op_bytes = 8;
4628 	}
4629 
4630 	/* ModRM and SIB bytes. */
4631 	if (ctxt->d & ModRM) {
4632 		rc = decode_modrm(ctxt, &ctxt->memop);
4633 		if (!has_seg_override) {
4634 			has_seg_override = true;
4635 			ctxt->seg_override = ctxt->modrm_seg;
4636 		}
4637 	} else if (ctxt->d & MemAbs)
4638 		rc = decode_abs(ctxt, &ctxt->memop);
4639 	if (rc != X86EMUL_CONTINUE)
4640 		goto done;
4641 
4642 	if (!has_seg_override)
4643 		ctxt->seg_override = VCPU_SREG_DS;
4644 
4645 	ctxt->memop.addr.mem.seg = ctxt->seg_override;
4646 
4647 	/*
4648 	 * Decode and fetch the source operand: register, memory
4649 	 * or immediate.
4650 	 */
4651 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4652 	if (rc != X86EMUL_CONTINUE)
4653 		goto done;
4654 
4655 	/*
4656 	 * Decode and fetch the second source operand: register, memory
4657 	 * or immediate.
4658 	 */
4659 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4660 	if (rc != X86EMUL_CONTINUE)
4661 		goto done;
4662 
4663 	/* Decode and fetch the destination operand: register or memory. */
4664 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4665 
4666 	if (ctxt->rip_relative)
4667 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4668 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
4669 
4670 done:
4671 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4672 }
4673 
4674 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4675 {
4676 	return ctxt->d & PageTable;
4677 }
4678 
4679 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4680 {
4681 	/*
4682 	 * The second termination condition only applies to REPE and REPNE.
4683 	 * If the repeat string prefix is REPE/REPZ or REPNE/REPNZ, also test
4684 	 * the corresponding termination condition:
4685 	 * 	- if REPE/REPZ and ZF = 0 then done
4686 	 * 	- if REPNE/REPNZ and ZF = 1 then done
4687 	 */
4688 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4689 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4690 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
4691 		 ((ctxt->eflags & EFLG_ZF) == 0))
4692 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
4693 		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4694 		return true;
4695 
4696 	return false;
4697 }
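
/*
 * Example: "repe cmpsb" (f3 a6) keeps iterating while RCX != 0 and ZF == 1;
 * the first mismatch clears ZF and the test above reports completion even
 * though RCX may still be non-zero.  A plain "rep movsb" only stops when
 * RCX reaches zero, which is checked separately in x86_emulate_insn().
 */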
4698 
4699 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4700 {
4701 	bool fault = false;
4702 
4703 	ctxt->ops->get_fpu(ctxt);
4704 	asm volatile("1: fwait \n\t"
4705 		     "2: \n\t"
4706 		     ".pushsection .fixup,\"ax\" \n\t"
4707 		     "3: \n\t"
4708 		     "movb $1, %[fault] \n\t"
4709 		     "jmp 2b \n\t"
4710 		     ".popsection \n\t"
4711 		     _ASM_EXTABLE(1b, 3b)
4712 		     : [fault]"+qm"(fault));
4713 	ctxt->ops->put_fpu(ctxt);
4714 
4715 	if (unlikely(fault))
4716 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
4717 
4718 	return X86EMUL_CONTINUE;
4719 }
4720 
4721 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4722 				       struct operand *op)
4723 {
4724 	if (op->type == OP_MM)
4725 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4726 }
4727 
4728 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4729 {
4730 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4731 	if (!(ctxt->d & ByteOp))
4732 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4733 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4734 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4735 	      [fastop]"+S"(fop)
4736 	    : "c"(ctxt->src2.val));
4737 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4738 	if (!fop) /* exception is returned in fop variable */
4739 		return emulate_de(ctxt);
4740 	return X86EMUL_CONTINUE;
4741 }
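
/*
 * Fastop sketch: each fastop table packs one handler per operand size,
 * FASTOP_SIZE bytes apart, in 1/2/4/8-byte order.  For a 4-byte destination
 * fastop() advances fop by __ffs(4) * FASTOP_SIZE == 2 * FASTOP_SIZE, i.e.
 * to the third slot; ByteOp handlers sit at offset 0.  Per the asm
 * constraints, dst/src/src2 travel in RAX/RDX/RCX and flags in RDI; a
 * handler that faults comes back with fop == NULL, which is reported as a
 * divide error above.
 */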
4742 
4743 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4744 {
4745 	memset(&ctxt->rip_relative, 0,
4746 	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4747 
4748 	ctxt->io_read.pos = 0;
4749 	ctxt->io_read.end = 0;
4750 	ctxt->mem_read.end = 0;
4751 }
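
/*
 * Note: the memset above relies on the decode-cache fields being laid out
 * contiguously in struct x86_emulate_ctxt, from rip_relative up to (but not
 * including) modrm; everything in that window is cleared for a fresh decode.
 */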
4752 
4753 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4754 {
4755 	const struct x86_emulate_ops *ops = ctxt->ops;
4756 	int rc = X86EMUL_CONTINUE;
4757 	int saved_dst_type = ctxt->dst.type;
4758 
4759 	ctxt->mem_read.pos = 0;
4760 
4761 	/* LOCK prefix is allowed only with some instructions */
4762 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4763 		rc = emulate_ud(ctxt);
4764 		goto done;
4765 	}
4766 
4767 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4768 		rc = emulate_ud(ctxt);
4769 		goto done;
4770 	}
4771 
4772 	if (unlikely(ctxt->d &
4773 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4774 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4775 				(ctxt->d & Undefined)) {
4776 			rc = emulate_ud(ctxt);
4777 			goto done;
4778 		}
4779 
4780 		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4781 		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4782 			rc = emulate_ud(ctxt);
4783 			goto done;
4784 		}
4785 
4786 		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4787 			rc = emulate_nm(ctxt);
4788 			goto done;
4789 		}
4790 
4791 		if (ctxt->d & Mmx) {
4792 			rc = flush_pending_x87_faults(ctxt);
4793 			if (rc != X86EMUL_CONTINUE)
4794 				goto done;
4795 			/*
4796 			 * Now that we know the FPU is exception-safe, we can fetch
4797 			 * operands from it.
4798 			 */
4799 			fetch_possible_mmx_operand(ctxt, &ctxt->src);
4800 			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4801 			if (!(ctxt->d & Mov))
4802 				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4803 		}
4804 
4805 		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4806 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
4807 						      X86_ICPT_PRE_EXCEPT);
4808 			if (rc != X86EMUL_CONTINUE)
4809 				goto done;
4810 		}
4811 
4812 		/* Instruction can only be executed in protected mode */
4813 		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4814 			rc = emulate_ud(ctxt);
4815 			goto done;
4816 		}
4817 
4818 		/* Privileged instruction can be executed only in CPL=0 */
4819 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4820 			if (ctxt->d & PrivUD)
4821 				rc = emulate_ud(ctxt);
4822 			else
4823 				rc = emulate_gp(ctxt, 0);
4824 			goto done;
4825 		}
4826 
4827 		/* Do instruction specific permission checks */
4828 		if (ctxt->d & CheckPerm) {
4829 			rc = ctxt->check_perm(ctxt);
4830 			if (rc != X86EMUL_CONTINUE)
4831 				goto done;
4832 		}
4833 
4834 		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4835 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
4836 						      X86_ICPT_POST_EXCEPT);
4837 			if (rc != X86EMUL_CONTINUE)
4838 				goto done;
4839 		}
4840 
4841 		if (ctxt->rep_prefix && (ctxt->d & String)) {
4842 			/* All REP prefixes have the same first termination condition */
4843 			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4844 				ctxt->eip = ctxt->_eip;
4845 				ctxt->eflags &= ~EFLG_RF;
4846 				goto done;
4847 			}
4848 		}
4849 	}
4850 
4851 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4852 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
4853 				    ctxt->src.valptr, ctxt->src.bytes);
4854 		if (rc != X86EMUL_CONTINUE)
4855 			goto done;
4856 		ctxt->src.orig_val64 = ctxt->src.val64;
4857 	}
4858 
4859 	if (ctxt->src2.type == OP_MEM) {
4860 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4861 				    &ctxt->src2.val, ctxt->src2.bytes);
4862 		if (rc != X86EMUL_CONTINUE)
4863 			goto done;
4864 	}
4865 
4866 	if ((ctxt->d & DstMask) == ImplicitOps)
4867 		goto special_insn;
4868 
4869 
4870 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4871 		/* optimisation - avoid slow emulated read if Mov */
4872 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4873 				   &ctxt->dst.val, ctxt->dst.bytes);
4874 		if (rc != X86EMUL_CONTINUE)
4875 			goto done;
4876 	}
4877 	ctxt->dst.orig_val = ctxt->dst.val;
4878 
4879 special_insn:
4880 
4881 	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4882 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
4883 					      X86_ICPT_POST_MEMACCESS);
4884 		if (rc != X86EMUL_CONTINUE)
4885 			goto done;
4886 	}
4887 
4888 	if (ctxt->rep_prefix && (ctxt->d & String))
4889 		ctxt->eflags |= EFLG_RF;
4890 	else
4891 		ctxt->eflags &= ~EFLG_RF;
4892 
4893 	if (ctxt->execute) {
4894 		if (ctxt->d & Fastop) {
4895 			void (*fop)(struct fastop *) = (void *)ctxt->execute;
4896 			rc = fastop(ctxt, fop);
4897 			if (rc != X86EMUL_CONTINUE)
4898 				goto done;
4899 			goto writeback;
4900 		}
4901 		rc = ctxt->execute(ctxt);
4902 		if (rc != X86EMUL_CONTINUE)
4903 			goto done;
4904 		goto writeback;
4905 	}
4906 
4907 	if (ctxt->opcode_len == 2)
4908 		goto twobyte_insn;
4909 	else if (ctxt->opcode_len == 3)
4910 		goto threebyte_insn;
4911 
4912 	switch (ctxt->b) {
4913 	case 0x63:		/* movsxd */
4914 		if (ctxt->mode != X86EMUL_MODE_PROT64)
4915 			goto cannot_emulate;
4916 		ctxt->dst.val = (s32) ctxt->src.val;
4917 		break;
4918 	case 0x70 ... 0x7f: /* jcc (short) */
4919 		if (test_cc(ctxt->b, ctxt->eflags))
4920 			rc = jmp_rel(ctxt, ctxt->src.val);
4921 		break;
4922 	case 0x8d: /* lea r16/r32, m */
4923 		ctxt->dst.val = ctxt->src.addr.mem.ea;
4924 		break;
4925 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
4926 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4927 			ctxt->dst.type = OP_NONE;
4928 		else
4929 			rc = em_xchg(ctxt);
4930 		break;
4931 	case 0x98: /* cbw/cwde/cdqe */
4932 		switch (ctxt->op_bytes) {
4933 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4934 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4935 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4936 		}
4937 		break;
4938 	case 0xcc:		/* int3 */
4939 		rc = emulate_int(ctxt, 3);
4940 		break;
4941 	case 0xcd:		/* int n */
4942 		rc = emulate_int(ctxt, ctxt->src.val);
4943 		break;
4944 	case 0xce:		/* into */
4945 		if (ctxt->eflags & EFLG_OF)
4946 			rc = emulate_int(ctxt, 4);
4947 		break;
4948 	case 0xe9: /* jmp rel */
4949 	case 0xeb: /* jmp rel short */
4950 		rc = jmp_rel(ctxt, ctxt->src.val);
4951 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
4952 		break;
4953 	case 0xf4:              /* hlt */
4954 		ctxt->ops->halt(ctxt);
4955 		break;
4956 	case 0xf5:	/* cmc */
4957 		/* complement carry flag from eflags reg */
4958 		ctxt->eflags ^= EFLG_CF;
4959 		break;
4960 	case 0xf8: /* clc */
4961 		ctxt->eflags &= ~EFLG_CF;
4962 		break;
4963 	case 0xf9: /* stc */
4964 		ctxt->eflags |= EFLG_CF;
4965 		break;
4966 	case 0xfc: /* cld */
4967 		ctxt->eflags &= ~EFLG_DF;
4968 		break;
4969 	case 0xfd: /* std */
4970 		ctxt->eflags |= EFLG_DF;
4971 		break;
4972 	default:
4973 		goto cannot_emulate;
4974 	}
4975 
4976 	if (rc != X86EMUL_CONTINUE)
4977 		goto done;
4978 
4979 writeback:
4980 	if (ctxt->d & SrcWrite) {
4981 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4982 		rc = writeback(ctxt, &ctxt->src);
4983 		if (rc != X86EMUL_CONTINUE)
4984 			goto done;
4985 	}
4986 	if (!(ctxt->d & NoWrite)) {
4987 		rc = writeback(ctxt, &ctxt->dst);
4988 		if (rc != X86EMUL_CONTINUE)
4989 			goto done;
4990 	}
4991 
4992 	/*
4993 	 * Restore dst type in case the decode results are reused
4994 	 * (happens for string instructions).
4995 	 */
4996 	ctxt->dst.type = saved_dst_type;
4997 
4998 	if ((ctxt->d & SrcMask) == SrcSI)
4999 		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5000 
5001 	if ((ctxt->d & DstMask) == DstDI)
5002 		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5003 
5004 	if (ctxt->rep_prefix && (ctxt->d & String)) {
5005 		unsigned int count;
5006 		struct read_cache *r = &ctxt->io_read;
5007 		if ((ctxt->d & SrcMask) == SrcSI)
5008 			count = ctxt->src.count;
5009 		else
5010 			count = ctxt->dst.count;
5011 		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5012 
5013 		if (!string_insn_completed(ctxt)) {
5014 			/*
5015 			 * Re-enter the guest when the PIO read-ahead buffer is
5016 			 * empty or, if it is not used, after every 1024 iterations.
5017 			 */
5018 			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5019 			    (r->end == 0 || r->end != r->pos)) {
5020 				/*
5021 				 * Reset read cache. Usually happens before
5022 				 * decode, but since instruction is restarted
5023 				 * we have to do it here.
5024 				 */
5025 				ctxt->mem_read.end = 0;
5026 				writeback_registers(ctxt);
5027 				return EMULATION_RESTART;
5028 			}
5029 			goto done; /* skip rip writeback */
5030 		}
5031 		ctxt->eflags &= ~EFLG_RF;
5032 	}
5033 
5034 	ctxt->eip = ctxt->_eip;
5035 
5036 done:
5037 	if (rc == X86EMUL_PROPAGATE_FAULT) {
5038 		WARN_ON(ctxt->exception.vector > 0x1f);
5039 		ctxt->have_exception = true;
5040 	}
5041 	if (rc == X86EMUL_INTERCEPTED)
5042 		return EMULATION_INTERCEPTED;
5043 
5044 	if (rc == X86EMUL_CONTINUE)
5045 		writeback_registers(ctxt);
5046 
5047 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5048 
5049 twobyte_insn:
5050 	switch (ctxt->b) {
5051 	case 0x09:		/* wbinvd */
5052 		(ctxt->ops->wbinvd)(ctxt);
5053 		break;
5054 	case 0x08:		/* invd */
5055 	case 0x0d:		/* GrpP (prefetch) */
5056 	case 0x18:		/* Grp16 (prefetch/nop) */
5057 	case 0x1f:		/* nop */
5058 		break;
5059 	case 0x20: /* mov cr, reg */
5060 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5061 		break;
5062 	case 0x21: /* mov from dr to reg */
5063 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5064 		break;
5065 	case 0x40 ... 0x4f:	/* cmov */
5066 		if (test_cc(ctxt->b, ctxt->eflags))
5067 			ctxt->dst.val = ctxt->src.val;
5068 		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5069 			 ctxt->op_bytes != 4)
5070 			ctxt->dst.type = OP_NONE; /* no writeback */
5071 		break;
5072 	case 0x80 ... 0x8f: /* jnz rel, etc. */
5073 		if (test_cc(ctxt->b, ctxt->eflags))
5074 			rc = jmp_rel(ctxt, ctxt->src.val);
5075 		break;
5076 	case 0x90 ... 0x9f:     /* setcc r/m8 */
5077 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5078 		break;
5079 	case 0xb6 ... 0xb7:	/* movzx */
5080 		ctxt->dst.bytes = ctxt->op_bytes;
5081 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5082 						       : (u16) ctxt->src.val;
5083 		break;
5084 	case 0xbe ... 0xbf:	/* movsx */
5085 		ctxt->dst.bytes = ctxt->op_bytes;
5086 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5087 							(s16) ctxt->src.val;
5088 		break;
5089 	default:
5090 		goto cannot_emulate;
5091 	}
5092 
5093 threebyte_insn:
5094 
5095 	if (rc != X86EMUL_CONTINUE)
5096 		goto done;
5097 
5098 	goto writeback;
5099 
5100 cannot_emulate:
5101 	return EMULATION_FAILED;
5102 }
5103 
5104 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5105 {
5106 	invalidate_registers(ctxt);
5107 }
5108 
5109 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5110 {
5111 	writeback_registers(ctxt);
5112 }
5113