xref: /openbmc/linux/arch/x86/kvm/emulate.c (revision 95e9fd10)
1 /******************************************************************************
2  * emulate.c
3  *
4  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5  *
6  * Copyright (c) 2005 Keir Fraser
7  *
8  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9  * privileged instructions:
10  *
11  * Copyright (C) 2006 Qumranet
12  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13  *
14  *   Avi Kivity <avi@qumranet.com>
15  *   Yaniv Kamay <yaniv@qumranet.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21  */
22 
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 
28 #include "x86.h"
29 #include "tss.h"
30 
31 /*
32  * Operand types
33  */
34 #define OpNone             0ull
35 #define OpImplicit         1ull  /* No generic decode */
36 #define OpReg              2ull  /* Register */
37 #define OpMem              3ull  /* Memory */
38 #define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
39 #define OpDI               5ull  /* ES:DI/EDI/RDI */
40 #define OpMem64            6ull  /* Memory, 64-bit */
41 #define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
42 #define OpDX               8ull  /* DX register */
43 #define OpCL               9ull  /* CL register (for shifts) */
44 #define OpImmByte         10ull  /* 8-bit sign extended immediate */
45 #define OpOne             11ull  /* Implied 1 */
46 #define OpImm             12ull  /* Sign extended immediate */
47 #define OpMem16           13ull  /* Memory operand (16-bit). */
48 #define OpMem32           14ull  /* Memory operand (32-bit). */
49 #define OpImmU            15ull  /* Immediate operand, zero extended */
50 #define OpSI              16ull  /* SI/ESI/RSI */
51 #define OpImmFAddr        17ull  /* Immediate far address */
52 #define OpMemFAddr        18ull  /* Far address in memory */
53 #define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
54 #define OpES              20ull  /* ES */
55 #define OpCS              21ull  /* CS */
56 #define OpSS              22ull  /* SS */
57 #define OpDS              23ull  /* DS */
58 #define OpFS              24ull  /* FS */
59 #define OpGS              25ull  /* GS */
60 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
61 
62 #define OpBits             5  /* Width of operand field */
63 #define OpMask             ((1ull << OpBits) - 1)
64 
65 /*
66  * Opcode effective-address decode tables.
67  * Note that we only emulate instructions that have at least one memory
68  * operand (excluding implicit stack references). We assume that stack
69  * references and instruction fetches will never occur in special memory
70  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
71  * not be handled.
72  */
73 
74 /* Operand sizes: 8-bit operands or specified/overridden size. */
75 #define ByteOp      (1<<0)	/* 8-bit operands. */
76 /* Destination operand type. */
77 #define DstShift    1
78 #define ImplicitOps (OpImplicit << DstShift)
79 #define DstReg      (OpReg << DstShift)
80 #define DstMem      (OpMem << DstShift)
81 #define DstAcc      (OpAcc << DstShift)
82 #define DstDI       (OpDI << DstShift)
83 #define DstMem64    (OpMem64 << DstShift)
84 #define DstImmUByte (OpImmUByte << DstShift)
85 #define DstDX       (OpDX << DstShift)
86 #define DstMask     (OpMask << DstShift)
87 /* Source operand type. */
88 #define SrcShift    6
89 #define SrcNone     (OpNone << SrcShift)
90 #define SrcReg      (OpReg << SrcShift)
91 #define SrcMem      (OpMem << SrcShift)
92 #define SrcMem16    (OpMem16 << SrcShift)
93 #define SrcMem32    (OpMem32 << SrcShift)
94 #define SrcImm      (OpImm << SrcShift)
95 #define SrcImmByte  (OpImmByte << SrcShift)
96 #define SrcOne      (OpOne << SrcShift)
97 #define SrcImmUByte (OpImmUByte << SrcShift)
98 #define SrcImmU     (OpImmU << SrcShift)
99 #define SrcSI       (OpSI << SrcShift)
100 #define SrcImmFAddr (OpImmFAddr << SrcShift)
101 #define SrcMemFAddr (OpMemFAddr << SrcShift)
102 #define SrcAcc      (OpAcc << SrcShift)
103 #define SrcImmU16   (OpImmU16 << SrcShift)
104 #define SrcDX       (OpDX << SrcShift)
105 #define SrcMem8     (OpMem8 << SrcShift)
106 #define SrcMask     (OpMask << SrcShift)
107 #define BitOp       (1<<11)
108 #define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
109 #define String      (1<<13)     /* String instruction (rep capable) */
110 #define Stack       (1<<14)     /* Stack instruction (push/pop) */
111 #define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
112 #define Group       (1<<15)     /* Bits 5:3 of ModRM byte extend opcode */
113 #define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
114 #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
115 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
116 #define Sse         (1<<18)     /* SSE Vector instruction */
117 /* Generic ModRM decode. */
118 #define ModRM       (1<<19)
119 /* Destination is only written; never read. */
120 #define Mov         (1<<20)
121 /* Misc flags */
122 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
123 #define VendorSpecific (1<<22) /* Vendor specific instruction */
124 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
125 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
126 #define Undefined   (1<<25) /* No Such Instruction */
127 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
128 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
129 #define No64	    (1<<28)
130 #define PageTable   (1 << 29)   /* instruction used to write page table */
131 /* Source 2 operand type */
132 #define Src2Shift   (30)
133 #define Src2None    (OpNone << Src2Shift)
134 #define Src2CL      (OpCL << Src2Shift)
135 #define Src2ImmByte (OpImmByte << Src2Shift)
136 #define Src2One     (OpOne << Src2Shift)
137 #define Src2Imm     (OpImm << Src2Shift)
138 #define Src2ES      (OpES << Src2Shift)
139 #define Src2CS      (OpCS << Src2Shift)
140 #define Src2SS      (OpSS << Src2Shift)
141 #define Src2DS      (OpDS << Src2Shift)
142 #define Src2FS      (OpFS << Src2Shift)
143 #define Src2GS      (OpGS << Src2Shift)
144 #define Src2Mask    (OpMask << Src2Shift)
145 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
146 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
147 #define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
148 #define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
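/*
 * Together these pack up to three operand types into the 56-bit flags
 * word: destination in bits 5:1, source in bits 10:6, second source in
 * bits 34:30.  Illustrative decode, not part of the build:
 *
 *	u64 flags = DstReg | SrcMem | ModRM;
 *	unsigned dst = (flags & DstMask) >> DstShift;	// == OpReg
 *	unsigned src = (flags & SrcMask) >> SrcShift;	// == OpMem
 */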
149 
150 #define X2(x...) x, x
151 #define X3(x...) X2(x), x
152 #define X4(x...) X2(x), X2(x)
153 #define X5(x...) X4(x), x
154 #define X6(x...) X4(x), X2(x)
155 #define X7(x...) X4(x), X3(x)
156 #define X8(x...) X4(x), X4(x)
157 #define X16(x...) X8(x), X8(x)
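/*
 * The X<n> macros repeat a table entry n times: X4(entry) expands to
 * "entry, entry, entry, entry".  They keep the 256-entry opcode tables
 * that follow compact; a run of sixteen identical entries collapses to a
 * single X16(...).
 */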
158 
159 struct opcode {
160 	u64 flags : 56;
161 	u64 intercept : 8;
162 	union {
163 		int (*execute)(struct x86_emulate_ctxt *ctxt);
164 		struct opcode *group;
165 		struct group_dual *gdual;
166 		struct gprefix *gprefix;
167 	} u;
168 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
169 };
170 
171 struct group_dual {
172 	struct opcode mod012[8];
173 	struct opcode mod3[8];
174 };
175 
176 struct gprefix {
177 	struct opcode pfx_no;
178 	struct opcode pfx_66;
179 	struct opcode pfx_f2;
180 	struct opcode pfx_f3;
181 };
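/*
 * Decode dispatch sketch: an opcode flagged Group indexes u.group[] with
 * bits 5:3 of the ModRM byte; GroupDual additionally selects mod3[]
 * instead of mod012[] when the ModRM mod field is 3; Prefix chooses one
 * of the gprefix entries based on a mandatory 66/f2/f3 prefix, falling
 * back to pfx_no.
 */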
182 
183 /* EFLAGS bit definitions. */
184 #define EFLG_ID (1<<21)
185 #define EFLG_VIP (1<<20)
186 #define EFLG_VIF (1<<19)
187 #define EFLG_AC (1<<18)
188 #define EFLG_VM (1<<17)
189 #define EFLG_RF (1<<16)
190 #define EFLG_IOPL (3<<12)
191 #define EFLG_NT (1<<14)
192 #define EFLG_OF (1<<11)
193 #define EFLG_DF (1<<10)
194 #define EFLG_IF (1<<9)
195 #define EFLG_TF (1<<8)
196 #define EFLG_SF (1<<7)
197 #define EFLG_ZF (1<<6)
198 #define EFLG_AF (1<<4)
199 #define EFLG_PF (1<<2)
200 #define EFLG_CF (1<<0)
201 
202 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
203 #define EFLG_RESERVED_ONE_MASK 2
204 
205 /*
206  * Instruction emulation:
207  * Most instructions are emulated directly via a fragment of inline assembly
208  * code. This allows us to save/restore EFLAGS and thus very easily pick up
209  * any modified flags.
210  */
211 
212 #if defined(CONFIG_X86_64)
213 #define _LO32 "k"		/* force 32-bit operand */
214 #define _STK  "%%rsp"		/* stack pointer */
215 #elif defined(__i386__)
216 #define _LO32 ""		/* force 32-bit operand */
217 #define _STK  "%%esp"		/* stack pointer */
218 #endif
219 
220 /*
221  * These EFLAGS bits are restored from saved value during emulation, and
222  * any changes are written back to the saved value after emulation.
223  */
224 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
225 
226 /* Before executing instruction: restore necessary bits in EFLAGS. */
227 #define _PRE_EFLAGS(_sav, _msk, _tmp)					\
228 	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
229 	"movl %"_sav",%"_LO32 _tmp"; "                                  \
230 	"push %"_tmp"; "                                                \
231 	"push %"_tmp"; "                                                \
232 	"movl %"_msk",%"_LO32 _tmp"; "                                  \
233 	"andl %"_LO32 _tmp",("_STK"); "                                 \
234 	"pushf; "                                                       \
235 	"notl %"_LO32 _tmp"; "                                          \
236 	"andl %"_LO32 _tmp",("_STK"); "                                 \
237 	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
238 	"pop  %"_tmp"; "                                                \
239 	"orl  %"_LO32 _tmp",("_STK"); "                                 \
240 	"popf; "                                                        \
241 	"pop  %"_sav"; "
242 
243 /* After executing instruction: write-back necessary bits in EFLAGS. */
244 #define _POST_EFLAGS(_sav, _msk, _tmp) \
245 	/* _sav |= EFLAGS & _msk; */		\
246 	"pushf; "				\
247 	"pop  %"_tmp"; "			\
248 	"andl %"_msk",%"_LO32 _tmp"; "		\
249 	"orl  %"_LO32 _tmp",%"_sav"; "
250 
251 #ifdef CONFIG_X86_64
252 #define ON64(x) x
253 #else
254 #define ON64(x)
255 #endif
256 
257 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)	\
258 	do {								\
259 		__asm__ __volatile__ (					\
260 			_PRE_EFLAGS("0", "4", "2")			\
261 			_op _suffix " %"_x"3,%1; "			\
262 			_POST_EFLAGS("0", "4", "2")			\
263 			: "=m" ((ctxt)->eflags),			\
264 			  "+q" (*(_dsttype*)&(ctxt)->dst.val),		\
265 			  "=&r" (_tmp)					\
266 			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK));	\
267 	} while (0)
268 
269 
270 /* Raw emulation: instruction has two explicit operands. */
271 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)		\
272 	do {								\
273 		unsigned long _tmp;					\
274 									\
275 		switch ((ctxt)->dst.bytes) {				\
276 		case 2:							\
277 			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);	\
278 			break;						\
279 		case 4:							\
280 			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);	\
281 			break;						\
282 		case 8:							\
283 			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
284 			break;						\
285 		}							\
286 	} while (0)
287 
288 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)		     \
289 	do {								     \
290 		unsigned long _tmp;					     \
291 		switch ((ctxt)->dst.bytes) {				     \
292 		case 1:							     \
293 			____emulate_2op(ctxt,_op,_bx,_by,"b",u8);	     \
294 			break;						     \
295 		default:						     \
296 			__emulate_2op_nobyte(ctxt, _op,			     \
297 					     _wx, _wy, _lx, _ly, _qx, _qy);  \
298 			break;						     \
299 		}							     \
300 	} while (0)
301 
302 /* Source operand is byte-sized and may be restricted to just %cl. */
303 #define emulate_2op_SrcB(ctxt, _op)					\
304 	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
305 
306 /* Source operand is byte, word, long or quad sized. */
307 #define emulate_2op_SrcV(ctxt, _op)					\
308 	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
309 
310 /* Source operand is word, long or quad sized. */
311 #define emulate_2op_SrcV_nobyte(ctxt, _op)				\
312 	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
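/*
 * In each pair of strings passed above, the first is the asm template
 * modifier applied to the source operand (%b3, %w3, %k3, ...) and the
 * second is its gcc constraint: "q" demands a byte-addressable register,
 * "r" any general register, and "c" pins the shift count to CL.
 */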
313 
314 /* Instruction has three operands; one of them is the shift count in CL */
315 #define __emulate_2op_cl(ctxt, _op, _suffix, _type)		\
316 	do {								\
317 		unsigned long _tmp;					\
318 		_type _clv  = (ctxt)->src2.val;				\
319 		_type _srcv = (ctxt)->src.val;				\
320 		_type _dstv = (ctxt)->dst.val;				\
321 									\
322 		__asm__ __volatile__ (					\
323 			_PRE_EFLAGS("0", "5", "2")			\
324 			_op _suffix " %4,%1 \n"				\
325 			_POST_EFLAGS("0", "5", "2")			\
326 			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
327 			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
328 			);						\
329 									\
330 		(ctxt)->src2.val  = (unsigned long) _clv;		\
331 		(ctxt)->src.val = (unsigned long) _srcv;		\
332 		(ctxt)->dst.val = (unsigned long) _dstv;		\
333 	} while (0)
334 
335 #define emulate_2op_cl(ctxt, _op)					\
336 	do {								\
337 		switch ((ctxt)->dst.bytes) {				\
338 		case 2:							\
339 			__emulate_2op_cl(ctxt, _op, "w", u16);		\
340 			break;						\
341 		case 4:							\
342 			__emulate_2op_cl(ctxt, _op, "l", u32);		\
343 			break;						\
344 		case 8:							\
345 			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));	\
346 			break;						\
347 		}							\
348 	} while (0)
349 
350 #define __emulate_1op(ctxt, _op, _suffix)				\
351 	do {								\
352 		unsigned long _tmp;					\
353 									\
354 		__asm__ __volatile__ (					\
355 			_PRE_EFLAGS("0", "3", "2")			\
356 			_op _suffix " %1; "				\
357 			_POST_EFLAGS("0", "3", "2")			\
358 			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
359 			  "=&r" (_tmp)					\
360 			: "i" (EFLAGS_MASK));				\
361 	} while (0)
362 
363 /* Instruction has only one explicit operand (no source operand). */
364 #define emulate_1op(ctxt, _op)						\
365 	do {								\
366 		switch ((ctxt)->dst.bytes) {				\
367 		case 1:	__emulate_1op(ctxt, _op, "b"); break;		\
368 		case 2:	__emulate_1op(ctxt, _op, "w"); break;		\
369 		case 4:	__emulate_1op(ctxt, _op, "l"); break;		\
370 		case 8:	ON64(__emulate_1op(ctxt, _op, "q")); break;	\
371 		}							\
372 	} while (0)
373 
374 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)			\
375 	do {								\
376 		unsigned long _tmp;					\
377 		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX];		\
378 		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX];		\
379 									\
380 		__asm__ __volatile__ (					\
381 			_PRE_EFLAGS("0", "5", "1")			\
382 			"1: \n\t"					\
383 			_op _suffix " %6; "				\
384 			"2: \n\t"					\
385 			_POST_EFLAGS("0", "5", "1")			\
386 			".pushsection .fixup,\"ax\" \n\t"		\
387 			"3: movb $1, %4 \n\t"				\
388 			"jmp 2b \n\t"					\
389 			".popsection \n\t"				\
390 			_ASM_EXTABLE(1b, 3b)				\
391 			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
392 			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
393 			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
394 			  "a" (*rax), "d" (*rdx));			\
395 	} while (0)
396 
397 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
398 #define emulate_1op_rax_rdx(ctxt, _op, _ex)	\
399 	do {								\
400 		switch((ctxt)->src.bytes) {				\
401 		case 1:							\
402 			__emulate_1op_rax_rdx(ctxt, _op, "b", _ex);	\
403 			break;						\
404 		case 2:							\
405 			__emulate_1op_rax_rdx(ctxt, _op, "w", _ex);	\
406 			break;						\
407 		case 4:							\
408 			__emulate_1op_rax_rdx(ctxt, _op, "l", _ex);	\
409 			break;						\
410 		case 8: ON64(						\
411 			__emulate_1op_rax_rdx(ctxt, _op, "q", _ex));	\
412 			break;						\
413 		}							\
414 	} while (0)
415 
416 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
417 				    enum x86_intercept intercept,
418 				    enum x86_intercept_stage stage)
419 {
420 	struct x86_instruction_info info = {
421 		.intercept  = intercept,
422 		.rep_prefix = ctxt->rep_prefix,
423 		.modrm_mod  = ctxt->modrm_mod,
424 		.modrm_reg  = ctxt->modrm_reg,
425 		.modrm_rm   = ctxt->modrm_rm,
426 		.src_val    = ctxt->src.val64,
427 		.src_bytes  = ctxt->src.bytes,
428 		.dst_bytes  = ctxt->dst.bytes,
429 		.ad_bytes   = ctxt->ad_bytes,
430 		.next_rip   = ctxt->eip,
431 	};
432 
433 	return ctxt->ops->intercept(ctxt, &info, stage);
434 }
435 
436 static void assign_masked(ulong *dest, ulong src, ulong mask)
437 {
438 	*dest = (*dest & ~mask) | (src & mask);
439 }
440 
441 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
442 {
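	/*
	 * Callers special-case ad_bytes == sizeof(unsigned long), since
	 * shifting 1UL by BITS_PER_LONG here would be undefined.
	 */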
443 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
444 }
445 
446 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
447 {
448 	u16 sel;
449 	struct desc_struct ss;
450 
451 	if (ctxt->mode == X86EMUL_MODE_PROT64)
452 		return ~0UL;
453 	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
454 	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
455 }
456 
457 static int stack_size(struct x86_emulate_ctxt *ctxt)
458 {
459 	return (__fls(stack_mask(ctxt)) + 1) >> 3;
460 }
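/*
 * Worked example: a 16-bit stack segment (ss.d == 0) gives stack_mask()
 * == 0xffff, so stack_size() is (__fls(0xffff) + 1) >> 3 == 2; a 32-bit
 * segment yields 4, and 64-bit mode yields 8.
 */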
461 
462 /* Access/update address held in a register, based on addressing mode. */
463 static inline unsigned long
464 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
465 {
466 	if (ctxt->ad_bytes == sizeof(unsigned long))
467 		return reg;
468 	else
469 		return reg & ad_mask(ctxt);
470 }
471 
472 static inline unsigned long
473 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
474 {
475 	return address_mask(ctxt, reg);
476 }
477 
478 static void masked_increment(ulong *reg, ulong mask, int inc)
479 {
480 	assign_masked(reg, *reg + inc, mask);
481 }
482 
483 static inline void
484 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
485 {
486 	ulong mask;
487 
488 	if (ctxt->ad_bytes == sizeof(unsigned long))
489 		mask = ~0UL;
490 	else
491 		mask = ad_mask(ctxt);
492 	masked_increment(reg, mask, inc);
493 }
494 
495 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
496 {
497 	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
498 }
499 
500 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
501 {
502 	register_address_increment(ctxt, &ctxt->_eip, rel);
503 }
504 
505 static u32 desc_limit_scaled(struct desc_struct *desc)
506 {
507 	u32 limit = get_desc_limit(desc);
508 
509 	return desc->g ? (limit << 12) | 0xfff : limit;
510 }
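/*
 * Example: with the granularity bit set and limit == 0xfffff this returns
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB segment.
 */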
511 
512 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
513 {
514 	ctxt->has_seg_override = true;
515 	ctxt->seg_override = seg;
516 }
517 
518 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
519 {
520 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
521 		return 0;
522 
523 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
524 }
525 
526 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
527 {
528 	if (!ctxt->has_seg_override)
529 		return 0;
530 
531 	return ctxt->seg_override;
532 }
533 
534 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
535 			     u32 error, bool valid)
536 {
537 	ctxt->exception.vector = vec;
538 	ctxt->exception.error_code = error;
539 	ctxt->exception.error_code_valid = valid;
540 	return X86EMUL_PROPAGATE_FAULT;
541 }
542 
543 static int emulate_db(struct x86_emulate_ctxt *ctxt)
544 {
545 	return emulate_exception(ctxt, DB_VECTOR, 0, false);
546 }
547 
548 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
549 {
550 	return emulate_exception(ctxt, GP_VECTOR, err, true);
551 }
552 
553 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
554 {
555 	return emulate_exception(ctxt, SS_VECTOR, err, true);
556 }
557 
558 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
559 {
560 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
561 }
562 
563 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
564 {
565 	return emulate_exception(ctxt, TS_VECTOR, err, true);
566 }
567 
568 static int emulate_de(struct x86_emulate_ctxt *ctxt)
569 {
570 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
571 }
572 
573 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
574 {
575 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
576 }
577 
578 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
579 {
580 	u16 selector;
581 	struct desc_struct desc;
582 
583 	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
584 	return selector;
585 }
586 
587 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
588 				 unsigned seg)
589 {
590 	u16 dummy;
591 	u32 base3;
592 	struct desc_struct desc;
593 
594 	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
595 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
596 }
597 
598 /*
599  * x86 defines three classes of vector instructions: explicitly
600  * aligned, explicitly unaligned, and the rest, which change behaviour
601  * depending on whether they're AVX encoded or not.
602  *
603  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
604  * subject to the same check.
605  */
606 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
607 {
608 	if (likely(size < 16))
609 		return false;
610 
611 	if (ctxt->d & Aligned)
612 		return true;
613 	else if (ctxt->d & Unaligned)
614 		return false;
615 	else if (ctxt->d & Avx)
616 		return false;
617 	else
618 		return true;
619 }
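/*
 * Example: MOVDQA decodes with Aligned set, so a 16-byte access through
 * an address that is not 16-byte aligned takes the #GP(0) path in
 * __linearize() below; MOVDQU (Unaligned) and AVX-encoded forms skip the
 * check.
 */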
620 
621 static int __linearize(struct x86_emulate_ctxt *ctxt,
622 		     struct segmented_address addr,
623 		     unsigned size, bool write, bool fetch,
624 		     ulong *linear)
625 {
626 	struct desc_struct desc;
627 	bool usable;
628 	ulong la;
629 	u32 lim;
630 	u16 sel;
631 	unsigned cpl, rpl;
632 
633 	la = seg_base(ctxt, addr.seg) + addr.ea;
634 	switch (ctxt->mode) {
635 	case X86EMUL_MODE_REAL:
636 		break;
637 	case X86EMUL_MODE_PROT64:
638 		if (((signed long)la << 16) >> 16 != la)
639 			return emulate_gp(ctxt, 0);
640 		break;
641 	default:
642 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
643 						addr.seg);
644 		if (!usable)
645 			goto bad;
646 		/* code segment or read-only data segment */
647 		if (((desc.type & 8) || !(desc.type & 2)) && write)
648 			goto bad;
649 		/* unreadable code segment */
650 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
651 			goto bad;
652 		lim = desc_limit_scaled(&desc);
653 		if ((desc.type & 8) || !(desc.type & 4)) {
654 			/* expand-up segment */
655 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
656 				goto bad;
657 		} else {
658 			/* expand-down segment */
659 			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
660 				goto bad;
661 			lim = desc.d ? 0xffffffff : 0xffff;
662 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
663 				goto bad;
664 		}
665 		cpl = ctxt->ops->cpl(ctxt);
666 		rpl = sel & 3;
667 		cpl = max(cpl, rpl);
668 		if (!(desc.type & 8)) {
669 			/* data segment */
670 			if (cpl > desc.dpl)
671 				goto bad;
672 		} else if ((desc.type & 8) && !(desc.type & 4)) {
673 			/* nonconforming code segment */
674 			if (cpl != desc.dpl)
675 				goto bad;
676 		} else if ((desc.type & 8) && (desc.type & 4)) {
677 			/* conforming code segment */
678 			if (cpl < desc.dpl)
679 				goto bad;
680 		}
681 		break;
682 	}
683 	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
684 		la &= (u32)-1;
685 	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
686 		return emulate_gp(ctxt, 0);
687 	*linear = la;
688 	return X86EMUL_CONTINUE;
689 bad:
690 	if (addr.seg == VCPU_SREG_SS)
691 		return emulate_ss(ctxt, 0);
692 	else
693 		return emulate_gp(ctxt, 0);
694 }
695 
696 static int linearize(struct x86_emulate_ctxt *ctxt,
697 		     struct segmented_address addr,
698 		     unsigned size, bool write,
699 		     ulong *linear)
700 {
701 	return __linearize(ctxt, addr, size, write, false, linear);
702 }
703 
704 
705 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
706 			      struct segmented_address addr,
707 			      void *data,
708 			      unsigned size)
709 {
710 	int rc;
711 	ulong linear;
712 
713 	rc = linearize(ctxt, addr, size, false, &linear);
714 	if (rc != X86EMUL_CONTINUE)
715 		return rc;
716 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
717 }
718 
719 /*
720  * Fetch the next byte of the instruction being emulated which is pointed to
721  * by ctxt->_eip, then increment ctxt->_eip.
722  *
723  * Also prefetch the remaining bytes of the instruction without crossing a
724  * page boundary, if they are not already in the fetch_cache.
725  */
726 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
727 {
728 	struct fetch_cache *fc = &ctxt->fetch;
729 	int rc;
730 	int size, cur_size;
731 
732 	if (ctxt->_eip == fc->end) {
733 		unsigned long linear;
734 		struct segmented_address addr = { .seg = VCPU_SREG_CS,
735 						  .ea  = ctxt->_eip };
736 		cur_size = fc->end - fc->start;
737 		size = min(15UL - cur_size,
738 			   PAGE_SIZE - offset_in_page(ctxt->_eip));
739 		rc = __linearize(ctxt, addr, size, false, true, &linear);
740 		if (unlikely(rc != X86EMUL_CONTINUE))
741 			return rc;
742 		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
743 				      size, &ctxt->exception);
744 		if (unlikely(rc != X86EMUL_CONTINUE))
745 			return rc;
746 		fc->end += size;
747 	}
748 	*dest = fc->data[ctxt->_eip - fc->start];
749 	ctxt->_eip++;
750 	return X86EMUL_CONTINUE;
751 }
752 
753 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
754 			 void *dest, unsigned size)
755 {
756 	int rc;
757 
758 	/* x86 instructions are limited to 15 bytes. */
759 	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
760 		return X86EMUL_UNHANDLEABLE;
761 	while (size--) {
762 		rc = do_insn_fetch_byte(ctxt, dest++);
763 		if (rc != X86EMUL_CONTINUE)
764 			return rc;
765 	}
766 	return X86EMUL_CONTINUE;
767 }
768 
769 /* Fetch next part of the instruction being emulated. */
770 #define insn_fetch(_type, _ctxt)					\
771 ({	unsigned long _x;						\
772 	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
773 	if (rc != X86EMUL_CONTINUE)					\
774 		goto done;						\
775 	(_type)_x;							\
776 })
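/*
 * Usage sketch: the caller must declare "int rc" and provide a "done:"
 * label, as decode_modrm() below does:
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 */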
777 
778 #define insn_fetch_arr(_arr, _size, _ctxt)				\
779 ({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
780 	if (rc != X86EMUL_CONTINUE)					\
781 		goto done;						\
782 })
783 
784 /*
785  * Given the 'reg' portion of a ModRM byte, and a register block, return a
786  * pointer into the block that addresses the relevant register.
787  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
788  */
789 static void *decode_register(u8 modrm_reg, unsigned long *regs,
790 			     int highbyte_regs)
791 {
792 	void *p;
793 
794 	p = &regs[modrm_reg];
795 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
796 		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
797 	return p;
798 }
799 
800 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
801 			   struct segmented_address addr,
802 			   u16 *size, unsigned long *address, int op_bytes)
803 {
804 	int rc;
805 
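	/*
	 * With a 16-bit operand size, lgdt/lidt still load a 24-bit base
	 * address, so fetch 3 bytes of base rather than 2.
	 */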
806 	if (op_bytes == 2)
807 		op_bytes = 3;
808 	*address = 0;
809 	rc = segmented_read_std(ctxt, addr, size, 2);
810 	if (rc != X86EMUL_CONTINUE)
811 		return rc;
812 	addr.ea += 2;
813 	rc = segmented_read_std(ctxt, addr, address, op_bytes);
814 	return rc;
815 }
816 
817 static int test_cc(unsigned int condition, unsigned int flags)
818 {
819 	int rc = 0;
820 
821 	switch ((condition & 15) >> 1) {
822 	case 0: /* o */
823 		rc |= (flags & EFLG_OF);
824 		break;
825 	case 1: /* b/c/nae */
826 		rc |= (flags & EFLG_CF);
827 		break;
828 	case 2: /* z/e */
829 		rc |= (flags & EFLG_ZF);
830 		break;
831 	case 3: /* be/na */
832 		rc |= (flags & (EFLG_CF|EFLG_ZF));
833 		break;
834 	case 4: /* s */
835 		rc |= (flags & EFLG_SF);
836 		break;
837 	case 5: /* p/pe */
838 		rc |= (flags & EFLG_PF);
839 		break;
840 	case 7: /* le/ng */
841 		rc |= (flags & EFLG_ZF);
842 		/* fall through */
843 	case 6: /* l/nge */
844 		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
845 		break;
846 	}
847 
848 	/* Odd condition identifiers (lsb == 1) have inverted sense. */
849 	return (!!rc ^ (condition & 1));
850 }
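/*
 * Example: JNE encodes condition 0x5; (condition & 15) >> 1 == 2 selects
 * the ZF test and the odd low bit inverts it, so test_cc(0x5, flags) is
 * true iff ZF is clear.
 */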
851 
852 static void fetch_register_operand(struct operand *op)
853 {
854 	switch (op->bytes) {
855 	case 1:
856 		op->val = *(u8 *)op->addr.reg;
857 		break;
858 	case 2:
859 		op->val = *(u16 *)op->addr.reg;
860 		break;
861 	case 4:
862 		op->val = *(u32 *)op->addr.reg;
863 		break;
864 	case 8:
865 		op->val = *(u64 *)op->addr.reg;
866 		break;
867 	}
868 }
869 
870 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
871 {
872 	ctxt->ops->get_fpu(ctxt);
873 	switch (reg) {
874 	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
875 	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
876 	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
877 	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
878 	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
879 	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
880 	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
881 	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
882 #ifdef CONFIG_X86_64
883 	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
884 	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
885 	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
886 	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
887 	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
888 	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
889 	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
890 	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
891 #endif
892 	default: BUG();
893 	}
894 	ctxt->ops->put_fpu(ctxt);
895 }
896 
897 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
898 			  int reg)
899 {
900 	ctxt->ops->get_fpu(ctxt);
901 	switch (reg) {
902 	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
903 	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
904 	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
905 	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
906 	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
907 	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
908 	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
909 	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
910 #ifdef CONFIG_X86_64
911 	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
912 	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
913 	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
914 	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
915 	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
916 	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
917 	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
918 	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
919 #endif
920 	default: BUG();
921 	}
922 	ctxt->ops->put_fpu(ctxt);
923 }
924 
925 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
926 {
927 	ctxt->ops->get_fpu(ctxt);
928 	switch (reg) {
929 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
930 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
931 	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
932 	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
933 	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
934 	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
935 	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
936 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
937 	default: BUG();
938 	}
939 	ctxt->ops->put_fpu(ctxt);
940 }
941 
942 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
943 {
944 	ctxt->ops->get_fpu(ctxt);
945 	switch (reg) {
946 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
947 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
948 	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
949 	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
950 	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
951 	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
952 	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
953 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
954 	default: BUG();
955 	}
956 	ctxt->ops->put_fpu(ctxt);
957 }
958 
959 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
960 				    struct operand *op)
961 {
962 	unsigned reg = ctxt->modrm_reg;
963 	int highbyte_regs = ctxt->rex_prefix == 0;
964 
965 	if (!(ctxt->d & ModRM))
966 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
967 
968 	if (ctxt->d & Sse) {
969 		op->type = OP_XMM;
970 		op->bytes = 16;
971 		op->addr.xmm = reg;
972 		read_sse_reg(ctxt, &op->vec_val, reg);
973 		return;
974 	}
975 	if (ctxt->d & Mmx) {
976 		reg &= 7;
977 		op->type = OP_MM;
978 		op->bytes = 8;
979 		op->addr.mm = reg;
980 		return;
981 	}
982 
983 	op->type = OP_REG;
984 	if (ctxt->d & ByteOp) {
985 		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
986 		op->bytes = 1;
987 	} else {
988 		op->addr.reg = decode_register(reg, ctxt->regs, 0);
989 		op->bytes = ctxt->op_bytes;
990 	}
991 	fetch_register_operand(op);
992 	op->orig_val = op->val;
993 }
994 
995 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
996 {
997 	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
998 		ctxt->modrm_seg = VCPU_SREG_SS;
999 }
1000 
1001 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1002 			struct operand *op)
1003 {
1004 	u8 sib;
1005 	int index_reg = 0, base_reg = 0, scale;
1006 	int rc = X86EMUL_CONTINUE;
1007 	ulong modrm_ea = 0;
1008 
1009 	if (ctxt->rex_prefix) {
1010 		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
1011 		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
1012 		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
1013 	}
1014 
1015 	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
1016 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1017 	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
1018 	ctxt->modrm_seg = VCPU_SREG_DS;
1019 
1020 	if (ctxt->modrm_mod == 3) {
1021 		op->type = OP_REG;
1022 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1023 		op->addr.reg = decode_register(ctxt->modrm_rm,
1024 					       ctxt->regs, ctxt->d & ByteOp);
1025 		if (ctxt->d & Sse) {
1026 			op->type = OP_XMM;
1027 			op->bytes = 16;
1028 			op->addr.xmm = ctxt->modrm_rm;
1029 			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1030 			return rc;
1031 		}
1032 		if (ctxt->d & Mmx) {
1033 			op->type = OP_MM;
1034 			op->bytes = 8;
1035 			op->addr.mm = ctxt->modrm_rm & 7;
1036 			return rc;
1037 		}
1038 		fetch_register_operand(op);
1039 		return rc;
1040 	}
1041 
1042 	op->type = OP_MEM;
1043 
1044 	if (ctxt->ad_bytes == 2) {
1045 		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
1046 		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
1047 		unsigned si = ctxt->regs[VCPU_REGS_RSI];
1048 		unsigned di = ctxt->regs[VCPU_REGS_RDI];
1049 
1050 		/* 16-bit ModR/M decode. */
1051 		switch (ctxt->modrm_mod) {
1052 		case 0:
1053 			if (ctxt->modrm_rm == 6)
1054 				modrm_ea += insn_fetch(u16, ctxt);
1055 			break;
1056 		case 1:
1057 			modrm_ea += insn_fetch(s8, ctxt);
1058 			break;
1059 		case 2:
1060 			modrm_ea += insn_fetch(u16, ctxt);
1061 			break;
1062 		}
1063 		switch (ctxt->modrm_rm) {
1064 		case 0:
1065 			modrm_ea += bx + si;
1066 			break;
1067 		case 1:
1068 			modrm_ea += bx + di;
1069 			break;
1070 		case 2:
1071 			modrm_ea += bp + si;
1072 			break;
1073 		case 3:
1074 			modrm_ea += bp + di;
1075 			break;
1076 		case 4:
1077 			modrm_ea += si;
1078 			break;
1079 		case 5:
1080 			modrm_ea += di;
1081 			break;
1082 		case 6:
1083 			if (ctxt->modrm_mod != 0)
1084 				modrm_ea += bp;
1085 			break;
1086 		case 7:
1087 			modrm_ea += bx;
1088 			break;
1089 		}
1090 		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1091 		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1092 			ctxt->modrm_seg = VCPU_SREG_SS;
1093 		modrm_ea = (u16)modrm_ea;
1094 	} else {
1095 		/* 32/64-bit ModR/M decode. */
1096 		if ((ctxt->modrm_rm & 7) == 4) {
1097 			sib = insn_fetch(u8, ctxt);
1098 			index_reg |= (sib >> 3) & 7;
1099 			base_reg |= sib & 7;
1100 			scale = sib >> 6;
1101 
1102 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1103 				modrm_ea += insn_fetch(s32, ctxt);
1104 			else {
1105 				modrm_ea += ctxt->regs[base_reg];
1106 				adjust_modrm_seg(ctxt, base_reg);
1107 			}
1108 			if (index_reg != 4)
1109 				modrm_ea += ctxt->regs[index_reg] << scale;
1110 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1111 			if (ctxt->mode == X86EMUL_MODE_PROT64)
1112 				ctxt->rip_relative = 1;
1113 		} else {
1114 			base_reg = ctxt->modrm_rm;
1115 			modrm_ea += ctxt->regs[base_reg];
1116 			adjust_modrm_seg(ctxt, base_reg);
1117 		}
1118 		switch (ctxt->modrm_mod) {
1119 		case 0:
1120 			if (ctxt->modrm_rm == 5)
1121 				modrm_ea += insn_fetch(s32, ctxt);
1122 			break;
1123 		case 1:
1124 			modrm_ea += insn_fetch(s8, ctxt);
1125 			break;
1126 		case 2:
1127 			modrm_ea += insn_fetch(s32, ctxt);
1128 			break;
1129 		}
1130 	}
1131 	op->addr.mem.ea = modrm_ea;
1132 done:
1133 	return rc;
1134 }
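/*
 * Example of decode_modrm() in 16-bit mode: modrm == 0x42 has mod == 1,
 * reg == 0, rm == 2, giving an effective address of SS:[BP + SI + disp8]
 * (rm == 2 selects bp + si, and BP-based forms default to the stack
 * segment).
 */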
1135 
1136 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1137 		      struct operand *op)
1138 {
1139 	int rc = X86EMUL_CONTINUE;
1140 
1141 	op->type = OP_MEM;
1142 	switch (ctxt->ad_bytes) {
1143 	case 2:
1144 		op->addr.mem.ea = insn_fetch(u16, ctxt);
1145 		break;
1146 	case 4:
1147 		op->addr.mem.ea = insn_fetch(u32, ctxt);
1148 		break;
1149 	case 8:
1150 		op->addr.mem.ea = insn_fetch(u64, ctxt);
1151 		break;
1152 	}
1153 done:
1154 	return rc;
1155 }
1156 
1157 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1158 {
1159 	long sv = 0, mask;
1160 
1161 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1162 		mask = ~(ctxt->dst.bytes * 8 - 1);
1163 
1164 		if (ctxt->src.bytes == 2)
1165 			sv = (s16)ctxt->src.val & (s16)mask;
1166 		else if (ctxt->src.bytes == 4)
1167 			sv = (s32)ctxt->src.val & (s32)mask;
1168 
1169 		ctxt->dst.addr.mem.ea += (sv >> 3);
1170 	}
1171 
1172 	/* only subword offset */
1173 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1174 }
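/*
 * Example: "bt" of bit 100 into a 16-bit memory operand: mask == ~15 and
 * sv == 96, so the effective address advances by 96 >> 3 == 12 bytes and
 * the remaining in-word bit offset is 100 & 15 == 4.
 */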
1175 
1176 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1177 			 unsigned long addr, void *dest, unsigned size)
1178 {
1179 	int rc;
1180 	struct read_cache *mc = &ctxt->mem_read;
1181 
1182 	while (size) {
1183 		int n = min(size, 8u);
1184 		size -= n;
1185 		if (mc->pos < mc->end)
1186 			goto read_cached;
1187 
1188 		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1189 					      &ctxt->exception);
1190 		if (rc != X86EMUL_CONTINUE)
1191 			return rc;
1192 		mc->end += n;
1193 
1194 	read_cached:
1195 		memcpy(dest, mc->data + mc->pos, n);
1196 		mc->pos += n;
1197 		dest += n;
1198 		addr += n;
1199 	}
1200 	return X86EMUL_CONTINUE;
1201 }
1202 
1203 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1204 			  struct segmented_address addr,
1205 			  void *data,
1206 			  unsigned size)
1207 {
1208 	int rc;
1209 	ulong linear;
1210 
1211 	rc = linearize(ctxt, addr, size, false, &linear);
1212 	if (rc != X86EMUL_CONTINUE)
1213 		return rc;
1214 	return read_emulated(ctxt, linear, data, size);
1215 }
1216 
1217 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1218 			   struct segmented_address addr,
1219 			   const void *data,
1220 			   unsigned size)
1221 {
1222 	int rc;
1223 	ulong linear;
1224 
1225 	rc = linearize(ctxt, addr, size, true, &linear);
1226 	if (rc != X86EMUL_CONTINUE)
1227 		return rc;
1228 	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1229 					 &ctxt->exception);
1230 }
1231 
1232 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1233 			     struct segmented_address addr,
1234 			     const void *orig_data, const void *data,
1235 			     unsigned size)
1236 {
1237 	int rc;
1238 	ulong linear;
1239 
1240 	rc = linearize(ctxt, addr, size, true, &linear);
1241 	if (rc != X86EMUL_CONTINUE)
1242 		return rc;
1243 	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1244 					   size, &ctxt->exception);
1245 }
1246 
1247 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1248 			   unsigned int size, unsigned short port,
1249 			   void *dest)
1250 {
1251 	struct read_cache *rc = &ctxt->io_read;
1252 
1253 	if (rc->pos == rc->end) { /* refill pio read ahead */
1254 		unsigned int in_page, n;
1255 		unsigned int count = ctxt->rep_prefix ?
1256 			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1257 		in_page = (ctxt->eflags & EFLG_DF) ?
1258 			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1259 			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1260 		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1261 			count);
1262 		if (n == 0)
1263 			n = 1;
1264 		rc->pos = rc->end = 0;
1265 		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1266 			return 0;
1267 		rc->end = n * size;
1268 	}
1269 
1270 	memcpy(dest, rc->data + rc->pos, size);
1271 	rc->pos += size;
1272 	return 1;
1273 }
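/*
 * Example: "rep insb" with RCX == 100 and the destination well inside a
 * page performs one ops->pio_in_emulated() call for all 100 bytes
 * (assuming they fit in rc->data) and feeds the remaining iterations
 * from the cache.
 */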
1274 
1275 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1276 				     u16 index, struct desc_struct *desc)
1277 {
1278 	struct desc_ptr dt;
1279 	ulong addr;
1280 
1281 	ctxt->ops->get_idt(ctxt, &dt);
1282 
1283 	if (dt.size < index * 8 + 7)
1284 		return emulate_gp(ctxt, index << 3 | 0x2);
1285 
1286 	addr = dt.address + index * 8;
1287 	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1288 				   &ctxt->exception);
1289 }
1290 
1291 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1292 				     u16 selector, struct desc_ptr *dt)
1293 {
1294 	struct x86_emulate_ops *ops = ctxt->ops;
1295 
1296 	if (selector & (1 << 2)) {
1297 		struct desc_struct desc;
1298 		u16 sel;
1299 
1300 		memset (dt, 0, sizeof *dt);
1301 		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1302 			return;
1303 
1304 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1305 		dt->address = get_desc_base(&desc);
1306 	} else
1307 		ops->get_gdt(ctxt, dt);
1308 }
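/*
 * Bit 2 of a selector (TI) picks the table: e.g. selector 0x000f names
 * LDT entry 1 at RPL 3, while 0x0008 names GDT entry 1 at RPL 0.
 */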
1309 
1310 /* allowed only for 8-byte segment descriptors */
1311 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1312 				   u16 selector, struct desc_struct *desc,
1313 				   ulong *desc_addr_p)
1314 {
1315 	struct desc_ptr dt;
1316 	u16 index = selector >> 3;
1317 	ulong addr;
1318 
1319 	get_descriptor_table_ptr(ctxt, selector, &dt);
1320 
1321 	if (dt.size < index * 8 + 7)
1322 		return emulate_gp(ctxt, selector & 0xfffc);
1323 
1324 	*desc_addr_p = addr = dt.address + index * 8;
1325 	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1326 				   &ctxt->exception);
1327 }
1328 
1329 /* allowed only for 8-byte segment descriptors */
1330 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1331 				    u16 selector, struct desc_struct *desc)
1332 {
1333 	struct desc_ptr dt;
1334 	u16 index = selector >> 3;
1335 	ulong addr;
1336 
1337 	get_descriptor_table_ptr(ctxt, selector, &dt);
1338 
1339 	if (dt.size < index * 8 + 7)
1340 		return emulate_gp(ctxt, selector & 0xfffc);
1341 
1342 	addr = dt.address + index * 8;
1343 	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1344 				    &ctxt->exception);
1345 }
1346 
1347 /* Does not support long mode */
1348 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1349 				   u16 selector, int seg)
1350 {
1351 	struct desc_struct seg_desc, old_desc;
1352 	u8 dpl, rpl, cpl;
1353 	unsigned err_vec = GP_VECTOR;
1354 	u32 err_code = 0;
1355 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1356 	ulong desc_addr;
1357 	int ret;
1358 
1359 	memset(&seg_desc, 0, sizeof seg_desc);
1360 
1361 	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1362 	    || ctxt->mode == X86EMUL_MODE_REAL) {
1363 		/* set real mode segment descriptor */
1364 		set_desc_base(&seg_desc, selector << 4);
1365 		set_desc_limit(&seg_desc, 0xffff);
1366 		seg_desc.type = 3;
1367 		seg_desc.p = 1;
1368 		seg_desc.s = 1;
1369 		if (ctxt->mode == X86EMUL_MODE_VM86)
1370 			seg_desc.dpl = 3;
1371 		goto load;
1372 	}
1373 
1374 	rpl = selector & 3;
1375 	cpl = ctxt->ops->cpl(ctxt);
1376 
1377 	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
1378 	if ((seg == VCPU_SREG_CS
1379 	     || (seg == VCPU_SREG_SS
1380 		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1381 	     || seg == VCPU_SREG_TR)
1382 	    && null_selector)
1383 		goto exception;
1384 
1385 	/* TR should be in GDT only */
1386 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1387 		goto exception;
1388 
1389 	if (null_selector) /* for NULL selector skip all following checks */
1390 		goto load;
1391 
1392 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1393 	if (ret != X86EMUL_CONTINUE)
1394 		return ret;
1395 
1396 	err_code = selector & 0xfffc;
1397 	err_vec = GP_VECTOR;
1398 
1399 	/* can't load a system descriptor into an ordinary segment register */
1400 	if (seg <= VCPU_SREG_GS && !seg_desc.s)
1401 		goto exception;
1402 
1403 	if (!seg_desc.p) {
1404 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1405 		goto exception;
1406 	}
1407 
1408 	dpl = seg_desc.dpl;
1409 
1410 	switch (seg) {
1411 	case VCPU_SREG_SS:
1412 		/*
1413 		 * segment is not a writable data segment, or the segment
1414 		 * selector's RPL != CPL, or the segment's DPL != CPL
1415 		 */
1416 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1417 			goto exception;
1418 		break;
1419 	case VCPU_SREG_CS:
1420 		if (!(seg_desc.type & 8))
1421 			goto exception;
1422 
1423 		if (seg_desc.type & 4) {
1424 			/* conforming */
1425 			if (dpl > cpl)
1426 				goto exception;
1427 		} else {
1428 			/* nonconforming */
1429 			if (rpl > cpl || dpl != cpl)
1430 				goto exception;
1431 		}
1432 		/* CS(RPL) <- CPL */
1433 		selector = (selector & 0xfffc) | cpl;
1434 		break;
1435 	case VCPU_SREG_TR:
1436 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1437 			goto exception;
1438 		old_desc = seg_desc;
1439 		seg_desc.type |= 2; /* busy */
1440 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1441 						  sizeof(seg_desc), &ctxt->exception);
1442 		if (ret != X86EMUL_CONTINUE)
1443 			return ret;
1444 		break;
1445 	case VCPU_SREG_LDTR:
1446 		if (seg_desc.s || seg_desc.type != 2)
1447 			goto exception;
1448 		break;
1449 	default: /*  DS, ES, FS, or GS */
1450 		/*
1451 		 * segment is not a data or readable code segment or
1452 		 * ((segment is a data or nonconforming code segment)
1453 		 * and (both RPL and CPL > DPL))
1454 		 */
1455 		if ((seg_desc.type & 0xa) == 0x8 ||
1456 		    (((seg_desc.type & 0xc) != 0xc) &&
1457 		     (rpl > dpl && cpl > dpl)))
1458 			goto exception;
1459 		break;
1460 	}
1461 
1462 	if (seg_desc.s) {
1463 		/* mark segment as accessed */
1464 		seg_desc.type |= 1;
1465 		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1466 		if (ret != X86EMUL_CONTINUE)
1467 			return ret;
1468 	}
1469 load:
1470 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1471 	return X86EMUL_CONTINUE;
1472 exception:
1473 	emulate_exception(ctxt, err_vec, err_code, true);
1474 	return X86EMUL_PROPAGATE_FAULT;
1475 }
1476 
1477 static void write_register_operand(struct operand *op)
1478 {
1479 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1480 	switch (op->bytes) {
1481 	case 1:
1482 		*(u8 *)op->addr.reg = (u8)op->val;
1483 		break;
1484 	case 2:
1485 		*(u16 *)op->addr.reg = (u16)op->val;
1486 		break;
1487 	case 4:
1488 		*op->addr.reg = (u32)op->val;
1489 		break;	/* 64b: zero-extend */
1490 	case 8:
1491 		*op->addr.reg = op->val;
1492 		break;
1493 	}
1494 }
1495 
1496 static int writeback(struct x86_emulate_ctxt *ctxt)
1497 {
1498 	int rc;
1499 
1500 	switch (ctxt->dst.type) {
1501 	case OP_REG:
1502 		write_register_operand(&ctxt->dst);
1503 		break;
1504 	case OP_MEM:
1505 		if (ctxt->lock_prefix)
1506 			rc = segmented_cmpxchg(ctxt,
1507 					       ctxt->dst.addr.mem,
1508 					       &ctxt->dst.orig_val,
1509 					       &ctxt->dst.val,
1510 					       ctxt->dst.bytes);
1511 		else
1512 			rc = segmented_write(ctxt,
1513 					     ctxt->dst.addr.mem,
1514 					     &ctxt->dst.val,
1515 					     ctxt->dst.bytes);
1516 		if (rc != X86EMUL_CONTINUE)
1517 			return rc;
1518 		break;
1519 	case OP_XMM:
1520 		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1521 		break;
1522 	case OP_MM:
1523 		write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
1524 		break;
1525 	case OP_NONE:
1526 		/* no writeback */
1527 		break;
1528 	default:
1529 		break;
1530 	}
1531 	return X86EMUL_CONTINUE;
1532 }
1533 
1534 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1535 {
1536 	struct segmented_address addr;
1537 
1538 	rsp_increment(ctxt, -bytes);
1539 	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
1540 	addr.seg = VCPU_SREG_SS;
1541 
1542 	return segmented_write(ctxt, addr, data, bytes);
1543 }
1544 
1545 static int em_push(struct x86_emulate_ctxt *ctxt)
1546 {
1547 	/* Disable writeback. */
1548 	ctxt->dst.type = OP_NONE;
1549 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1550 }
1551 
1552 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1553 		       void *dest, int len)
1554 {
1555 	int rc;
1556 	struct segmented_address addr;
1557 
1558 	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
1559 	addr.seg = VCPU_SREG_SS;
1560 	rc = segmented_read(ctxt, addr, dest, len);
1561 	if (rc != X86EMUL_CONTINUE)
1562 		return rc;
1563 
1564 	rsp_increment(ctxt, len);
1565 	return rc;
1566 }
1567 
1568 static int em_pop(struct x86_emulate_ctxt *ctxt)
1569 {
1570 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1571 }
1572 
1573 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1574 			void *dest, int len)
1575 {
1576 	int rc;
1577 	unsigned long val, change_mask;
1578 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1579 	int cpl = ctxt->ops->cpl(ctxt);
1580 
1581 	rc = emulate_pop(ctxt, &val, len);
1582 	if (rc != X86EMUL_CONTINUE)
1583 		return rc;
1584 
1585 	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1586 		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1587 
1588 	switch(ctxt->mode) {
1589 	case X86EMUL_MODE_PROT64:
1590 	case X86EMUL_MODE_PROT32:
1591 	case X86EMUL_MODE_PROT16:
1592 		if (cpl == 0)
1593 			change_mask |= EFLG_IOPL;
1594 		if (cpl <= iopl)
1595 			change_mask |= EFLG_IF;
1596 		break;
1597 	case X86EMUL_MODE_VM86:
1598 		if (iopl < 3)
1599 			return emulate_gp(ctxt, 0);
1600 		change_mask |= EFLG_IF;
1601 		break;
1602 	default: /* real mode */
1603 		change_mask |= (EFLG_IOPL | EFLG_IF);
1604 		break;
1605 	}
1606 
1607 	*(unsigned long *)dest =
1608 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1609 
1610 	return rc;
1611 }
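/*
 * emulate_popf() example: in protected mode at CPL 3 with IOPL 0 neither
 * change_mask branch fires, so a popped value may change the arithmetic
 * flags but IF and IOPL are kept from the old EFLAGS.
 */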
1612 
1613 static int em_popf(struct x86_emulate_ctxt *ctxt)
1614 {
1615 	ctxt->dst.type = OP_REG;
1616 	ctxt->dst.addr.reg = &ctxt->eflags;
1617 	ctxt->dst.bytes = ctxt->op_bytes;
1618 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1619 }
1620 
1621 static int em_enter(struct x86_emulate_ctxt *ctxt)
1622 {
1623 	int rc;
1624 	unsigned frame_size = ctxt->src.val;
1625 	unsigned nesting_level = ctxt->src2.val & 31;
1626 
1627 	if (nesting_level)
1628 		return X86EMUL_UNHANDLEABLE;
1629 
1630 	rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt));
1631 	if (rc != X86EMUL_CONTINUE)
1632 		return rc;
1633 	assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP],
1634 		      stack_mask(ctxt));
1635 	assign_masked(&ctxt->regs[VCPU_REGS_RSP],
1636 		      ctxt->regs[VCPU_REGS_RSP] - frame_size,
1637 		      stack_mask(ctxt));
1638 	return X86EMUL_CONTINUE;
1639 }
1640 
1641 static int em_leave(struct x86_emulate_ctxt *ctxt)
1642 {
1643 	assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP],
1644 		      stack_mask(ctxt));
1645 	return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes);
1646 }
1647 
1648 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1649 {
1650 	int seg = ctxt->src2.val;
1651 
1652 	ctxt->src.val = get_segment_selector(ctxt, seg);
1653 
1654 	return em_push(ctxt);
1655 }
1656 
1657 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1658 {
1659 	int seg = ctxt->src2.val;
1660 	unsigned long selector;
1661 	int rc;
1662 
1663 	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1664 	if (rc != X86EMUL_CONTINUE)
1665 		return rc;
1666 
1667 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1668 	return rc;
1669 }
1670 
1671 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1672 {
1673 	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1674 	int rc = X86EMUL_CONTINUE;
1675 	int reg = VCPU_REGS_RAX;
1676 
1677 	while (reg <= VCPU_REGS_RDI) {
1678 		(reg == VCPU_REGS_RSP) ?
1679 		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
1680 
1681 		rc = em_push(ctxt);
1682 		if (rc != X86EMUL_CONTINUE)
1683 			return rc;
1684 
1685 		++reg;
1686 	}
1687 
1688 	return rc;
1689 }
1690 
1691 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1692 {
1693 	ctxt->src.val =  (unsigned long)ctxt->eflags;
1694 	return em_push(ctxt);
1695 }
1696 
1697 static int em_popa(struct x86_emulate_ctxt *ctxt)
1698 {
1699 	int rc = X86EMUL_CONTINUE;
1700 	int reg = VCPU_REGS_RDI;
1701 
1702 	while (reg >= VCPU_REGS_RAX) {
1703 		if (reg == VCPU_REGS_RSP) {
1704 			rsp_increment(ctxt, ctxt->op_bytes);
1705 			--reg;
1706 		}
1707 
1708 		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1709 		if (rc != X86EMUL_CONTINUE)
1710 			break;
1711 		--reg;
1712 	}
1713 	return rc;
1714 }
1715 
1716 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1717 {
1718 	struct x86_emulate_ops *ops = ctxt->ops;
1719 	int rc;
1720 	struct desc_ptr dt;
1721 	gva_t cs_addr;
1722 	gva_t eip_addr;
1723 	u16 cs, eip;
1724 
1725 	/* TODO: Add limit checks */
1726 	ctxt->src.val = ctxt->eflags;
1727 	rc = em_push(ctxt);
1728 	if (rc != X86EMUL_CONTINUE)
1729 		return rc;
1730 
1731 	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1732 
1733 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1734 	rc = em_push(ctxt);
1735 	if (rc != X86EMUL_CONTINUE)
1736 		return rc;
1737 
1738 	ctxt->src.val = ctxt->_eip;
1739 	rc = em_push(ctxt);
1740 	if (rc != X86EMUL_CONTINUE)
1741 		return rc;
1742 
1743 	ops->get_idt(ctxt, &dt);
1744 
1745 	eip_addr = dt.address + (irq << 2);
1746 	cs_addr = dt.address + (irq << 2) + 2;
1747 
1748 	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1749 	if (rc != X86EMUL_CONTINUE)
1750 		return rc;
1751 
1752 	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1753 	if (rc != X86EMUL_CONTINUE)
1754 		return rc;
1755 
1756 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1757 	if (rc != X86EMUL_CONTINUE)
1758 		return rc;
1759 
1760 	ctxt->_eip = eip;
1761 
1762 	return rc;
1763 }
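/*
 * The real-mode IVT is an array of 4-byte entries at the IDT base:
 * offset irq * 4 holds the new IP and irq * 4 + 2 the new CS, hence the
 * (irq << 2) arithmetic for eip_addr/cs_addr above.
 */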
1764 
1765 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1766 {
1767 	switch(ctxt->mode) {
1768 	case X86EMUL_MODE_REAL:
1769 		return emulate_int_real(ctxt, irq);
1770 	case X86EMUL_MODE_VM86:
1771 	case X86EMUL_MODE_PROT16:
1772 	case X86EMUL_MODE_PROT32:
1773 	case X86EMUL_MODE_PROT64:
1774 	default:
1775 		/* Protected mode interrupts are not implemented yet */
1776 		return X86EMUL_UNHANDLEABLE;
1777 	}
1778 }
1779 
1780 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1781 {
1782 	int rc = X86EMUL_CONTINUE;
1783 	unsigned long temp_eip = 0;
1784 	unsigned long temp_eflags = 0;
1785 	unsigned long cs = 0;
1786 	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1787 			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1788 			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1789 	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1790 
1791 	/* TODO: Add stack limit check */
1792 
1793 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1794 
1795 	if (rc != X86EMUL_CONTINUE)
1796 		return rc;
1797 
1798 	if (temp_eip & ~0xffff)
1799 		return emulate_gp(ctxt, 0);
1800 
1801 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1802 
1803 	if (rc != X86EMUL_CONTINUE)
1804 		return rc;
1805 
1806 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1807 
1808 	if (rc != X86EMUL_CONTINUE)
1809 		return rc;
1810 
1811 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1812 
1813 	if (rc != X86EMUL_CONTINUE)
1814 		return rc;
1815 
1816 	ctxt->_eip = temp_eip;
1817 
1818 
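	/*
	 * A 16-bit IRET replaces only the low 16 bits of EFLAGS; a 32-bit
	 * IRET restores the bits in 'mask' while leaving the VM86 bits
	 * (VM/VIF/VIP), which real-mode IRET cannot change, untouched.
	 */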
1819 	if (ctxt->op_bytes == 4)
1820 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1821 	else if (ctxt->op_bytes == 2) {
1822 		ctxt->eflags &= ~0xffff;
1823 		ctxt->eflags |= temp_eflags;
1824 	}
1825 
1826 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1827 	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1828 
1829 	return rc;
1830 }
1831 
1832 static int em_iret(struct x86_emulate_ctxt *ctxt)
1833 {
1834 	switch(ctxt->mode) {
1835 	case X86EMUL_MODE_REAL:
1836 		return emulate_iret_real(ctxt);
1837 	case X86EMUL_MODE_VM86:
1838 	case X86EMUL_MODE_PROT16:
1839 	case X86EMUL_MODE_PROT32:
1840 	case X86EMUL_MODE_PROT64:
1841 	default:
1842 		/* iret from protected mode is not yet implemented */
1843 		return X86EMUL_UNHANDLEABLE;
1844 	}
1845 }
1846 
1847 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1848 {
1849 	int rc;
1850 	unsigned short sel;
1851 
1852 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1853 
1854 	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1855 	if (rc != X86EMUL_CONTINUE)
1856 		return rc;
1857 
1858 	ctxt->_eip = 0;
1859 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1860 	return X86EMUL_CONTINUE;
1861 }
1862 
1863 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1864 {
1865 	switch (ctxt->modrm_reg) {
1866 	case 0:	/* rol */
1867 		emulate_2op_SrcB(ctxt, "rol");
1868 		break;
1869 	case 1:	/* ror */
1870 		emulate_2op_SrcB(ctxt, "ror");
1871 		break;
1872 	case 2:	/* rcl */
1873 		emulate_2op_SrcB(ctxt, "rcl");
1874 		break;
1875 	case 3:	/* rcr */
1876 		emulate_2op_SrcB(ctxt, "rcr");
1877 		break;
1878 	case 4:	/* sal/shl */
1879 	case 6:	/* sal/shl */
1880 		emulate_2op_SrcB(ctxt, "sal");
1881 		break;
1882 	case 5:	/* shr */
1883 		emulate_2op_SrcB(ctxt, "shr");
1884 		break;
1885 	case 7:	/* sar */
1886 		emulate_2op_SrcB(ctxt, "sar");
1887 		break;
1888 	}
1889 	return X86EMUL_CONTINUE;
1890 }
1891 
1892 static int em_not(struct x86_emulate_ctxt *ctxt)
1893 {
1894 	ctxt->dst.val = ~ctxt->dst.val;
1895 	return X86EMUL_CONTINUE;
1896 }
1897 
1898 static int em_neg(struct x86_emulate_ctxt *ctxt)
1899 {
1900 	emulate_1op(ctxt, "neg");
1901 	return X86EMUL_CONTINUE;
1902 }
1903 
1904 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1905 {
1906 	u8 ex = 0;
1907 
1908 	emulate_1op_rax_rdx(ctxt, "mul", ex);
1909 	return X86EMUL_CONTINUE;
1910 }
1911 
1912 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1913 {
1914 	u8 ex = 0;
1915 
1916 	emulate_1op_rax_rdx(ctxt, "imul", ex);
1917 	return X86EMUL_CONTINUE;
1918 }
1919 
1920 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1921 {
1922 	u8 de = 0;
1923 
1924 	emulate_1op_rax_rdx(ctxt, "div", de);
1925 	if (de)
1926 		return emulate_de(ctxt);
1927 	return X86EMUL_CONTINUE;
1928 }
1929 
1930 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1931 {
1932 	u8 de = 0;
1933 
1934 	emulate_1op_rax_rdx(ctxt, "idiv", de);
1935 	if (de)
1936 		return emulate_de(ctxt);
1937 	return X86EMUL_CONTINUE;
1938 }
1939 
1940 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1941 {
1942 	int rc = X86EMUL_CONTINUE;
1943 
1944 	switch (ctxt->modrm_reg) {
1945 	case 0:	/* inc */
1946 		emulate_1op(ctxt, "inc");
1947 		break;
1948 	case 1:	/* dec */
1949 		emulate_1op(ctxt, "dec");
1950 		break;
1951 	case 2: /* call near abs */ {
1952 		long old_eip;
1953 		old_eip = ctxt->_eip;
1954 		ctxt->_eip = ctxt->src.val;
1955 		ctxt->src.val = old_eip;
1956 		rc = em_push(ctxt);
1957 		break;
1958 	}
1959 	case 4: /* jmp abs */
1960 		ctxt->_eip = ctxt->src.val;
1961 		break;
1962 	case 5: /* jmp far */
1963 		rc = em_jmp_far(ctxt);
1964 		break;
1965 	case 6:	/* push */
1966 		rc = em_push(ctxt);
1967 		break;
1968 	}
1969 	return rc;
1970 }
1971 
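/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match store
 * ECX:EBX there and set ZF, otherwise load the destination into EDX:EAX
 * and clear ZF.
 */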
1972 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
1973 {
1974 	u64 old = ctxt->dst.orig_val64;
1975 
1976 	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1977 	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1978 		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1979 		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1980 		ctxt->eflags &= ~EFLG_ZF;
1981 	} else {
1982 		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1983 			(u32) ctxt->regs[VCPU_REGS_RBX];
1984 
1985 		ctxt->eflags |= EFLG_ZF;
1986 	}
1987 	return X86EMUL_CONTINUE;
1988 }
1989 
1990 static int em_ret(struct x86_emulate_ctxt *ctxt)
1991 {
1992 	ctxt->dst.type = OP_REG;
1993 	ctxt->dst.addr.reg = &ctxt->_eip;
1994 	ctxt->dst.bytes = ctxt->op_bytes;
1995 	return em_pop(ctxt);
1996 }
1997 
1998 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1999 {
2000 	int rc;
2001 	unsigned long cs;
2002 
2003 	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
2004 	if (rc != X86EMUL_CONTINUE)
2005 		return rc;
2006 	if (ctxt->op_bytes == 4)
2007 		ctxt->_eip = (u32)ctxt->_eip;
2008 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2009 	if (rc != X86EMUL_CONTINUE)
2010 		return rc;
2011 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2012 	return rc;
2013 }
2014 
2015 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2016 {
2017 	/* Save real source value, then compare EAX against destination. */
2018 	ctxt->src.orig_val = ctxt->src.val;
2019 	ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
2020 	emulate_2op_SrcV(ctxt, "cmp");
2021 
2022 	if (ctxt->eflags & EFLG_ZF) {
2023 		/* Success: write back to memory. */
2024 		ctxt->dst.val = ctxt->src.orig_val;
2025 	} else {
2026 		/* Failure: write the value we saw to EAX. */
2027 		ctxt->dst.type = OP_REG;
2028 		ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
2029 	}
2030 	return X86EMUL_CONTINUE;
2031 }
2032 
2033 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2034 {
2035 	int seg = ctxt->src2.val;
2036 	unsigned short sel;
2037 	int rc;
2038 
2039 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2040 
2041 	rc = load_segment_descriptor(ctxt, sel, seg);
2042 	if (rc != X86EMUL_CONTINUE)
2043 		return rc;
2044 
2045 	ctxt->dst.val = ctxt->src.val;
2046 	return rc;
2047 }
2048 
2049 static void
2050 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2051 			struct desc_struct *cs, struct desc_struct *ss)
2052 {
2053 	u16 selector;
2054 
2055 	memset(cs, 0, sizeof(struct desc_struct));
2056 	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
2057 	memset(ss, 0, sizeof(struct desc_struct));
2058 
2059 	cs->l = 0;		/* will be adjusted later */
2060 	set_desc_base(cs, 0);	/* flat segment */
2061 	cs->g = 1;		/* 4kb granularity */
2062 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2063 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2064 	cs->s = 1;
2065 	cs->dpl = 0;		/* will be adjusted later */
2066 	cs->p = 1;
2067 	cs->d = 1;
2068 
2069 	set_desc_base(ss, 0);	/* flat segment */
2070 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2071 	ss->g = 1;		/* 4kb granularity */
2072 	ss->s = 1;
2073 	ss->type = 0x03;	/* Read/Write, Accessed */
2074 	ss->d = 1;		/* 32bit stack segment */
2075 	ss->dpl = 0;
2076 	ss->p = 1;
2077 }
2078 
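/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX; for Intel the
 * three words spell "Genu", "ineI", "ntel".
 */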
2079 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2080 {
2081 	u32 eax, ebx, ecx, edx;
2082 
2083 	eax = ecx = 0;
2084 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2085 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2086 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2087 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2088 }
2089 
2090 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2091 {
2092 	struct x86_emulate_ops *ops = ctxt->ops;
2093 	u32 eax, ebx, ecx, edx;
2094 
2095 	/*
2096 	 * syscall is always enabled in long mode, so the check only needs
2097 	 * to be vendor specific (via cpuid) when other modes are active.
2098 	 */
2099 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2100 		return true;
2101 
2102 	eax = 0x00000000;
2103 	ecx = 0x00000000;
2104 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2105 	/*
2106 	 * Intel ("GenuineIntel")
2107 	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode,
2108 	 * so a 32-bit compat application running under a 64-bit guest
2109 	 * will #UD. While this behaviour could be fixed by emulating the
2110 	 * AMD response, AMD CPUs cannot be made to behave like Intel
2111 	 * ones, so report syscall as unavailable on Intel here.
2112 	 */
2113 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2114 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2115 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2116 		return false;
2117 
2118 	/* AMD ("AuthenticAMD") */
2119 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2120 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2121 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2122 		return true;
2123 
2124 	/* AMD ("AMDisbetter!") */
2125 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2126 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2127 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2128 		return true;
2129 
2130 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2131 	return false;
2132 }
2133 
2134 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2135 {
2136 	struct x86_emulate_ops *ops = ctxt->ops;
2137 	struct desc_struct cs, ss;
2138 	u64 msr_data;
2139 	u16 cs_sel, ss_sel;
2140 	u64 efer = 0;
2141 
2142 	/* syscall is not available in real mode or vm86 mode */
2143 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2144 	    ctxt->mode == X86EMUL_MODE_VM86)
2145 		return emulate_ud(ctxt);
2146 
2147 	if (!em_syscall_is_enabled(ctxt))
2148 		return emulate_ud(ctxt);
2149 
2150 	ops->get_msr(ctxt, MSR_EFER, &efer);
2151 	setup_syscalls_segments(ctxt, &cs, &ss);
2152 
2153 	if (!(efer & EFER_SCE))
2154 		return emulate_ud(ctxt);
2155 
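	/*
	 * MSR_STAR layout: bits 47:32 hold the SYSCALL CS selector base
	 * (SS is CS + 8), bits 63:48 the SYSRET base, and bits 31:0 the
	 * legacy 32-bit SYSCALL entry point used further down.
	 */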
2156 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2157 	msr_data >>= 32;
2158 	cs_sel = (u16)(msr_data & 0xfffc);
2159 	ss_sel = (u16)(msr_data + 8);
2160 
2161 	if (efer & EFER_LMA) {
2162 		cs.d = 0;
2163 		cs.l = 1;
2164 	}
2165 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2166 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2167 
2168 	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
2169 	if (efer & EFER_LMA) {
2170 #ifdef CONFIG_X86_64
2171 		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
2172 
2173 		ops->get_msr(ctxt,
2174 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2175 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2176 		ctxt->_eip = msr_data;
2177 
2178 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2179 		ctxt->eflags &= ~(msr_data | EFLG_RF);
2180 #endif
2181 	} else {
2182 		/* legacy mode */
2183 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2184 		ctxt->_eip = (u32)msr_data;
2185 
2186 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2187 	}
2188 
2189 	return X86EMUL_CONTINUE;
2190 }
2191 
2192 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2193 {
2194 	struct x86_emulate_ops *ops = ctxt->ops;
2195 	struct desc_struct cs, ss;
2196 	u64 msr_data;
2197 	u16 cs_sel, ss_sel;
2198 	u64 efer = 0;
2199 
2200 	ops->get_msr(ctxt, MSR_EFER, &efer);
2201 	/* inject #GP if in real mode */
2202 	if (ctxt->mode == X86EMUL_MODE_REAL)
2203 		return emulate_gp(ctxt, 0);
2204 
2205 	/*
2206 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2207 	 * mode).
2208 	 */
2209 	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2210 	    && !vendor_intel(ctxt))
2211 		return emulate_ud(ctxt);
2212 
2213 	/* XXX sysenter/sysexit have not been tested in 64-bit mode,
2214 	 * so we inject an #UD.
2215 	 */
2216 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2217 		return emulate_ud(ctxt);
2218 
2219 	setup_syscalls_segments(ctxt, &cs, &ss);
2220 
2221 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2222 	switch (ctxt->mode) {
2223 	case X86EMUL_MODE_PROT32:
2224 		if ((msr_data & 0xfffc) == 0x0)
2225 			return emulate_gp(ctxt, 0);
2226 		break;
2227 	case X86EMUL_MODE_PROT64:
2228 		if (msr_data == 0x0)
2229 			return emulate_gp(ctxt, 0);
2230 		break;
2231 	}
2232 
2233 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2234 	cs_sel = (u16)msr_data;
2235 	cs_sel &= ~SELECTOR_RPL_MASK;
2236 	ss_sel = cs_sel + 8;
2237 	ss_sel &= ~SELECTOR_RPL_MASK;
2238 	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2239 		cs.d = 0;
2240 		cs.l = 1;
2241 	}
2242 
2243 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2244 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2245 
2246 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2247 	ctxt->_eip = msr_data;
2248 
2249 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2250 	ctxt->regs[VCPU_REGS_RSP] = msr_data;
2251 
2252 	return X86EMUL_CONTINUE;
2253 }
2254 
2255 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2256 {
2257 	struct x86_emulate_ops *ops = ctxt->ops;
2258 	struct desc_struct cs, ss;
2259 	u64 msr_data;
2260 	int usermode;
2261 	u16 cs_sel = 0, ss_sel = 0;
2262 
2263 	/* inject #GP if in real mode or Virtual 8086 mode */
2264 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2265 	    ctxt->mode == X86EMUL_MODE_VM86)
2266 		return emulate_gp(ctxt, 0);
2267 
2268 	setup_syscalls_segments(ctxt, &cs, &ss);
2269 
2270 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2271 		usermode = X86EMUL_MODE_PROT64;
2272 	else
2273 		usermode = X86EMUL_MODE_PROT32;
2274 
2275 	cs.dpl = 3;
2276 	ss.dpl = 3;
2277 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
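	/*
	 * SYSEXIT derives its selectors from IA32_SYSENTER_CS: the 32-bit
	 * return uses CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24; the
	 * 64-bit return uses CS = SYSENTER_CS + 32 and SS = CS + 8, with
	 * RPL forced to 3 below.
	 */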
2278 	switch (usermode) {
2279 	case X86EMUL_MODE_PROT32:
2280 		cs_sel = (u16)(msr_data + 16);
2281 		if ((msr_data & 0xfffc) == 0x0)
2282 			return emulate_gp(ctxt, 0);
2283 		ss_sel = (u16)(msr_data + 24);
2284 		break;
2285 	case X86EMUL_MODE_PROT64:
2286 		cs_sel = (u16)(msr_data + 32);
2287 		if (msr_data == 0x0)
2288 			return emulate_gp(ctxt, 0);
2289 		ss_sel = cs_sel + 8;
2290 		cs.d = 0;
2291 		cs.l = 1;
2292 		break;
2293 	}
2294 	cs_sel |= SELECTOR_RPL_MASK;
2295 	ss_sel |= SELECTOR_RPL_MASK;
2296 
2297 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2298 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2299 
2300 	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2301 	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2302 
2303 	return X86EMUL_CONTINUE;
2304 }
2305 
2306 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2307 {
2308 	int iopl;
2309 	if (ctxt->mode == X86EMUL_MODE_REAL)
2310 		return false;
2311 	if (ctxt->mode == X86EMUL_MODE_VM86)
2312 		return true;
2313 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2314 	return ctxt->ops->cpl(ctxt) > iopl;
2315 }
2316 
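/*
 * Consult the I/O permission bitmap in the TSS: the u16 at TSS offset 102
 * gives the bitmap's offset, and port p is governed by bit p%8 of byte p/8.
 * An access is allowed only if every bit it covers is clear; reading 16
 * bits of the bitmap lets a multi-byte access straddle a byte boundary.
 * E.g. an outw to port 0x3fb checks bits 3 and 4 of bitmap byte 0x7f.
 */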
2317 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2318 					    u16 port, u16 len)
2319 {
2320 	struct x86_emulate_ops *ops = ctxt->ops;
2321 	struct desc_struct tr_seg;
2322 	u32 base3;
2323 	int r;
2324 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2325 	unsigned mask = (1 << len) - 1;
2326 	unsigned long base;
2327 
2328 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2329 	if (!tr_seg.p)
2330 		return false;
2331 	if (desc_limit_scaled(&tr_seg) < 103)
2332 		return false;
2333 	base = get_desc_base(&tr_seg);
2334 #ifdef CONFIG_X86_64
2335 	base |= ((u64)base3) << 32;
2336 #endif
2337 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2338 	if (r != X86EMUL_CONTINUE)
2339 		return false;
2340 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2341 		return false;
2342 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2343 	if (r != X86EMUL_CONTINUE)
2344 		return false;
2345 	if ((perm >> bit_idx) & mask)
2346 		return false;
2347 	return true;
2348 }
2349 
2350 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2351 				 u16 port, u16 len)
2352 {
2353 	if (ctxt->perm_ok)
2354 		return true;
2355 
2356 	if (emulator_bad_iopl(ctxt))
2357 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2358 			return false;
2359 
2360 	ctxt->perm_ok = true;
2361 
2362 	return true;
2363 }
2364 
2365 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2366 				struct tss_segment_16 *tss)
2367 {
2368 	tss->ip = ctxt->_eip;
2369 	tss->flag = ctxt->eflags;
2370 	tss->ax = ctxt->regs[VCPU_REGS_RAX];
2371 	tss->cx = ctxt->regs[VCPU_REGS_RCX];
2372 	tss->dx = ctxt->regs[VCPU_REGS_RDX];
2373 	tss->bx = ctxt->regs[VCPU_REGS_RBX];
2374 	tss->sp = ctxt->regs[VCPU_REGS_RSP];
2375 	tss->bp = ctxt->regs[VCPU_REGS_RBP];
2376 	tss->si = ctxt->regs[VCPU_REGS_RSI];
2377 	tss->di = ctxt->regs[VCPU_REGS_RDI];
2378 
2379 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2380 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2381 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2382 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2383 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2384 }
2385 
2386 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2387 				 struct tss_segment_16 *tss)
2388 {
2389 	int ret;
2390 
2391 	ctxt->_eip = tss->ip;
2392 	ctxt->eflags = tss->flag | 2;
2393 	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2394 	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2395 	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2396 	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2397 	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2398 	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2399 	ctxt->regs[VCPU_REGS_RSI] = tss->si;
2400 	ctxt->regs[VCPU_REGS_RDI] = tss->di;
2401 
2402 	/*
2403 	 * SDM says that segment selectors are loaded before segment
2404 	 * descriptors
2405 	 */
2406 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2407 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2408 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2409 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2410 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2411 
2412 	/*
2413 	 * Now load the segment descriptors. If a fault happens at this
2414 	 * stage, it is handled in the context of the new task.
2415 	 */
2416 	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2417 	if (ret != X86EMUL_CONTINUE)
2418 		return ret;
2419 	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2420 	if (ret != X86EMUL_CONTINUE)
2421 		return ret;
2422 	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2423 	if (ret != X86EMUL_CONTINUE)
2424 		return ret;
2425 	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2426 	if (ret != X86EMUL_CONTINUE)
2427 		return ret;
2428 	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2429 	if (ret != X86EMUL_CONTINUE)
2430 		return ret;
2431 
2432 	return X86EMUL_CONTINUE;
2433 }
2434 
2435 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2436 			  u16 tss_selector, u16 old_tss_sel,
2437 			  ulong old_tss_base, struct desc_struct *new_desc)
2438 {
2439 	struct x86_emulate_ops *ops = ctxt->ops;
2440 	struct tss_segment_16 tss_seg;
2441 	int ret;
2442 	u32 new_tss_base = get_desc_base(new_desc);
2443 
2444 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2445 			    &ctxt->exception);
2446 	if (ret != X86EMUL_CONTINUE)
2447 		/* FIXME: need to provide precise fault address */
2448 		return ret;
2449 
2450 	save_state_to_tss16(ctxt, &tss_seg);
2451 
2452 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2453 			     &ctxt->exception);
2454 	if (ret != X86EMUL_CONTINUE)
2455 		/* FIXME: need to provide precise fault address */
2456 		return ret;
2457 
2458 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2459 			    &ctxt->exception);
2460 	if (ret != X86EMUL_CONTINUE)
2461 		/* FIXME: need to provide precise fault address */
2462 		return ret;
2463 
2464 	if (old_tss_sel != 0xffff) {
2465 		tss_seg.prev_task_link = old_tss_sel;
2466 
2467 		ret = ops->write_std(ctxt, new_tss_base,
2468 				     &tss_seg.prev_task_link,
2469 				     sizeof tss_seg.prev_task_link,
2470 				     &ctxt->exception);
2471 		if (ret != X86EMUL_CONTINUE)
2472 			/* FIXME: need to provide precise fault address */
2473 			return ret;
2474 	}
2475 
2476 	return load_state_from_tss16(ctxt, &tss_seg);
2477 }
2478 
2479 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2480 				struct tss_segment_32 *tss)
2481 {
2482 	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2483 	tss->eip = ctxt->_eip;
2484 	tss->eflags = ctxt->eflags;
2485 	tss->eax = ctxt->regs[VCPU_REGS_RAX];
2486 	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2487 	tss->edx = ctxt->regs[VCPU_REGS_RDX];
2488 	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2489 	tss->esp = ctxt->regs[VCPU_REGS_RSP];
2490 	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2491 	tss->esi = ctxt->regs[VCPU_REGS_RSI];
2492 	tss->edi = ctxt->regs[VCPU_REGS_RDI];
2493 
2494 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2495 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2496 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2497 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2498 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2499 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2500 	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2501 }
2502 
2503 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2504 				 struct tss_segment_32 *tss)
2505 {
2506 	int ret;
2507 
2508 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2509 		return emulate_gp(ctxt, 0);
2510 	ctxt->_eip = tss->eip;
2511 	ctxt->eflags = tss->eflags | 2;
2512 
2513 	/* General purpose registers */
2514 	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2515 	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2516 	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2517 	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2518 	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2519 	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2520 	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2521 	ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2522 
2523 	/*
2524 	 * SDM says that segment selectors are loaded before segment
2525 	 * descriptors
2526 	 */
2527 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2528 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2529 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2530 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2531 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2532 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2533 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2534 
2535 	/*
2536 	 * If we're switching between Protected Mode and VM86, we need to make
2537 	 * sure to update the mode before loading the segment descriptors so
2538 	 * that the selectors are interpreted correctly.
2539 	 *
2540 	 * Need to get rflags to the vcpu struct immediately because it
2541 	 * influences the CPL which is checked at least when loading the segment
2542 	 * descriptors and when pushing an error code to the new kernel stack.
2543 	 *
2544 	 * TODO Introduce a separate ctxt->ops->set_cpl callback
2545 	 */
2546 	if (ctxt->eflags & X86_EFLAGS_VM)
2547 		ctxt->mode = X86EMUL_MODE_VM86;
2548 	else
2549 		ctxt->mode = X86EMUL_MODE_PROT32;
2550 
2551 	ctxt->ops->set_rflags(ctxt, ctxt->eflags);
2552 
2553 	/*
2554 	 * Now load the segment descriptors. If a fault happens at this
2555 	 * stage, it is handled in the context of the new task.
2556 	 */
2557 	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2558 	if (ret != X86EMUL_CONTINUE)
2559 		return ret;
2560 	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2561 	if (ret != X86EMUL_CONTINUE)
2562 		return ret;
2563 	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2564 	if (ret != X86EMUL_CONTINUE)
2565 		return ret;
2566 	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2567 	if (ret != X86EMUL_CONTINUE)
2568 		return ret;
2569 	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2570 	if (ret != X86EMUL_CONTINUE)
2571 		return ret;
2572 	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2573 	if (ret != X86EMUL_CONTINUE)
2574 		return ret;
2575 	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2576 	if (ret != X86EMUL_CONTINUE)
2577 		return ret;
2578 
2579 	return X86EMUL_CONTINUE;
2580 }
2581 
2582 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2583 			  u16 tss_selector, u16 old_tss_sel,
2584 			  ulong old_tss_base, struct desc_struct *new_desc)
2585 {
2586 	struct x86_emulate_ops *ops = ctxt->ops;
2587 	struct tss_segment_32 tss_seg;
2588 	int ret;
2589 	u32 new_tss_base = get_desc_base(new_desc);
2590 
2591 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2592 			    &ctxt->exception);
2593 	if (ret != X86EMUL_CONTINUE)
2594 		/* FIXME: need to provide precise fault address */
2595 		return ret;
2596 
2597 	save_state_to_tss32(ctxt, &tss_seg);
2598 
2599 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2600 			     &ctxt->exception);
2601 	if (ret != X86EMUL_CONTINUE)
2602 		/* FIXME: need to provide precise fault address */
2603 		return ret;
2604 
2605 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2606 			    &ctxt->exception);
2607 	if (ret != X86EMUL_CONTINUE)
2608 		/* FIXME: need to provide precise fault address */
2609 		return ret;
2610 
2611 	if (old_tss_sel != 0xffff) {
2612 		tss_seg.prev_task_link = old_tss_sel;
2613 
2614 		ret = ops->write_std(ctxt, new_tss_base,
2615 				     &tss_seg.prev_task_link,
2616 				     sizeof tss_seg.prev_task_link,
2617 				     &ctxt->exception);
2618 		if (ret != X86EMUL_CONTINUE)
2619 			/* FIXME: need to provide precise fault address */
2620 			return ret;
2621 	}
2622 
2623 	return load_state_from_tss32(ctxt, &tss_seg);
2624 }
2625 
2626 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2627 				   u16 tss_selector, int idt_index, int reason,
2628 				   bool has_error_code, u32 error_code)
2629 {
2630 	struct x86_emulate_ops *ops = ctxt->ops;
2631 	struct desc_struct curr_tss_desc, next_tss_desc;
2632 	int ret;
2633 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2634 	ulong old_tss_base =
2635 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2636 	u32 desc_limit;
2637 	ulong desc_addr;
2638 
2639 	/* FIXME: old_tss_base == ~0 ? */
2640 
2641 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2642 	if (ret != X86EMUL_CONTINUE)
2643 		return ret;
2644 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2645 	if (ret != X86EMUL_CONTINUE)
2646 		return ret;
2647 
2648 	/* FIXME: check that next_tss_desc is tss */
2649 
2650 	/*
2651 	 * Check privileges. The three cases are task switch caused by...
2652 	 *
2653 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2654 	 * 2. Exception/IRQ/iret: No check is performed
2655 	 * 3. jmp/call to TSS: Check against DPL of the TSS
2656 	 */
2657 	if (reason == TASK_SWITCH_GATE) {
2658 		if (idt_index != -1) {
2659 			/* Software interrupts */
2660 			struct desc_struct task_gate_desc;
2661 			int dpl;
2662 
2663 			ret = read_interrupt_descriptor(ctxt, idt_index,
2664 							&task_gate_desc);
2665 			if (ret != X86EMUL_CONTINUE)
2666 				return ret;
2667 
2668 			dpl = task_gate_desc.dpl;
2669 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2670 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2671 		}
2672 	} else if (reason != TASK_SWITCH_IRET) {
2673 		int dpl = next_tss_desc.dpl;
2674 		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2675 			return emulate_gp(ctxt, tss_selector);
2676 	}
2677 
2678 
2679 	desc_limit = desc_limit_scaled(&next_tss_desc);
2680 	if (!next_tss_desc.p ||
2681 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2682 	     desc_limit < 0x2b)) {
2683 		emulate_ts(ctxt, tss_selector & 0xfffc);
2684 		return X86EMUL_PROPAGATE_FAULT;
2685 	}
2686 
2687 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2688 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2689 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2690 	}
2691 
2692 	if (reason == TASK_SWITCH_IRET)
2693 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2694 
2695 	/* Set the back link to the previous task only if the NT bit is set
2696 	   in eflags; note that old_tss_sel is not used after this point. */
2697 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2698 		old_tss_sel = 0xffff;
2699 
2700 	if (next_tss_desc.type & 8)
2701 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2702 				     old_tss_base, &next_tss_desc);
2703 	else
2704 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2705 				     old_tss_base, &next_tss_desc);
2706 	if (ret != X86EMUL_CONTINUE)
2707 		return ret;
2708 
2709 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2710 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2711 
2712 	if (reason != TASK_SWITCH_IRET) {
2713 		next_tss_desc.type |= (1 << 1); /* set busy flag */
2714 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2715 	}
2716 
2717 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2718 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2719 
2720 	if (has_error_code) {
2721 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2722 		ctxt->lock_prefix = 0;
2723 		ctxt->src.val = (unsigned long) error_code;
2724 		ret = em_push(ctxt);
2725 	}
2726 
2727 	return ret;
2728 }
2729 
2730 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2731 			 u16 tss_selector, int idt_index, int reason,
2732 			 bool has_error_code, u32 error_code)
2733 {
2734 	int rc;
2735 
2736 	ctxt->_eip = ctxt->eip;
2737 	ctxt->dst.type = OP_NONE;
2738 
2739 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2740 				     has_error_code, error_code);
2741 
2742 	if (rc == X86EMUL_CONTINUE)
2743 		ctxt->eip = ctxt->_eip;
2744 
2745 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2746 }
2747 
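/*
 * Advance a string operand: SI/DI move by the operand size, downward when
 * EFLAGS.DF is set and upward otherwise.
 */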
2748 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2749 			    int reg, struct operand *op)
2750 {
2751 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2752 
2753 	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2754 	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2755 	op->addr.mem.seg = seg;
2756 }
2757 
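/*
 * DAS adjusts AL after a packed-BCD subtraction. E.g. 0x45 - 0x19 leaves
 * AL = 0x2c; the low nibble 0xc > 9, so AL -= 6 gives the correct BCD
 * result 0x26 with AF set. A second -0x60 adjustment (setting CF) applies
 * when the original AL exceeded 0x99 or CF was already set.
 */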
2758 static int em_das(struct x86_emulate_ctxt *ctxt)
2759 {
2760 	u8 al, old_al;
2761 	bool af, cf, old_cf;
2762 
2763 	cf = ctxt->eflags & X86_EFLAGS_CF;
2764 	al = ctxt->dst.val;
2765 
2766 	old_al = al;
2767 	old_cf = cf;
2768 	cf = false;
2769 	af = ctxt->eflags & X86_EFLAGS_AF;
2770 	if ((al & 0x0f) > 9 || af) {
2771 		al -= 6;
2772 		cf = old_cf | (al >= 250);
2773 		af = true;
2774 	} else {
2775 		af = false;
2776 	}
2777 	if (old_al > 0x99 || old_cf) {
2778 		al -= 0x60;
2779 		cf = true;
2780 	}
2781 
2782 	ctxt->dst.val = al;
2783 	/* Set PF, ZF, SF */
2784 	ctxt->src.type = OP_IMM;
2785 	ctxt->src.val = 0;
2786 	ctxt->src.bytes = 1;
2787 	emulate_2op_SrcV(ctxt, "or");
2788 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2789 	if (cf)
2790 		ctxt->eflags |= X86_EFLAGS_CF;
2791 	if (af)
2792 		ctxt->eflags |= X86_EFLAGS_AF;
2793 	return X86EMUL_CONTINUE;
2794 }
2795 
2796 static int em_call(struct x86_emulate_ctxt *ctxt)
2797 {
2798 	long rel = ctxt->src.val;
2799 
2800 	ctxt->src.val = (unsigned long)ctxt->_eip;
2801 	jmp_rel(ctxt, rel);
2802 	return em_push(ctxt);
2803 }
2804 
2805 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2806 {
2807 	u16 sel, old_cs;
2808 	ulong old_eip;
2809 	int rc;
2810 
2811 	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2812 	old_eip = ctxt->_eip;
2813 
2814 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2815 	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2816 		return X86EMUL_CONTINUE;
2817 
2818 	ctxt->_eip = 0;
2819 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2820 
2821 	ctxt->src.val = old_cs;
2822 	rc = em_push(ctxt);
2823 	if (rc != X86EMUL_CONTINUE)
2824 		return rc;
2825 
2826 	ctxt->src.val = old_eip;
2827 	return em_push(ctxt);
2828 }
2829 
2830 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2831 {
2832 	int rc;
2833 
2834 	ctxt->dst.type = OP_REG;
2835 	ctxt->dst.addr.reg = &ctxt->_eip;
2836 	ctxt->dst.bytes = ctxt->op_bytes;
2837 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2838 	if (rc != X86EMUL_CONTINUE)
2839 		return rc;
2840 	rsp_increment(ctxt, ctxt->src.val);
2841 	return X86EMUL_CONTINUE;
2842 }
2843 
2844 static int em_add(struct x86_emulate_ctxt *ctxt)
2845 {
2846 	emulate_2op_SrcV(ctxt, "add");
2847 	return X86EMUL_CONTINUE;
2848 }
2849 
2850 static int em_or(struct x86_emulate_ctxt *ctxt)
2851 {
2852 	emulate_2op_SrcV(ctxt, "or");
2853 	return X86EMUL_CONTINUE;
2854 }
2855 
2856 static int em_adc(struct x86_emulate_ctxt *ctxt)
2857 {
2858 	emulate_2op_SrcV(ctxt, "adc");
2859 	return X86EMUL_CONTINUE;
2860 }
2861 
2862 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2863 {
2864 	emulate_2op_SrcV(ctxt, "sbb");
2865 	return X86EMUL_CONTINUE;
2866 }
2867 
2868 static int em_and(struct x86_emulate_ctxt *ctxt)
2869 {
2870 	emulate_2op_SrcV(ctxt, "and");
2871 	return X86EMUL_CONTINUE;
2872 }
2873 
2874 static int em_sub(struct x86_emulate_ctxt *ctxt)
2875 {
2876 	emulate_2op_SrcV(ctxt, "sub");
2877 	return X86EMUL_CONTINUE;
2878 }
2879 
2880 static int em_xor(struct x86_emulate_ctxt *ctxt)
2881 {
2882 	emulate_2op_SrcV(ctxt, "xor");
2883 	return X86EMUL_CONTINUE;
2884 }
2885 
2886 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2887 {
2888 	emulate_2op_SrcV(ctxt, "cmp");
2889 	/* Disable writeback. */
2890 	ctxt->dst.type = OP_NONE;
2891 	return X86EMUL_CONTINUE;
2892 }
2893 
2894 static int em_test(struct x86_emulate_ctxt *ctxt)
2895 {
2896 	emulate_2op_SrcV(ctxt, "test");
2897 	/* Disable writeback. */
2898 	ctxt->dst.type = OP_NONE;
2899 	return X86EMUL_CONTINUE;
2900 }
2901 
2902 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2903 {
2904 	/* Write back the register source. */
2905 	ctxt->src.val = ctxt->dst.val;
2906 	write_register_operand(&ctxt->src);
2907 
2908 	/* Write back the memory destination with implicit LOCK prefix. */
2909 	ctxt->dst.val = ctxt->src.orig_val;
2910 	ctxt->lock_prefix = 1;
2911 	return X86EMUL_CONTINUE;
2912 }
2913 
2914 static int em_imul(struct x86_emulate_ctxt *ctxt)
2915 {
2916 	emulate_2op_SrcV_nobyte(ctxt, "imul");
2917 	return X86EMUL_CONTINUE;
2918 }
2919 
2920 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2921 {
2922 	ctxt->dst.val = ctxt->src2.val;
2923 	return em_imul(ctxt);
2924 }
2925 
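/*
 * CWD/CDQ/CQO: fill rDX with the sign of the accumulator. The expression
 * ~((src >> (bits - 1)) - 1) yields 0 when the sign bit is clear and
 * all-ones when it is set, e.g. AX = 0x8000 gives DX = 0xffff.
 */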
2926 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2927 {
2928 	ctxt->dst.type = OP_REG;
2929 	ctxt->dst.bytes = ctxt->src.bytes;
2930 	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2931 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2932 
2933 	return X86EMUL_CONTINUE;
2934 }
2935 
2936 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2937 {
2938 	u64 tsc = 0;
2939 
2940 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2941 	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2942 	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2943 	return X86EMUL_CONTINUE;
2944 }
2945 
2946 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2947 {
2948 	u64 pmc;
2949 
2950 	if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
2951 		return emulate_gp(ctxt, 0);
2952 	ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
2953 	ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
2954 	return X86EMUL_CONTINUE;
2955 }
2956 
2957 static int em_mov(struct x86_emulate_ctxt *ctxt)
2958 {
2959 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
2960 	return X86EMUL_CONTINUE;
2961 }
2962 
2963 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
2964 {
2965 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
2966 		return emulate_gp(ctxt, 0);
2967 
2968 	/* Disable writeback. */
2969 	ctxt->dst.type = OP_NONE;
2970 	return X86EMUL_CONTINUE;
2971 }
2972 
2973 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
2974 {
2975 	unsigned long val;
2976 
2977 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2978 		val = ctxt->src.val & ~0ULL;
2979 	else
2980 		val = ctxt->src.val & ~0U;
2981 
2982 	/* #UD condition is already handled. */
2983 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
2984 		return emulate_gp(ctxt, 0);
2985 
2986 	/* Disable writeback. */
2987 	ctxt->dst.type = OP_NONE;
2988 	return X86EMUL_CONTINUE;
2989 }
2990 
2991 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
2992 {
2993 	u64 msr_data;
2994 
2995 	msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
2996 		| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
2997 	if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
2998 		return emulate_gp(ctxt, 0);
2999 
3000 	return X86EMUL_CONTINUE;
3001 }
3002 
3003 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3004 {
3005 	u64 msr_data;
3006 
3007 	if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
3008 		return emulate_gp(ctxt, 0);
3009 
3010 	ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
3011 	ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
3012 	return X86EMUL_CONTINUE;
3013 }
3014 
3015 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3016 {
3017 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3018 		return emulate_ud(ctxt);
3019 
3020 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3021 	return X86EMUL_CONTINUE;
3022 }
3023 
3024 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3025 {
3026 	u16 sel = ctxt->src.val;
3027 
3028 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3029 		return emulate_ud(ctxt);
3030 
3031 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3032 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3033 
3034 	/* Disable writeback. */
3035 	ctxt->dst.type = OP_NONE;
3036 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3037 }
3038 
3039 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3040 {
3041 	u16 sel = ctxt->src.val;
3042 
3043 	/* Disable writeback. */
3044 	ctxt->dst.type = OP_NONE;
3045 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3046 }
3047 
3048 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3049 {
3050 	u16 sel = ctxt->src.val;
3051 
3052 	/* Disable writeback. */
3053 	ctxt->dst.type = OP_NONE;
3054 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3055 }
3056 
3057 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3058 {
3059 	int rc;
3060 	ulong linear;
3061 
3062 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3063 	if (rc == X86EMUL_CONTINUE)
3064 		ctxt->ops->invlpg(ctxt, linear);
3065 	/* Disable writeback. */
3066 	ctxt->dst.type = OP_NONE;
3067 	return X86EMUL_CONTINUE;
3068 }
3069 
3070 static int em_clts(struct x86_emulate_ctxt *ctxt)
3071 {
3072 	ulong cr0;
3073 
3074 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3075 	cr0 &= ~X86_CR0_TS;
3076 	ctxt->ops->set_cr(ctxt, 0, cr0);
3077 	return X86EMUL_CONTINUE;
3078 }
3079 
3080 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3081 {
3082 	int rc;
3083 
3084 	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
3085 		return X86EMUL_UNHANDLEABLE;
3086 
3087 	rc = ctxt->ops->fix_hypercall(ctxt);
3088 	if (rc != X86EMUL_CONTINUE)
3089 		return rc;
3090 
3091 	/* Let the processor re-execute the fixed hypercall */
3092 	ctxt->_eip = ctxt->eip;
3093 	/* Disable writeback. */
3094 	ctxt->dst.type = OP_NONE;
3095 	return X86EMUL_CONTINUE;
3096 }
3097 
3098 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3099 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3100 					      struct desc_ptr *ptr))
3101 {
3102 	struct desc_ptr desc_ptr;
3103 
3104 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3105 		ctxt->op_bytes = 8;
3106 	get(ctxt, &desc_ptr);
3107 	if (ctxt->op_bytes == 2) {
3108 		ctxt->op_bytes = 4;
3109 		desc_ptr.address &= 0x00ffffff;
3110 	}
3111 	/* Disable writeback. */
3112 	ctxt->dst.type = OP_NONE;
3113 	return segmented_write(ctxt, ctxt->dst.addr.mem,
3114 			       &desc_ptr, 2 + ctxt->op_bytes);
3115 }
3116 
3117 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3118 {
3119 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3120 }
3121 
3122 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3123 {
3124 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3125 }
3126 
3127 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3128 {
3129 	struct desc_ptr desc_ptr;
3130 	int rc;
3131 
3132 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3133 		ctxt->op_bytes = 8;
3134 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3135 			     &desc_ptr.size, &desc_ptr.address,
3136 			     ctxt->op_bytes);
3137 	if (rc != X86EMUL_CONTINUE)
3138 		return rc;
3139 	ctxt->ops->set_gdt(ctxt, &desc_ptr);
3140 	/* Disable writeback. */
3141 	ctxt->dst.type = OP_NONE;
3142 	return X86EMUL_CONTINUE;
3143 }
3144 
3145 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3146 {
3147 	int rc;
3148 
3149 	rc = ctxt->ops->fix_hypercall(ctxt);
3150 
3151 	/* Disable writeback. */
3152 	ctxt->dst.type = OP_NONE;
3153 	return rc;
3154 }
3155 
3156 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3157 {
3158 	struct desc_ptr desc_ptr;
3159 	int rc;
3160 
3161 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3162 		ctxt->op_bytes = 8;
3163 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3164 			     &desc_ptr.size, &desc_ptr.address,
3165 			     ctxt->op_bytes);
3166 	if (rc != X86EMUL_CONTINUE)
3167 		return rc;
3168 	ctxt->ops->set_idt(ctxt, &desc_ptr);
3169 	/* Disable writeback. */
3170 	ctxt->dst.type = OP_NONE;
3171 	return X86EMUL_CONTINUE;
3172 }
3173 
3174 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3175 {
3176 	ctxt->dst.bytes = 2;
3177 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3178 	return X86EMUL_CONTINUE;
3179 }
3180 
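/*
 * LMSW writes only CR0[3:0]. PE survives the ~0x0e mask and is then OR-ed
 * back in, so LMSW can set PE but never clear it, matching the SDM.
 */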
3181 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3182 {
3183 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3184 			  | (ctxt->src.val & 0x0f));
3185 	ctxt->dst.type = OP_NONE;
3186 	return X86EMUL_CONTINUE;
3187 }
3188 
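/*
 * LOOP family: 0xe0 LOOPNE, 0xe1 LOOPE, 0xe2 LOOP. For 0xe0/0xe1 the
 * opcode xor 0x5 maps to the matching Jcc condition nibble (0xe0^5 -> 5,
 * "not zero"; 0xe1^5 -> 4, "zero"), so test_cc checks ZF with the right
 * sense.
 */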
3189 static int em_loop(struct x86_emulate_ctxt *ctxt)
3190 {
3191 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
3192 	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
3193 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3194 		jmp_rel(ctxt, ctxt->src.val);
3195 
3196 	return X86EMUL_CONTINUE;
3197 }
3198 
3199 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3200 {
3201 	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
3202 		jmp_rel(ctxt, ctxt->src.val);
3203 
3204 	return X86EMUL_CONTINUE;
3205 }
3206 
3207 static int em_in(struct x86_emulate_ctxt *ctxt)
3208 {
3209 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3210 			     &ctxt->dst.val))
3211 		return X86EMUL_IO_NEEDED;
3212 
3213 	return X86EMUL_CONTINUE;
3214 }
3215 
3216 static int em_out(struct x86_emulate_ctxt *ctxt)
3217 {
3218 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3219 				    &ctxt->src.val, 1);
3220 	/* Disable writeback. */
3221 	ctxt->dst.type = OP_NONE;
3222 	return X86EMUL_CONTINUE;
3223 }
3224 
3225 static int em_cli(struct x86_emulate_ctxt *ctxt)
3226 {
3227 	if (emulator_bad_iopl(ctxt))
3228 		return emulate_gp(ctxt, 0);
3229 
3230 	ctxt->eflags &= ~X86_EFLAGS_IF;
3231 	return X86EMUL_CONTINUE;
3232 }
3233 
3234 static int em_sti(struct x86_emulate_ctxt *ctxt)
3235 {
3236 	if (emulator_bad_iopl(ctxt))
3237 		return emulate_gp(ctxt, 0);
3238 
3239 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3240 	ctxt->eflags |= X86_EFLAGS_IF;
3241 	return X86EMUL_CONTINUE;
3242 }
3243 
3244 static int em_bt(struct x86_emulate_ctxt *ctxt)
3245 {
3246 	/* Disable writeback. */
3247 	ctxt->dst.type = OP_NONE;
3248 	/* only subword offset */
3249 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
3250 
3251 	emulate_2op_SrcV_nobyte(ctxt, "bt");
3252 	return X86EMUL_CONTINUE;
3253 }
3254 
3255 static int em_bts(struct x86_emulate_ctxt *ctxt)
3256 {
3257 	emulate_2op_SrcV_nobyte(ctxt, "bts");
3258 	return X86EMUL_CONTINUE;
3259 }
3260 
3261 static int em_btr(struct x86_emulate_ctxt *ctxt)
3262 {
3263 	emulate_2op_SrcV_nobyte(ctxt, "btr");
3264 	return X86EMUL_CONTINUE;
3265 }
3266 
3267 static int em_btc(struct x86_emulate_ctxt *ctxt)
3268 {
3269 	emulate_2op_SrcV_nobyte(ctxt, "btc");
3270 	return X86EMUL_CONTINUE;
3271 }
3272 
3273 static int em_bsf(struct x86_emulate_ctxt *ctxt)
3274 {
3275 	emulate_2op_SrcV_nobyte(ctxt, "bsf");
3276 	return X86EMUL_CONTINUE;
3277 }
3278 
3279 static int em_bsr(struct x86_emulate_ctxt *ctxt)
3280 {
3281 	emulate_2op_SrcV_nobyte(ctxt, "bsr");
3282 	return X86EMUL_CONTINUE;
3283 }
3284 
3285 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3286 {
3287 	u32 eax, ebx, ecx, edx;
3288 
3289 	eax = ctxt->regs[VCPU_REGS_RAX];
3290 	ecx = ctxt->regs[VCPU_REGS_RCX];
3291 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3292 	ctxt->regs[VCPU_REGS_RAX] = eax;
3293 	ctxt->regs[VCPU_REGS_RBX] = ebx;
3294 	ctxt->regs[VCPU_REGS_RCX] = ecx;
3295 	ctxt->regs[VCPU_REGS_RDX] = edx;
3296 	return X86EMUL_CONTINUE;
3297 }
3298 
3299 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3300 {
3301 	ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL;
3302 	ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8;
3303 	return X86EMUL_CONTINUE;
3304 }
3305 
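/*
 * BSWAP reverses byte order, e.g. 0x12345678 -> 0x78563412. Only 32- and
 * 64-bit operands are architecturally defined; a 16-bit operand falls
 * through to the 32-bit path here.
 */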
3306 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3307 {
3308 	switch (ctxt->op_bytes) {
3309 #ifdef CONFIG_X86_64
3310 	case 8:
3311 		asm("bswap %0" : "+r"(ctxt->dst.val));
3312 		break;
3313 #endif
3314 	default:
3315 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3316 		break;
3317 	}
3318 	return X86EMUL_CONTINUE;
3319 }
3320 
3321 static bool valid_cr(int nr)
3322 {
3323 	switch (nr) {
3324 	case 0:
3325 	case 2 ... 4:
3326 	case 8:
3327 		return true;
3328 	default:
3329 		return false;
3330 	}
3331 }
3332 
3333 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3334 {
3335 	if (!valid_cr(ctxt->modrm_reg))
3336 		return emulate_ud(ctxt);
3337 
3338 	return X86EMUL_CONTINUE;
3339 }
3340 
3341 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3342 {
3343 	u64 new_val = ctxt->src.val64;
3344 	int cr = ctxt->modrm_reg;
3345 	u64 efer = 0;
3346 
3347 	static u64 cr_reserved_bits[] = {
3348 		0xffffffff00000000ULL,
3349 		0, 0, 0, /* CR3 checked later */
3350 		CR4_RESERVED_BITS,
3351 		0, 0, 0,
3352 		CR8_RESERVED_BITS,
3353 	};
3354 
3355 	if (!valid_cr(cr))
3356 		return emulate_ud(ctxt);
3357 
3358 	if (new_val & cr_reserved_bits[cr])
3359 		return emulate_gp(ctxt, 0);
3360 
3361 	switch (cr) {
3362 	case 0: {
3363 		u64 cr4;
3364 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3365 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3366 			return emulate_gp(ctxt, 0);
3367 
3368 		cr4 = ctxt->ops->get_cr(ctxt, 4);
3369 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3370 
3371 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3372 		    !(cr4 & X86_CR4_PAE))
3373 			return emulate_gp(ctxt, 0);
3374 
3375 		break;
3376 		}
3377 	case 3: {
3378 		u64 rsvd = 0;
3379 
3380 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3381 		if (efer & EFER_LMA)
3382 			rsvd = CR3_L_MODE_RESERVED_BITS;
3383 		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3384 			rsvd = CR3_PAE_RESERVED_BITS;
3385 		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3386 			rsvd = CR3_NONPAE_RESERVED_BITS;
3387 
3388 		if (new_val & rsvd)
3389 			return emulate_gp(ctxt, 0);
3390 
3391 		break;
3392 		}
3393 	case 4: {
3394 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3395 
3396 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3397 			return emulate_gp(ctxt, 0);
3398 
3399 		break;
3400 		}
3401 	}
3402 
3403 	return X86EMUL_CONTINUE;
3404 }
3405 
3406 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3407 {
3408 	unsigned long dr7;
3409 
3410 	ctxt->ops->get_dr(ctxt, 7, &dr7);
3411 
3412 	/* Check if DR7.GD (general detect enable) is set */
3413 	return dr7 & (1 << 13);
3414 }
3415 
3416 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3417 {
3418 	int dr = ctxt->modrm_reg;
3419 	u64 cr4;
3420 
3421 	if (dr > 7)
3422 		return emulate_ud(ctxt);
3423 
3424 	cr4 = ctxt->ops->get_cr(ctxt, 4);
3425 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3426 		return emulate_ud(ctxt);
3427 
3428 	if (check_dr7_gd(ctxt))
3429 		return emulate_db(ctxt);
3430 
3431 	return X86EMUL_CONTINUE;
3432 }
3433 
3434 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3435 {
3436 	u64 new_val = ctxt->src.val64;
3437 	int dr = ctxt->modrm_reg;
3438 
3439 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3440 		return emulate_gp(ctxt, 0);
3441 
3442 	return check_dr_read(ctxt);
3443 }
3444 
3445 static int check_svme(struct x86_emulate_ctxt *ctxt)
3446 {
3447 	u64 efer;
3448 
3449 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3450 
3451 	if (!(efer & EFER_SVME))
3452 		return emulate_ud(ctxt);
3453 
3454 	return X86EMUL_CONTINUE;
3455 }
3456 
3457 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3458 {
3459 	u64 rax = ctxt->regs[VCPU_REGS_RAX];
3460 
3461 	/* Valid physical address? */
3462 	if (rax & 0xffff000000000000ULL)
3463 		return emulate_gp(ctxt, 0);
3464 
3465 	return check_svme(ctxt);
3466 }
3467 
3468 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3469 {
3470 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3471 
3472 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3473 		return emulate_ud(ctxt);
3474 
3475 	return X86EMUL_CONTINUE;
3476 }
3477 
3478 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3479 {
3480 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3481 	u64 rcx = ctxt->regs[VCPU_REGS_RCX];
3482 
3483 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3484 	    (rcx > 3))
3485 		return emulate_gp(ctxt, 0);
3486 
3487 	return X86EMUL_CONTINUE;
3488 }
3489 
3490 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3491 {
3492 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3493 	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3494 		return emulate_gp(ctxt, 0);
3495 
3496 	return X86EMUL_CONTINUE;
3497 }
3498 
3499 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3500 {
3501 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3502 	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3503 		return emulate_gp(ctxt, 0);
3504 
3505 	return X86EMUL_CONTINUE;
3506 }
3507 
3508 #define D(_y) { .flags = (_y) }
3509 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3510 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3511 		      .check_perm = (_p) }
3512 #define N    D(0)
3513 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3514 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3515 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3516 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3517 #define II(_f, _e, _i) \
3518 	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3519 #define IIP(_f, _e, _i, _p) \
3520 	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3521 	  .check_perm = (_p) }
3522 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3523 
3524 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
3525 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3526 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
3527 #define I2bvIP(_f, _e, _i, _p) \
3528 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3529 
3530 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
3531 		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
3532 		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
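/*
 * I6ALU() instantiates the six classic encodings of an ALU opcode row:
 * r/m,r and r,r/m in byte and word/long forms (Lock is stripped from the
 * register-destination pair), plus AL,imm8 and rAX,imm.
 */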
3533 
3534 static struct opcode group7_rm1[] = {
3535 	DI(SrcNone | Priv, monitor),
3536 	DI(SrcNone | Priv, mwait),
3537 	N, N, N, N, N, N,
3538 };
3539 
3540 static struct opcode group7_rm3[] = {
3541 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
3542 	II(SrcNone  | Prot | VendorSpecific,	em_vmmcall,	vmmcall),
3543 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
3544 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
3545 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
3546 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
3547 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
3548 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
3549 };
3550 
3551 static struct opcode group7_rm7[] = {
3552 	N,
3553 	DIP(SrcNone, rdtscp, check_rdtsc),
3554 	N, N, N, N, N, N,
3555 };
3556 
3557 static struct opcode group1[] = {
3558 	I(Lock, em_add),
3559 	I(Lock | PageTable, em_or),
3560 	I(Lock, em_adc),
3561 	I(Lock, em_sbb),
3562 	I(Lock | PageTable, em_and),
3563 	I(Lock, em_sub),
3564 	I(Lock, em_xor),
3565 	I(0, em_cmp),
3566 };
3567 
3568 static struct opcode group1A[] = {
3569 	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3570 };
3571 
3572 static struct opcode group3[] = {
3573 	I(DstMem | SrcImm, em_test),
3574 	I(DstMem | SrcImm, em_test),
3575 	I(DstMem | SrcNone | Lock, em_not),
3576 	I(DstMem | SrcNone | Lock, em_neg),
3577 	I(SrcMem, em_mul_ex),
3578 	I(SrcMem, em_imul_ex),
3579 	I(SrcMem, em_div_ex),
3580 	I(SrcMem, em_idiv_ex),
3581 };
3582 
3583 static struct opcode group4[] = {
3584 	I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3585 	I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3586 	N, N, N, N, N, N,
3587 };
3588 
3589 static struct opcode group5[] = {
3590 	I(DstMem | SrcNone | Lock,		em_grp45),
3591 	I(DstMem | SrcNone | Lock,		em_grp45),
3592 	I(SrcMem | Stack,			em_grp45),
3593 	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
3594 	I(SrcMem | Stack,			em_grp45),
3595 	I(SrcMemFAddr | ImplicitOps,		em_grp45),
3596 	I(SrcMem | Stack,			em_grp45), N,
3597 };
3598 
3599 static struct opcode group6[] = {
3600 	DI(Prot,	sldt),
3601 	DI(Prot,	str),
3602 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
3603 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
3604 	N, N, N, N,
3605 };
3606 
3607 static struct group_dual group7 = { {
3608 	II(Mov | DstMem | Priv,			em_sgdt, sgdt),
3609 	II(Mov | DstMem | Priv,			em_sidt, sidt),
3610 	II(SrcMem | Priv,			em_lgdt, lgdt),
3611 	II(SrcMem | Priv,			em_lidt, lidt),
3612 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
3613 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
3614 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
3615 }, {
3616 	I(SrcNone | Priv | VendorSpecific,	em_vmcall),
3617 	EXT(0, group7_rm1),
3618 	N, EXT(0, group7_rm3),
3619 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
3620 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
3621 	EXT(0, group7_rm7),
3622 } };
3623 
3624 static struct opcode group8[] = {
3625 	N, N, N, N,
3626 	I(DstMem | SrcImmByte,				em_bt),
3627 	I(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
3628 	I(DstMem | SrcImmByte | Lock,			em_btr),
3629 	I(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
3630 };
3631 
3632 static struct group_dual group9 = { {
3633 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3634 }, {
3635 	N, N, N, N, N, N, N, N,
3636 } };
3637 
3638 static struct opcode group11[] = {
3639 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
3640 	X7(D(Undefined)),
3641 };
3642 
3643 static struct gprefix pfx_0f_6f_0f_7f = {
3644 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3645 };
3646 
3647 static struct gprefix pfx_vmovntpx = {
3648 	I(0, em_mov), N, N, N,
3649 };
3650 
3651 static struct opcode opcode_table[256] = {
3652 	/* 0x00 - 0x07 */
3653 	I6ALU(Lock, em_add),
3654 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3655 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3656 	/* 0x08 - 0x0F */
3657 	I6ALU(Lock | PageTable, em_or),
3658 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3659 	N,
3660 	/* 0x10 - 0x17 */
3661 	I6ALU(Lock, em_adc),
3662 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3663 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3664 	/* 0x18 - 0x1F */
3665 	I6ALU(Lock, em_sbb),
3666 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3667 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3668 	/* 0x20 - 0x27 */
3669 	I6ALU(Lock | PageTable, em_and), N, N,
3670 	/* 0x28 - 0x2F */
3671 	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3672 	/* 0x30 - 0x37 */
3673 	I6ALU(Lock, em_xor), N, N,
3674 	/* 0x38 - 0x3F */
3675 	I6ALU(0, em_cmp), N, N,
3676 	/* 0x40 - 0x4F */
3677 	X16(D(DstReg)),
3678 	/* 0x50 - 0x57 */
3679 	X8(I(SrcReg | Stack, em_push)),
3680 	/* 0x58 - 0x5F */
3681 	X8(I(DstReg | Stack, em_pop)),
3682 	/* 0x60 - 0x67 */
3683 	I(ImplicitOps | Stack | No64, em_pusha),
3684 	I(ImplicitOps | Stack | No64, em_popa),
3685 	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86-64) */ ,
3686 	N, N, N, N,
3687 	/* 0x68 - 0x6F */
3688 	I(SrcImm | Mov | Stack, em_push),
3689 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3690 	I(SrcImmByte | Mov | Stack, em_push),
3691 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3692 	I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
3693 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3694 	/* 0x70 - 0x7F */
3695 	X16(D(SrcImmByte)),
3696 	/* 0x80 - 0x87 */
3697 	G(ByteOp | DstMem | SrcImm, group1),
3698 	G(DstMem | SrcImm, group1),
3699 	G(ByteOp | DstMem | SrcImm | No64, group1),
3700 	G(DstMem | SrcImmByte, group1),
3701 	I2bv(DstMem | SrcReg | ModRM, em_test),
3702 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3703 	/* 0x88 - 0x8F */
3704 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3705 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3706 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3707 	D(ModRM | SrcMem | NoAccess | DstReg),
3708 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3709 	G(0, group1A),
3710 	/* 0x90 - 0x97 */
3711 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3712 	/* 0x98 - 0x9F */
3713 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3714 	I(SrcImmFAddr | No64, em_call_far), N,
3715 	II(ImplicitOps | Stack, em_pushf, pushf),
3716 	II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
3717 	/* 0xA0 - 0xA7 */
3718 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3719 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3720 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
3721 	I2bv(SrcSI | DstDI | String, em_cmp),
3722 	/* 0xA8 - 0xAF */
3723 	I2bv(DstAcc | SrcImm, em_test),
3724 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3725 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3726 	I2bv(SrcAcc | DstDI | String, em_cmp),
3727 	/* 0xB0 - 0xB7 */
3728 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3729 	/* 0xB8 - 0xBF */
3730 	X8(I(DstReg | SrcImm | Mov, em_mov)),
3731 	/* 0xC0 - 0xC7 */
3732 	D2bv(DstMem | SrcImmByte | ModRM),
3733 	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3734 	I(ImplicitOps | Stack, em_ret),
3735 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3736 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3737 	G(ByteOp, group11), G(0, group11),
3738 	/* 0xC8 - 0xCF */
3739 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
3740 	N, I(ImplicitOps | Stack, em_ret_far),
3741 	D(ImplicitOps), DI(SrcImmByte, intn),
3742 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3743 	/* 0xD0 - 0xD7 */
3744 	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3745 	N, N, N, N,
3746 	/* 0xD8 - 0xDF */
3747 	N, N, N, N, N, N, N, N,
3748 	/* 0xE0 - 0xE7 */
3749 	X3(I(SrcImmByte, em_loop)),
3750 	I(SrcImmByte, em_jcxz),
3751 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
3752 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
3753 	/* 0xE8 - 0xEF */
3754 	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3755 	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3756 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
3757 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
3758 	/* 0xF0 - 0xF7 */
3759 	N, DI(ImplicitOps, icebp), N, N,
3760 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3761 	G(ByteOp, group3), G(0, group3),
3762 	/* 0xF8 - 0xFF */
3763 	D(ImplicitOps), D(ImplicitOps),
3764 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3765 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3766 };
3767 
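/* As above, but indexed by the opcode byte following the 0x0f escape. */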
3768 static struct opcode twobyte_table[256] = {
3769 	/* 0x00 - 0x0F */
3770 	G(0, group6), GD(0, &group7), N, N,
3771 	N, I(ImplicitOps | VendorSpecific, em_syscall),
3772 	II(ImplicitOps | Priv, em_clts, clts), N,
3773 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3774 	N, D(ImplicitOps | ModRM), N, N,
3775 	/* 0x10 - 0x1F */
3776 	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3777 	/* 0x20 - 0x2F */
3778 	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3779 	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3780 	IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
3781 	IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
3782 	N, N, N, N,
3783 	N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
3784 	N, N, N, N,
3785 	/* 0x30 - 0x3F */
3786 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
3787 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3788 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
3789 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
3790 	I(ImplicitOps | VendorSpecific, em_sysenter),
3791 	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3792 	N, N,
3793 	N, N, N, N, N, N, N, N,
3794 	/* 0x40 - 0x4F */
3795 	X16(D(DstReg | SrcMem | ModRM | Mov)),
3796 	/* 0x50 - 0x5F */
3797 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3798 	/* 0x60 - 0x6F */
3799 	N, N, N, N,
3800 	N, N, N, N,
3801 	N, N, N, N,
3802 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3803 	/* 0x70 - 0x7F */
3804 	N, N, N, N,
3805 	N, N, N, N,
3806 	N, N, N, N,
3807 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3808 	/* 0x80 - 0x8F */
3809 	X16(D(SrcImm)),
3810 	/* 0x90 - 0x9F */
3811 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
3812 	/* 0xA0 - 0xA7 */
3813 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3814 	II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
3815 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3816 	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3817 	/* 0xA8 - 0xAF */
3818 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3819 	DI(ImplicitOps, rsm),
3820 	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
3821 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3822 	D(DstMem | SrcReg | Src2CL | ModRM),
3823 	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3824 	/* 0xB0 - 0xB7 */
3825 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
3826 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3827 	I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
3828 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3829 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3830 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3831 	/* 0xB8 - 0xBF */
3832 	N, N,
3833 	G(BitOp, group8),
3834 	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3835 	I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
3836 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3837 	/* 0xC0 - 0xC7 */
3838 	D2bv(DstMem | SrcReg | ModRM | Lock),
3839 	N, D(DstMem | SrcReg | ModRM | Mov),
3840 	N, N, N, GD(0, &group9),
3841 	/* 0xC8 - 0xCF */
3842 	X8(I(DstReg, em_bswap)),
3843 	/* 0xD0 - 0xDF */
3844 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3845 	/* 0xE0 - 0xEF */
3846 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3847 	/* 0xF0 - 0xFF */
3848 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3849 };
3850 
3851 #undef D
3852 #undef N
3853 #undef G
3854 #undef GD
3855 #undef I
3856 #undef GP
3857 #undef EXT
3858 
3859 #undef D2bv
3860 #undef D2bvIP
3861 #undef I2bv
3862 #undef I2bvIP
3863 #undef I6ALU
3864 
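/*
 * Note that the decoder never fetches more than four immediate bytes:
 * a 64-bit operand size gets a sign-extended 32-bit immediate instead,
 * which is how most 64-bit instructions encode their immediates.
 */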
3865 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3866 {
3867 	unsigned size;
3868 
3869 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3870 	if (size == 8)
3871 		size = 4;
3872 	return size;
3873 }
3874 
3875 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3876 		      unsigned size, bool sign_extension)
3877 {
3878 	int rc = X86EMUL_CONTINUE;
3879 
3880 	op->type = OP_IMM;
3881 	op->bytes = size;
3882 	op->addr.mem.ea = ctxt->_eip;
3883 	/* NB. Immediates are sign-extended as necessary. */
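	/*
	 * For example, "add $-1, %eax" encoded with opcode 0x83 (SrcImmByte)
	 * fetches the single byte 0xff as an s8, so op->val becomes -1.
	 */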
3884 	switch (op->bytes) {
3885 	case 1:
3886 		op->val = insn_fetch(s8, ctxt);
3887 		break;
3888 	case 2:
3889 		op->val = insn_fetch(s16, ctxt);
3890 		break;
3891 	case 4:
3892 		op->val = insn_fetch(s32, ctxt);
3893 		break;
3894 	}
3895 	if (!sign_extension) {
3896 		switch (op->bytes) {
3897 		case 1:
3898 			op->val &= 0xff;
3899 			break;
3900 		case 2:
3901 			op->val &= 0xffff;
3902 			break;
3903 		case 4:
3904 			op->val &= 0xffffffff;
3905 			break;
3906 		}
3907 	}
3908 done:
3909 	return rc;
3910 }
3911 
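/*
 * Fetch a single operand according to one of the OpXXX values that
 * x86_decode_insn() extracts from the Src/Src2/Dst fields of ctxt->d.
 */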
3912 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3913 			  unsigned d)
3914 {
3915 	int rc = X86EMUL_CONTINUE;
3916 
3917 	switch (d) {
3918 	case OpReg:
3919 		decode_register_operand(ctxt, op);
3920 		break;
3921 	case OpImmUByte:
3922 		rc = decode_imm(ctxt, op, 1, false);
3923 		break;
3924 	case OpMem:
3925 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3926 	mem_common:
3927 		*op = ctxt->memop;
3928 		ctxt->memopp = op;
3929 		if ((ctxt->d & BitOp) && op == &ctxt->dst)
3930 			fetch_bit_operand(ctxt);
3931 		op->orig_val = op->val;
3932 		break;
3933 	case OpMem64:
3934 		ctxt->memop.bytes = 8;
3935 		goto mem_common;
3936 	case OpAcc:
3937 		op->type = OP_REG;
3938 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3939 		op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3940 		fetch_register_operand(op);
3941 		op->orig_val = op->val;
3942 		break;
3943 	case OpDI:
3944 		op->type = OP_MEM;
3945 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3946 		op->addr.mem.ea =
3947 			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3948 		op->addr.mem.seg = VCPU_SREG_ES;
3949 		op->val = 0;
3950 		break;
3951 	case OpDX:
3952 		op->type = OP_REG;
3953 		op->bytes = 2;
3954 		op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3955 		fetch_register_operand(op);
3956 		break;
3957 	case OpCL:
3958 		op->bytes = 1;
3959 		op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3960 		break;
3961 	case OpImmByte:
3962 		rc = decode_imm(ctxt, op, 1, true);
3963 		break;
3964 	case OpOne:
3965 		op->bytes = 1;
3966 		op->val = 1;
3967 		break;
3968 	case OpImm:
3969 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3970 		break;
3971 	case OpMem8:
3972 		ctxt->memop.bytes = 1;
3973 		goto mem_common;
3974 	case OpMem16:
3975 		ctxt->memop.bytes = 2;
3976 		goto mem_common;
3977 	case OpMem32:
3978 		ctxt->memop.bytes = 4;
3979 		goto mem_common;
3980 	case OpImmU16:
3981 		rc = decode_imm(ctxt, op, 2, false);
3982 		break;
3983 	case OpImmU:
3984 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3985 		break;
3986 	case OpSI:
3987 		op->type = OP_MEM;
3988 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3989 		op->addr.mem.ea =
3990 			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3991 		op->addr.mem.seg = seg_override(ctxt);
3992 		op->val = 0;
3993 		break;
3994 	case OpImmFAddr:
3995 		op->type = OP_IMM;
3996 		op->addr.mem.ea = ctxt->_eip;
3997 		op->bytes = ctxt->op_bytes + 2;
3998 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
3999 		break;
4000 	case OpMemFAddr:
4001 		ctxt->memop.bytes = ctxt->op_bytes + 2;
4002 		goto mem_common;
4003 	case OpES:
4004 		op->val = VCPU_SREG_ES;
4005 		break;
4006 	case OpCS:
4007 		op->val = VCPU_SREG_CS;
4008 		break;
4009 	case OpSS:
4010 		op->val = VCPU_SREG_SS;
4011 		break;
4012 	case OpDS:
4013 		op->val = VCPU_SREG_DS;
4014 		break;
4015 	case OpFS:
4016 		op->val = VCPU_SREG_FS;
4017 		break;
4018 	case OpGS:
4019 		op->val = VCPU_SREG_GS;
4020 		break;
4021 	case OpImplicit:
4022 		/* Special instructions do their own operand decoding. */
4023 	default:
4024 		op->type = OP_NONE; /* Disable writeback. */
4025 		break;
4026 	}
4027 
4028 done:
4029 	return rc;
4030 }
4031 
4032 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4033 {
4034 	int rc = X86EMUL_CONTINUE;
4035 	int mode = ctxt->mode;
4036 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4037 	bool op_prefix = false;
4038 	struct opcode opcode;
4039 
4040 	ctxt->memop.type = OP_NONE;
4041 	ctxt->memopp = NULL;
4042 	ctxt->_eip = ctxt->eip;
4043 	ctxt->fetch.start = ctxt->_eip;
4044 	ctxt->fetch.end = ctxt->fetch.start + insn_len;
4045 	if (insn_len > 0)
4046 		memcpy(ctxt->fetch.data, insn, insn_len);
4047 
4048 	switch (mode) {
4049 	case X86EMUL_MODE_REAL:
4050 	case X86EMUL_MODE_VM86:
4051 	case X86EMUL_MODE_PROT16:
4052 		def_op_bytes = def_ad_bytes = 2;
4053 		break;
4054 	case X86EMUL_MODE_PROT32:
4055 		def_op_bytes = def_ad_bytes = 4;
4056 		break;
4057 #ifdef CONFIG_X86_64
4058 	case X86EMUL_MODE_PROT64:
4059 		def_op_bytes = 4;
4060 		def_ad_bytes = 8;
4061 		break;
4062 #endif
4063 	default:
4064 		return EMULATION_FAILED;
4065 	}
4066 
4067 	ctxt->op_bytes = def_op_bytes;
4068 	ctxt->ad_bytes = def_ad_bytes;
4069 
4070 	/* Legacy prefixes. */
4071 	for (;;) {
4072 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4073 		case 0x66:	/* operand-size override */
4074 			op_prefix = true;
4075 			/* switch between 2/4 bytes */
4076 			ctxt->op_bytes = def_op_bytes ^ 6;
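			/* XOR toggles the size: 2 ^ 6 == 4, 4 ^ 6 == 2; likewise 8 ^ 12 == 4 below */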
4077 			break;
4078 		case 0x67:	/* address-size override */
4079 			if (mode == X86EMUL_MODE_PROT64)
4080 				/* switch between 4/8 bytes */
4081 				ctxt->ad_bytes = def_ad_bytes ^ 12;
4082 			else
4083 				/* switch between 2/4 bytes */
4084 				ctxt->ad_bytes = def_ad_bytes ^ 6;
4085 			break;
4086 		case 0x26:	/* ES override */
4087 		case 0x2e:	/* CS override */
4088 		case 0x36:	/* SS override */
4089 		case 0x3e:	/* DS override */
4090 			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
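			/*
			 * Bits 4:3 of 0x26/0x2e/0x36/0x3e give 0..3, which
			 * relies on the VCPU_SREG_{ES,CS,SS,DS} numbering;
			 * FS/GS below likewise map to 4 and 5.
			 */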
4091 			break;
4092 		case 0x64:	/* FS override */
4093 		case 0x65:	/* GS override */
4094 			set_seg_override(ctxt, ctxt->b & 7);
4095 			break;
4096 		case 0x40 ... 0x4f: /* REX */
4097 			if (mode != X86EMUL_MODE_PROT64)
4098 				goto done_prefixes;
4099 			ctxt->rex_prefix = ctxt->b;
4100 			continue;
4101 		case 0xf0:	/* LOCK */
4102 			ctxt->lock_prefix = 1;
4103 			break;
4104 		case 0xf2:	/* REPNE/REPNZ */
4105 		case 0xf3:	/* REP/REPE/REPZ */
4106 			ctxt->rep_prefix = ctxt->b;
4107 			break;
4108 		default:
4109 			goto done_prefixes;
4110 		}
4111 
4112 		/* Any legacy prefix after a REX prefix nullifies its effect. */
4113 
4114 		ctxt->rex_prefix = 0;
4115 	}
4116 
4117 done_prefixes:
4118 
4119 	/* REX prefix. */
4120 	if (ctxt->rex_prefix & 8)
4121 		ctxt->op_bytes = 8;	/* REX.W */
4122 
4123 	/* Opcode byte(s). */
4124 	opcode = opcode_table[ctxt->b];
4125 	/* Two-byte opcode? */
4126 	if (ctxt->b == 0x0f) {
4127 		ctxt->twobyte = 1;
4128 		ctxt->b = insn_fetch(u8, ctxt);
4129 		opcode = twobyte_table[ctxt->b];
4130 	}
4131 	ctxt->d = opcode.flags;
4132 
4133 	if (ctxt->d & ModRM)
4134 		ctxt->modrm = insn_fetch(u8, ctxt);
4135 
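	/*
	 * Resolve opcode groups: Group and GroupDual index their sub-table
	 * by ModRM.reg (bits 5:3), with GroupDual split on ModRM.mod == 3;
	 * RMExt indexes by ModRM.rm, and Prefix by the mandatory SIMD prefix.
	 */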
4136 	while (ctxt->d & GroupMask) {
4137 		switch (ctxt->d & GroupMask) {
4138 		case Group:
4139 			goffset = (ctxt->modrm >> 3) & 7;
4140 			opcode = opcode.u.group[goffset];
4141 			break;
4142 		case GroupDual:
4143 			goffset = (ctxt->modrm >> 3) & 7;
4144 			if ((ctxt->modrm >> 6) == 3)
4145 				opcode = opcode.u.gdual->mod3[goffset];
4146 			else
4147 				opcode = opcode.u.gdual->mod012[goffset];
4148 			break;
4149 		case RMExt:
4150 			goffset = ctxt->modrm & 7;
4151 			opcode = opcode.u.group[goffset];
4152 			break;
4153 		case Prefix:
4154 			if (ctxt->rep_prefix && op_prefix)
4155 				return EMULATION_FAILED;
4156 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4157 			switch (simd_prefix) {
4158 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4159 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4160 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4161 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4162 			}
4163 			break;
4164 		default:
4165 			return EMULATION_FAILED;
4166 		}
4167 
4168 		ctxt->d &= ~(u64)GroupMask;
4169 		ctxt->d |= opcode.flags;
4170 	}
4171 
4172 	ctxt->execute = opcode.u.execute;
4173 	ctxt->check_perm = opcode.check_perm;
4174 	ctxt->intercept = opcode.intercept;
4175 
4176 	/* Unrecognised? */
4177 	if (ctxt->d == 0 || (ctxt->d & Undefined))
4178 		return EMULATION_FAILED;
4179 
4180 	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
4181 		return EMULATION_FAILED;
4182 
4183 	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4184 		ctxt->op_bytes = 8;
4185 
4186 	if (ctxt->d & Op3264) {
4187 		if (mode == X86EMUL_MODE_PROT64)
4188 			ctxt->op_bytes = 8;
4189 		else
4190 			ctxt->op_bytes = 4;
4191 	}
4192 
4193 	if (ctxt->d & Sse)
4194 		ctxt->op_bytes = 16;
4195 	else if (ctxt->d & Mmx)
4196 		ctxt->op_bytes = 8;
4197 
4198 	/* ModRM and SIB bytes. */
4199 	if (ctxt->d & ModRM) {
4200 		rc = decode_modrm(ctxt, &ctxt->memop);
4201 		if (!ctxt->has_seg_override)
4202 			set_seg_override(ctxt, ctxt->modrm_seg);
4203 	} else if (ctxt->d & MemAbs)
4204 		rc = decode_abs(ctxt, &ctxt->memop);
4205 	if (rc != X86EMUL_CONTINUE)
4206 		goto done;
4207 
4208 	if (!ctxt->has_seg_override)
4209 		set_seg_override(ctxt, VCPU_SREG_DS);
4210 
4211 	ctxt->memop.addr.mem.seg = seg_override(ctxt);
4212 
4213 	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
4214 		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
4215 
4216 	/*
4217 	 * Decode and fetch the source operand: register, memory
4218 	 * or immediate.
4219 	 */
4220 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4221 	if (rc != X86EMUL_CONTINUE)
4222 		goto done;
4223 
4224 	/*
4225 	 * Decode and fetch the second source operand: register, memory
4226 	 * or immediate.
4227 	 */
4228 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4229 	if (rc != X86EMUL_CONTINUE)
4230 		goto done;
4231 
4232 	/* Decode and fetch the destination operand: register or memory. */
4233 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4234 
4235 done:
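	/*
	 * A rip-relative displacement is relative to the end of the
	 * instruction, so it can only be turned into a final effective
	 * address here, once _eip has been advanced past the whole insn.
	 */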
4236 	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
4237 		ctxt->memopp->addr.mem.ea += ctxt->_eip;
4238 
4239 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4240 }
4241 
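/*
 * PageTable marks the opcodes guests commonly use to write page-table
 * entries (mov, or/and, bts/btc, cmpxchg, ...); this lets callers such
 * as the shadow MMU special-case emulated writes to guest page tables.
 */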
4242 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4243 {
4244 	return ctxt->d & PageTable;
4245 }
4246 
4247 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4248 {
4249 	/* The second termination condition applies only to REPE
4250 	 * and REPNE. Test whether the repeat string operation prefix is
4251 	 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4252 	 * corresponding termination condition:
4253 	 * 	- if REPE/REPZ and ZF = 0 then done
4254 	 * 	- if REPNE/REPNZ and ZF = 1 then done
4255 	 */
4256 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4257 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4258 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
4259 		 ((ctxt->eflags & EFLG_ZF) == 0))
4260 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
4261 		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4262 		return true;
4263 
4264 	return false;
4265 }
4266 
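/*
 * fwait delivers any x87 exception the guest left pending; the fixup
 * entry catches the resulting #MF on the host so that it can be
 * reflected back into the guest as MF_VECTOR.
 */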
4267 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4268 {
4269 	bool fault = false;
4270 
4271 	ctxt->ops->get_fpu(ctxt);
4272 	asm volatile("1: fwait \n\t"
4273 		     "2: \n\t"
4274 		     ".pushsection .fixup,\"ax\" \n\t"
4275 		     "3: \n\t"
4276 		     "movb $1, %[fault] \n\t"
4277 		     "jmp 2b \n\t"
4278 		     ".popsection \n\t"
4279 		     _ASM_EXTABLE(1b, 3b)
4280 		     : [fault]"+qm"(fault));
4281 	ctxt->ops->put_fpu(ctxt);
4282 
4283 	if (unlikely(fault))
4284 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
4285 
4286 	return X86EMUL_CONTINUE;
4287 }
4288 
4289 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4290 				       struct operand *op)
4291 {
4292 	if (op->type == OP_MM)
4293 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4294 }
4295 
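/*
 * Execute one previously decoded instruction: run the privilege,
 * permission and intercept checks, read the memory operands, dispatch
 * to the ->execute handler (or the opcode switches below), then write
 * back the result and advance eip, or restart instead for an
 * unfinished string operation.
 */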
4296 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4297 {
4298 	struct x86_emulate_ops *ops = ctxt->ops;
4299 	int rc = X86EMUL_CONTINUE;
4300 	int saved_dst_type = ctxt->dst.type;
4301 
4302 	ctxt->mem_read.pos = 0;
4303 
4304 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
4305 		rc = emulate_ud(ctxt);
4306 		goto done;
4307 	}
4308 
4309 	/* The LOCK prefix is allowed only with certain instructions, and only with a memory destination */
4310 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4311 		rc = emulate_ud(ctxt);
4312 		goto done;
4313 	}
4314 
4315 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4316 		rc = emulate_ud(ctxt);
4317 		goto done;
4318 	}
4319 
4320 	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4321 	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4322 		rc = emulate_ud(ctxt);
4323 		goto done;
4324 	}
4325 
4326 	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4327 		rc = emulate_nm(ctxt);
4328 		goto done;
4329 	}
4330 
4331 	if (ctxt->d & Mmx) {
4332 		rc = flush_pending_x87_faults(ctxt);
4333 		if (rc != X86EMUL_CONTINUE)
4334 			goto done;
4335 		/*
4336 		 * Now that we know the FPU is exception-safe, we can fetch
4337 		 * operands from it.
4338 		 */
4339 		fetch_possible_mmx_operand(ctxt, &ctxt->src);
4340 		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4341 		if (!(ctxt->d & Mov))
4342 			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4343 	}
4344 
4345 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4346 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
4347 					      X86_ICPT_PRE_EXCEPT);
4348 		if (rc != X86EMUL_CONTINUE)
4349 			goto done;
4350 	}
4351 
4352 	/* Privileged instructions can be executed only at CPL 0 */
4353 	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4354 		rc = emulate_gp(ctxt, 0);
4355 		goto done;
4356 	}
4357 
4358 	/* Instruction can only be executed in protected mode */
4359 	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
4360 		rc = emulate_ud(ctxt);
4361 		goto done;
4362 	}
4363 
4364 	/* Do instruction specific permission checks */
4365 	if (ctxt->check_perm) {
4366 		rc = ctxt->check_perm(ctxt);
4367 		if (rc != X86EMUL_CONTINUE)
4368 			goto done;
4369 	}
4370 
4371 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4372 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
4373 					      X86_ICPT_POST_EXCEPT);
4374 		if (rc != X86EMUL_CONTINUE)
4375 			goto done;
4376 	}
4377 
4378 	if (ctxt->rep_prefix && (ctxt->d & String)) {
4379 		/* All REP prefixes have the same first termination condition */
4380 		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
4381 			ctxt->eip = ctxt->_eip;
4382 			goto done;
4383 		}
4384 	}
4385 
4386 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4387 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
4388 				    ctxt->src.valptr, ctxt->src.bytes);
4389 		if (rc != X86EMUL_CONTINUE)
4390 			goto done;
4391 		ctxt->src.orig_val64 = ctxt->src.val64;
4392 	}
4393 
4394 	if (ctxt->src2.type == OP_MEM) {
4395 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4396 				    &ctxt->src2.val, ctxt->src2.bytes);
4397 		if (rc != X86EMUL_CONTINUE)
4398 			goto done;
4399 	}
4400 
4401 	if ((ctxt->d & DstMask) == ImplicitOps)
4402 		goto special_insn;
4403 
4405 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4406 		/* optimisation - avoid slow emulated read if Mov */
4407 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4408 				   &ctxt->dst.val, ctxt->dst.bytes);
4409 		if (rc != X86EMUL_CONTINUE)
4410 			goto done;
4411 	}
4412 	ctxt->dst.orig_val = ctxt->dst.val;
4413 
4414 special_insn:
4415 
4416 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4417 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
4418 					      X86_ICPT_POST_MEMACCESS);
4419 		if (rc != X86EMUL_CONTINUE)
4420 			goto done;
4421 	}
4422 
4423 	if (ctxt->execute) {
4424 		rc = ctxt->execute(ctxt);
4425 		if (rc != X86EMUL_CONTINUE)
4426 			goto done;
4427 		goto writeback;
4428 	}
4429 
4430 	if (ctxt->twobyte)
4431 		goto twobyte_insn;
4432 
4433 	switch (ctxt->b) {
4434 	case 0x40 ... 0x47: /* inc r16/r32 */
4435 		emulate_1op(ctxt, "inc");
4436 		break;
4437 	case 0x48 ... 0x4f: /* dec r16/r32 */
4438 		emulate_1op(ctxt, "dec");
4439 		break;
4440 	case 0x63:		/* movsxd */
4441 		if (ctxt->mode != X86EMUL_MODE_PROT64)
4442 			goto cannot_emulate;
4443 		ctxt->dst.val = (s32) ctxt->src.val;
4444 		break;
4445 	case 0x70 ... 0x7f: /* jcc (short) */
4446 		if (test_cc(ctxt->b, ctxt->eflags))
4447 			jmp_rel(ctxt, ctxt->src.val);
4448 		break;
4449 	case 0x8d: /* lea r16/r32, m */
4450 		ctxt->dst.val = ctxt->src.addr.mem.ea;
4451 		break;
4452 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
4453 		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
4454 			break;
4455 		rc = em_xchg(ctxt);
4456 		break;
4457 	case 0x98: /* cbw/cwde/cdqe */
4458 		switch (ctxt->op_bytes) {
4459 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4460 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4461 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4462 		}
4463 		break;
4464 	case 0xc0 ... 0xc1:	/* Grp2 */
4465 		rc = em_grp2(ctxt);
4466 		break;
4467 	case 0xcc:		/* int3 */
4468 		rc = emulate_int(ctxt, 3);
4469 		break;
4470 	case 0xcd:		/* int n */
4471 		rc = emulate_int(ctxt, ctxt->src.val);
4472 		break;
4473 	case 0xce:		/* into */
4474 		if (ctxt->eflags & EFLG_OF)
4475 			rc = emulate_int(ctxt, 4);
4476 		break;
4477 	case 0xd0 ... 0xd1:	/* Grp2 */
4478 		rc = em_grp2(ctxt);
4479 		break;
4480 	case 0xd2 ... 0xd3:	/* Grp2 */
4481 		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
4482 		rc = em_grp2(ctxt);
4483 		break;
4484 	case 0xe9: /* jmp rel */
4485 	case 0xeb: /* jmp rel short */
4486 		jmp_rel(ctxt, ctxt->src.val);
4487 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
4488 		break;
4489 	case 0xf4:              /* hlt */
4490 		ctxt->ops->halt(ctxt);
4491 		break;
4492 	case 0xf5:	/* cmc */
4493 		/* complement carry flag from eflags reg */
4494 		ctxt->eflags ^= EFLG_CF;
4495 		break;
4496 	case 0xf8: /* clc */
4497 		ctxt->eflags &= ~EFLG_CF;
4498 		break;
4499 	case 0xf9: /* stc */
4500 		ctxt->eflags |= EFLG_CF;
4501 		break;
4502 	case 0xfc: /* cld */
4503 		ctxt->eflags &= ~EFLG_DF;
4504 		break;
4505 	case 0xfd: /* std */
4506 		ctxt->eflags |= EFLG_DF;
4507 		break;
4508 	default:
4509 		goto cannot_emulate;
4510 	}
4511 
4512 	if (rc != X86EMUL_CONTINUE)
4513 		goto done;
4514 
4515 writeback:
4516 	rc = writeback(ctxt);
4517 	if (rc != X86EMUL_CONTINUE)
4518 		goto done;
4519 
4520 	/*
4521 	 * Restore dst type in case the decode is reused
4522 	 * (happens for string instructions).
4523 	 */
4524 	ctxt->dst.type = saved_dst_type;
4525 
4526 	if ((ctxt->d & SrcMask) == SrcSI)
4527 		string_addr_inc(ctxt, seg_override(ctxt),
4528 				VCPU_REGS_RSI, &ctxt->src);
4529 
4530 	if ((ctxt->d & DstMask) == DstDI)
4531 		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4532 				&ctxt->dst);
4533 
4534 	if (ctxt->rep_prefix && (ctxt->d & String)) {
4535 		struct read_cache *r = &ctxt->io_read;
4536 		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4537 
4538 		if (!string_insn_completed(ctxt)) {
4539 			/*
4540 			 * Re-enter the guest when the PIO read-ahead buffer is
4541 			 * empty or, if it is not used, after every 1024 iterations.
4542 			 */
4543 			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4544 			    (r->end == 0 || r->end != r->pos)) {
4545 				/*
4546 				 * Reset the read cache.  This usually happens
4547 				 * before decode, but since the instruction is
4548 				 * restarted we have to do it here.
4549 				 */
4550 				ctxt->mem_read.end = 0;
4551 				return EMULATION_RESTART;
4552 			}
4553 			goto done; /* skip rip writeback */
4554 		}
4555 	}
4556 
4557 	ctxt->eip = ctxt->_eip;
4558 
4559 done:
4560 	if (rc == X86EMUL_PROPAGATE_FAULT)
4561 		ctxt->have_exception = true;
4562 	if (rc == X86EMUL_INTERCEPTED)
4563 		return EMULATION_INTERCEPTED;
4564 
4565 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4566 
4567 twobyte_insn:
4568 	switch (ctxt->b) {
4569 	case 0x09:		/* wbinvd */
4570 		(ctxt->ops->wbinvd)(ctxt);
4571 		break;
4572 	case 0x08:		/* invd */
4573 	case 0x0d:		/* GrpP (prefetch) */
4574 	case 0x18:		/* Grp16 (prefetch/nop) */
4575 		break;
4576 	case 0x20: /* mov cr, reg */
4577 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4578 		break;
4579 	case 0x21: /* mov from dr to reg */
4580 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4581 		break;
4582 	case 0x40 ... 0x4f:	/* cmov */
4583 		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4584 		if (!test_cc(ctxt->b, ctxt->eflags))
4585 			ctxt->dst.type = OP_NONE; /* no writeback */
4586 		break;
4587 	case 0x80 ... 0x8f: /* jnz rel, etc. */
4588 		if (test_cc(ctxt->b, ctxt->eflags))
4589 			jmp_rel(ctxt, ctxt->src.val);
4590 		break;
4591 	case 0x90 ... 0x9f:     /* setcc r/m8 */
4592 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4593 		break;
4594 	case 0xa4: /* shld imm8, r, r/m */
4595 	case 0xa5: /* shld cl, r, r/m */
4596 		emulate_2op_cl(ctxt, "shld");
4597 		break;
4598 	case 0xac: /* shrd imm8, r, r/m */
4599 	case 0xad: /* shrd cl, r, r/m */
4600 		emulate_2op_cl(ctxt, "shrd");
4601 		break;
4602 	case 0xae:              /* clflush */
4603 		break;
4604 	case 0xb6 ... 0xb7:	/* movzx */
4605 		ctxt->dst.bytes = ctxt->op_bytes;
4606 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
4607 						       : (u16) ctxt->src.val;
4608 		break;
4609 	case 0xbe ... 0xbf:	/* movsx */
4610 		ctxt->dst.bytes = ctxt->op_bytes;
4611 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
4612 							(s16) ctxt->src.val;
4613 		break;
4614 	case 0xc0 ... 0xc1:	/* xadd */
4615 		emulate_2op_SrcV(ctxt, "add");
4616 		/* Write back the register source. */
4617 		ctxt->src.val = ctxt->dst.orig_val;
4618 		write_register_operand(&ctxt->src);
4619 		break;
4620 	case 0xc3:		/* movnti */
4621 		ctxt->dst.bytes = ctxt->op_bytes;
4622 		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4623 							(u64) ctxt->src.val;
4624 		break;
4625 	default:
4626 		goto cannot_emulate;
4627 	}
4628 
4629 	if (rc != X86EMUL_CONTINUE)
4630 		goto done;
4631 
4632 	goto writeback;
4633 
4634 cannot_emulate:
4635 	return EMULATION_FAILED;
4636 }
4637