/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
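
/*
 * Illustration (added comment, not part of the original source): decode
 * flags are OR'd together in the opcode tables, e.g. DstReg | SrcMem |
 * ModRM, and each operand type is recovered later with a shift and OpMask,
 * roughly:
 *
 *	unsigned dst_type  = (ctxt->d >> DstShift)  & OpMask;
 *	unsigned src_type  = (ctxt->d >> SrcShift)  & OpMask;
 *	unsigned src2_type = (ctxt->d >> Src2Shift) & OpMask;
 */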

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

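/*
 * Illustration (added comment): the X<n> helpers simply repeat their
 * argument, so X4(x) expands to "x, x, x, x"; the opcode tables later in
 * this file use them to fill runs of identical table slots.
 */
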
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
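
/*
 * Illustrative sketch (added comment, not authoritative): with FASTOP_SIZE
 * of 8, the b/w/l/q variants of em_xxx live at offsets 0/8/16/24, so a
 * dispatcher can compute the entry point from the operand size alone,
 * roughly:
 *
 *	void (*fop)(struct fastop *) = em_add;
 *	if (!(ctxt->d & ByteOp))	// w/l/q: dst.bytes is 2/4/8
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 */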

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
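
/*
 * Usage sketch (added comment): a read-modify-write of a GPR goes through
 * the lazy cache above, e.g.
 *
 *	ulong *rcx = reg_rmw(ctxt, VCPU_REGS_RCX);
 *	*rcx -= 1;			// operates on the cached copy
 *	writeback_registers(ctxt);	// flushes only the dirty registers
 */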

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
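
/*
 * Added note: each setcc stub above is exactly 4 bytes (a 3-byte setcc
 * plus a 1-byte ret, padded by ".align 4"), and the sixteen stubs follow
 * the hardware condition-code order (0x0 = o, 0x1 = no, ..., 0xf = nle).
 * test_cc() below relies on this layout and calls
 * em_setcc + 4 * (condition & 0xf) instead of switching on the condition.
 */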

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
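
/*
 * Added example: with a 16-bit stack segment (ss.d == 0) the mask is 0xffff
 * and stack_size() is 2; with ss.d == 1 they are 0xffffffff and 4; in
 * 64-bit mode the mask is ~0UL and stack_size() is 8.
 */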

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
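
/*
 * Added example: a page-granular segment (desc->g == 1) with a raw limit of
 * 0xfffff scales to 0xffffffff, i.e. the limit counts 4 KiB pages and the
 * low 12 bits are filled with ones.
 */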

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*linear = la;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
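
/*
 * Added worked example for the expand-down case above: a 16-bit expand-down
 * data segment (d == 0) with a scaled limit of 0x0fff accepts only offsets
 * 0x1000..0xffff; any offset at or below the limit takes the "bad" path.
 */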

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

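	/*
	 * Added note: cur_size is at most 15 here, and for values in [0, 15]
	 * the XOR below equals 15 - cur_size (no borrows can occur), i.e.
	 * the bytes still allowed by the 15-byte instruction-length limit.
	 */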
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and the first page
	 * was loaded at the beginning of x86_decode_insn.  So if we still
	 * do not have enough bytes, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
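
/*
 * Usage sketch (added comment): both fetch helpers assume the caller
 * declares a local "int rc" and provides a "done:" label, as the decode
 * routines below do, e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);		// next opcode byte
 *	...
 * done:
 *	return rc;
 */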

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies a byte operand: AH, CH, DH and BH (the high byte of
 * the first four GPRs) are decoded when byteop is set and there is no
 * REX prefix.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
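
/*
 * Added example: the condition codes follow the setcc table order, so
 * test_cc(0x4, flags) reports ZF (the "z"/"e" condition of the 0x74 jz
 * opcode) and test_cc(0x5, flags) reports its negation.
 */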

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
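		/*
		 * Added worked example: modrm = 0x47 decodes to mod = 1,
		 * rm = 7, so the two switches below yield EA = BX + disp8
		 * (the disp8 fetched by the mod == 1 case, BX added by the
		 * rm == 7 case).
		 */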
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
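
/*
 * Added worked example: for BT m16, r16 with a source value of 65, the mask
 * above is ~15, so sv = 64 and the effective address moves forward by
 * 64 >> 3 = 8 bytes, after which src.val is reduced to the in-word bit
 * offset 1.
 */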

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
1617 		break;
1618 	case VCPU_SREG_CS:
1619 		if (!(seg_desc.type & 8))
1620 			goto exception;
1621 
1622 		if (seg_desc.type & 4) {
1623 			/* conforming */
1624 			if (dpl > cpl)
1625 				goto exception;
1626 		} else {
1627 			/* nonconforming */
1628 			if (rpl > cpl || dpl != cpl)
1629 				goto exception;
1630 		}
1631 		/* in long-mode d/b must be clear if l is set */
1632 		if (seg_desc.d && seg_desc.l) {
1633 			u64 efer = 0;
1634 
1635 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1636 			if (efer & EFER_LMA)
1637 				goto exception;
1638 		}
1639 
1640 		/* CS(RPL) <- CPL */
1641 		selector = (selector & 0xfffc) | cpl;
1642 		break;
1643 	case VCPU_SREG_TR:
1644 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1645 			goto exception;
1646 		old_desc = seg_desc;
1647 		seg_desc.type |= 2; /* busy */
1648 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1649 						  sizeof(seg_desc), &ctxt->exception);
1650 		if (ret != X86EMUL_CONTINUE)
1651 			return ret;
1652 		break;
1653 	case VCPU_SREG_LDTR:
1654 		if (seg_desc.s || seg_desc.type != 2)
1655 			goto exception;
1656 		break;
1657 	default: /*  DS, ES, FS, or GS */
1658 		/*
1659 		 * segment is not a data or readable code segment or
1660 		 * ((segment is a data or nonconforming code segment)
1661 		 * and (both RPL and CPL > DPL))
1662 		 */
1663 		if ((seg_desc.type & 0xa) == 0x8 ||
1664 		    (((seg_desc.type & 0xc) != 0xc) &&
1665 		     (rpl > dpl && cpl > dpl)))
1666 			goto exception;
1667 		break;
1668 	}
1669 
1670 	if (seg_desc.s) {
1671 		/* mark segment as accessed */
1672 		if (!(seg_desc.type & 1)) {
1673 			seg_desc.type |= 1;
1674 			ret = write_segment_descriptor(ctxt, selector,
1675 						       &seg_desc);
1676 			if (ret != X86EMUL_CONTINUE)
1677 				return ret;
1678 		}
1679 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1680 		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1681 				sizeof(base3), &ctxt->exception);
1682 		if (ret != X86EMUL_CONTINUE)
1683 			return ret;
1684 		if (is_noncanonical_address(get_desc_base(&seg_desc) |
1685 					     ((u64)base3 << 32)))
1686 			return emulate_gp(ctxt, 0);
1687 	}
1688 load:
1689 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1690 	if (desc)
1691 		*desc = seg_desc;
1692 	return X86EMUL_CONTINUE;
1693 exception:
1694 	return emulate_exception(ctxt, err_vec, err_code, true);
1695 }
1696 
1697 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1698 				   u16 selector, int seg)
1699 {
1700 	u8 cpl = ctxt->ops->cpl(ctxt);
1701 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1702 					 X86_TRANSFER_NONE, NULL);
1703 }
1704 
1705 static void write_register_operand(struct operand *op)
1706 {
1707 	return assign_register(op->addr.reg, op->val, op->bytes);
1708 }
1709 
1710 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1711 {
1712 	switch (op->type) {
1713 	case OP_REG:
1714 		write_register_operand(op);
1715 		break;
1716 	case OP_MEM:
1717 		if (ctxt->lock_prefix)
1718 			return segmented_cmpxchg(ctxt,
1719 						 op->addr.mem,
1720 						 &op->orig_val,
1721 						 &op->val,
1722 						 op->bytes);
1723 		else
1724 			return segmented_write(ctxt,
1725 					       op->addr.mem,
1726 					       &op->val,
1727 					       op->bytes);
1728 		break;
1729 	case OP_MEM_STR:
1730 		return segmented_write(ctxt,
1731 				       op->addr.mem,
1732 				       op->data,
1733 				       op->bytes * op->count);
1734 		break;
1735 	case OP_XMM:
1736 		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1737 		break;
1738 	case OP_MM:
1739 		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1740 		break;
1741 	case OP_NONE:
1742 		/* no writeback */
1743 		break;
1744 	default:
1745 		break;
1746 	}
1747 	return X86EMUL_CONTINUE;
1748 }
1749 
1750 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1751 {
1752 	struct segmented_address addr;
1753 
1754 	rsp_increment(ctxt, -bytes);
1755 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1756 	addr.seg = VCPU_SREG_SS;
1757 
1758 	return segmented_write(ctxt, addr, data, bytes);
1759 }
1760 
1761 static int em_push(struct x86_emulate_ctxt *ctxt)
1762 {
1763 	/* Disable writeback. */
1764 	ctxt->dst.type = OP_NONE;
1765 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1766 }
1767 
1768 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1769 		       void *dest, int len)
1770 {
1771 	int rc;
1772 	struct segmented_address addr;
1773 
1774 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1775 	addr.seg = VCPU_SREG_SS;
1776 	rc = segmented_read(ctxt, addr, dest, len);
1777 	if (rc != X86EMUL_CONTINUE)
1778 		return rc;
1779 
1780 	rsp_increment(ctxt, len);
1781 	return rc;
1782 }
1783 
1784 static int em_pop(struct x86_emulate_ctxt *ctxt)
1785 {
1786 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1787 }
1788 
1789 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1790 			void *dest, int len)
1791 {
1792 	int rc;
1793 	unsigned long val, change_mask;
1794 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1795 	int cpl = ctxt->ops->cpl(ctxt);
1796 
1797 	rc = emulate_pop(ctxt, &val, len);
1798 	if (rc != X86EMUL_CONTINUE)
1799 		return rc;
1800 
1801 	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1802 		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1803 		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1804 		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1805 
1806 	switch(ctxt->mode) {
1807 	case X86EMUL_MODE_PROT64:
1808 	case X86EMUL_MODE_PROT32:
1809 	case X86EMUL_MODE_PROT16:
1810 		if (cpl == 0)
1811 			change_mask |= X86_EFLAGS_IOPL;
1812 		if (cpl <= iopl)
1813 			change_mask |= X86_EFLAGS_IF;
1814 		break;
1815 	case X86EMUL_MODE_VM86:
1816 		if (iopl < 3)
1817 			return emulate_gp(ctxt, 0);
1818 		change_mask |= X86_EFLAGS_IF;
1819 		break;
1820 	default: /* real mode */
1821 		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1822 		break;
1823 	}
1824 
1825 	*(unsigned long *)dest =
1826 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1827 
1828 	return rc;
1829 }
1830 
1831 static int em_popf(struct x86_emulate_ctxt *ctxt)
1832 {
1833 	ctxt->dst.type = OP_REG;
1834 	ctxt->dst.addr.reg = &ctxt->eflags;
1835 	ctxt->dst.bytes = ctxt->op_bytes;
1836 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1837 }
1838 
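/*
 * ENTER: only the common nesting-level-0 form is emulated; push RBP,
 * point RBP at the saved value and reserve the new stack frame.
 */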
1839 static int em_enter(struct x86_emulate_ctxt *ctxt)
1840 {
1841 	int rc;
1842 	unsigned frame_size = ctxt->src.val;
1843 	unsigned nesting_level = ctxt->src2.val & 31;
1844 	ulong rbp;
1845 
1846 	if (nesting_level)
1847 		return X86EMUL_UNHANDLEABLE;
1848 
1849 	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1850 	rc = push(ctxt, &rbp, stack_size(ctxt));
1851 	if (rc != X86EMUL_CONTINUE)
1852 		return rc;
1853 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1854 		      stack_mask(ctxt));
1855 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1856 		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1857 		      stack_mask(ctxt));
1858 	return X86EMUL_CONTINUE;
1859 }
1860 
1861 static int em_leave(struct x86_emulate_ctxt *ctxt)
1862 {
1863 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1864 		      stack_mask(ctxt));
1865 	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1866 }
1867 
1868 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1869 {
1870 	int seg = ctxt->src2.val;
1871 
1872 	ctxt->src.val = get_segment_selector(ctxt, seg);
1873 	if (ctxt->op_bytes == 4) {
1874 		rsp_increment(ctxt, -2);
1875 		ctxt->op_bytes = 2;
1876 	}
1877 
1878 	return em_push(ctxt);
1879 }
1880 
1881 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1882 {
1883 	int seg = ctxt->src2.val;
1884 	unsigned long selector;
1885 	int rc;
1886 
1887 	rc = emulate_pop(ctxt, &selector, 2);
1888 	if (rc != X86EMUL_CONTINUE)
1889 		return rc;
1890 
1891 	if (ctxt->modrm_reg == VCPU_SREG_SS)
1892 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1893 	if (ctxt->op_bytes > 2)
1894 		rsp_increment(ctxt, ctxt->op_bytes - 2);
1895 
1896 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1897 	return rc;
1898 }
1899 
1900 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1901 {
1902 	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1903 	int rc = X86EMUL_CONTINUE;
1904 	int reg = VCPU_REGS_RAX;
1905 
1906 	while (reg <= VCPU_REGS_RDI) {
1907 		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1908 				old_esp : reg_read(ctxt, reg);
1909 
1910 		rc = em_push(ctxt);
1911 		if (rc != X86EMUL_CONTINUE)
1912 			return rc;
1913 
1914 		++reg;
1915 	}
1916 
1917 	return rc;
1918 }
1919 
1920 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1921 {
1922 	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1923 	return em_push(ctxt);
1924 }
1925 
1926 static int em_popa(struct x86_emulate_ctxt *ctxt)
1927 {
1928 	int rc = X86EMUL_CONTINUE;
1929 	int reg = VCPU_REGS_RDI;
1930 	u32 val;
1931 
1932 	while (reg >= VCPU_REGS_RAX) {
1933 		if (reg == VCPU_REGS_RSP) {
1934 			rsp_increment(ctxt, ctxt->op_bytes);
1935 			--reg;
1936 		}
1937 
1938 		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
1939 		if (rc != X86EMUL_CONTINUE)
1940 			break;
1941 		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
1942 		--reg;
1943 	}
1944 	return rc;
1945 }
1946 
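/*
 * Real-mode interrupt entry: push FLAGS, CS and IP, clear IF/TF/AC,
 * then fetch the new CS:IP from the IVT entry at irq * 4.
 */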
1947 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1948 {
1949 	const struct x86_emulate_ops *ops = ctxt->ops;
1950 	int rc;
1951 	struct desc_ptr dt;
1952 	gva_t cs_addr;
1953 	gva_t eip_addr;
1954 	u16 cs, eip;
1955 
1956 	/* TODO: Add limit checks */
1957 	ctxt->src.val = ctxt->eflags;
1958 	rc = em_push(ctxt);
1959 	if (rc != X86EMUL_CONTINUE)
1960 		return rc;
1961 
1962 	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
1963 
1964 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1965 	rc = em_push(ctxt);
1966 	if (rc != X86EMUL_CONTINUE)
1967 		return rc;
1968 
1969 	ctxt->src.val = ctxt->_eip;
1970 	rc = em_push(ctxt);
1971 	if (rc != X86EMUL_CONTINUE)
1972 		return rc;
1973 
1974 	ops->get_idt(ctxt, &dt);
1975 
1976 	eip_addr = dt.address + (irq << 2);
1977 	cs_addr = dt.address + (irq << 2) + 2;
1978 
1979 	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1980 	if (rc != X86EMUL_CONTINUE)
1981 		return rc;
1982 
1983 	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1984 	if (rc != X86EMUL_CONTINUE)
1985 		return rc;
1986 
1987 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1988 	if (rc != X86EMUL_CONTINUE)
1989 		return rc;
1990 
1991 	ctxt->_eip = eip;
1992 
1993 	return rc;
1994 }
1995 
1996 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1997 {
1998 	int rc;
1999 
2000 	invalidate_registers(ctxt);
2001 	rc = __emulate_int_real(ctxt, irq);
2002 	if (rc == X86EMUL_CONTINUE)
2003 		writeback_registers(ctxt);
2004 	return rc;
2005 }
2006 
2007 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2008 {
2009 	switch(ctxt->mode) {
2010 	case X86EMUL_MODE_REAL:
2011 		return __emulate_int_real(ctxt, irq);
2012 	case X86EMUL_MODE_VM86:
2013 	case X86EMUL_MODE_PROT16:
2014 	case X86EMUL_MODE_PROT32:
2015 	case X86EMUL_MODE_PROT64:
2016 	default:
2017 		/* Protected mode interrupts are not implemented yet */
2018 		return X86EMUL_UNHANDLEABLE;
2019 	}
2020 }
2021 
2022 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2023 {
2024 	int rc = X86EMUL_CONTINUE;
2025 	unsigned long temp_eip = 0;
2026 	unsigned long temp_eflags = 0;
2027 	unsigned long cs = 0;
2028 	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2029 			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2030 			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2031 			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2032 			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2033 			     X86_EFLAGS_FIXED;
2034 	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2035 				  X86_EFLAGS_VIP;
2036 
2037 	/* TODO: Add stack limit check */
2038 
2039 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2040 
2041 	if (rc != X86EMUL_CONTINUE)
2042 		return rc;
2043 
2044 	if (temp_eip & ~0xffff)
2045 		return emulate_gp(ctxt, 0);
2046 
2047 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2048 
2049 	if (rc != X86EMUL_CONTINUE)
2050 		return rc;
2051 
2052 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2053 
2054 	if (rc != X86EMUL_CONTINUE)
2055 		return rc;
2056 
2057 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2058 
2059 	if (rc != X86EMUL_CONTINUE)
2060 		return rc;
2061 
2062 	ctxt->_eip = temp_eip;
2063 
2064 	if (ctxt->op_bytes == 4)
2065 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2066 	else if (ctxt->op_bytes == 2) {
2067 		ctxt->eflags &= ~0xffff;
2068 		ctxt->eflags |= temp_eflags;
2069 	}
2070 
2071 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2072 	ctxt->eflags |= X86_EFLAGS_FIXED;
2073 	ctxt->ops->set_nmi_mask(ctxt, false);
2074 
2075 	return rc;
2076 }
2077 
2078 static int em_iret(struct x86_emulate_ctxt *ctxt)
2079 {
2080 	switch(ctxt->mode) {
2081 	case X86EMUL_MODE_REAL:
2082 		return emulate_iret_real(ctxt);
2083 	case X86EMUL_MODE_VM86:
2084 	case X86EMUL_MODE_PROT16:
2085 	case X86EMUL_MODE_PROT32:
2086 	case X86EMUL_MODE_PROT64:
2087 	default:
2088 		/* iret from protected mode is not implemented yet */
2089 		return X86EMUL_UNHANDLEABLE;
2090 	}
2091 }
2092 
2093 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2094 {
2095 	int rc;
2096 	unsigned short sel, old_sel;
2097 	struct desc_struct old_desc, new_desc;
2098 	const struct x86_emulate_ops *ops = ctxt->ops;
2099 	u8 cpl = ctxt->ops->cpl(ctxt);
2100 
2101 	/* Assignment of RIP may only fail in 64-bit mode */
2102 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2103 		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2104 				 VCPU_SREG_CS);
2105 
2106 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2107 
2108 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2109 				       X86_TRANSFER_CALL_JMP,
2110 				       &new_desc);
2111 	if (rc != X86EMUL_CONTINUE)
2112 		return rc;
2113 
2114 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2115 	if (rc != X86EMUL_CONTINUE) {
2116 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2117 		/* assigning eip failed; restore the old cs */
2118 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2119 		return rc;
2120 	}
2121 	return rc;
2122 }
2123 
2124 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2125 {
2126 	return assign_eip_near(ctxt, ctxt->src.val);
2127 }
2128 
2129 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2130 {
2131 	int rc;
2132 	long int old_eip;
2133 
2134 	old_eip = ctxt->_eip;
2135 	rc = assign_eip_near(ctxt, ctxt->src.val);
2136 	if (rc != X86EMUL_CONTINUE)
2137 		return rc;
2138 	ctxt->src.val = old_eip;
2139 	rc = em_push(ctxt);
2140 	return rc;
2141 }
2142 
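/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination.  On a match,
 * store ECX:EBX there and set ZF; otherwise load the old value into
 * EDX:EAX and clear ZF.  The 16-byte CMPXCHG16B form is not handled.
 */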
2143 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2144 {
2145 	u64 old = ctxt->dst.orig_val64;
2146 
2147 	if (ctxt->dst.bytes == 16)
2148 		return X86EMUL_UNHANDLEABLE;
2149 
2150 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2151 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2152 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2153 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2154 		ctxt->eflags &= ~X86_EFLAGS_ZF;
2155 	} else {
2156 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2157 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2158 
2159 		ctxt->eflags |= X86_EFLAGS_ZF;
2160 	}
2161 	return X86EMUL_CONTINUE;
2162 }
2163 
2164 static int em_ret(struct x86_emulate_ctxt *ctxt)
2165 {
2166 	int rc;
2167 	unsigned long eip;
2168 
2169 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2170 	if (rc != X86EMUL_CONTINUE)
2171 		return rc;
2172 
2173 	return assign_eip_near(ctxt, eip);
2174 }
2175 
2176 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2177 {
2178 	int rc;
2179 	unsigned long eip, cs;
2180 	u16 old_cs;
2181 	int cpl = ctxt->ops->cpl(ctxt);
2182 	struct desc_struct old_desc, new_desc;
2183 	const struct x86_emulate_ops *ops = ctxt->ops;
2184 
2185 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2186 		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2187 				 VCPU_SREG_CS);
2188 
2189 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2190 	if (rc != X86EMUL_CONTINUE)
2191 		return rc;
2192 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2193 	if (rc != X86EMUL_CONTINUE)
2194 		return rc;
2195 	/* Outer-privilege level return is not implemented */
2196 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2197 		return X86EMUL_UNHANDLEABLE;
2198 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2199 				       X86_TRANSFER_RET,
2200 				       &new_desc);
2201 	if (rc != X86EMUL_CONTINUE)
2202 		return rc;
2203 	rc = assign_eip_far(ctxt, eip, &new_desc);
2204 	if (rc != X86EMUL_CONTINUE) {
2205 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2206 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2207 	}
2208 	return rc;
2209 }
2210 
2211 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2212 {
2213 	int rc;
2214 
2215 	rc = em_ret_far(ctxt);
2216 	if (rc != X86EMUL_CONTINUE)
2217 		return rc;
2218 	rsp_increment(ctxt, ctxt->src.val);
2219 	return X86EMUL_CONTINUE;
2220 }
2221 
2222 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2223 {
2224 	/* Save real source value, then compare EAX against destination. */
2225 	ctxt->dst.orig_val = ctxt->dst.val;
2226 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2227 	ctxt->src.orig_val = ctxt->src.val;
2228 	ctxt->src.val = ctxt->dst.orig_val;
2229 	fastop(ctxt, em_cmp);
2230 
2231 	if (ctxt->eflags & X86_EFLAGS_ZF) {
2232 		/* Success: write back to memory; no update of EAX */
2233 		ctxt->src.type = OP_NONE;
2234 		ctxt->dst.val = ctxt->src.orig_val;
2235 	} else {
2236 		/* Failure: write the value we saw to EAX. */
2237 		ctxt->src.type = OP_REG;
2238 		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2239 		ctxt->src.val = ctxt->dst.orig_val;
2240 		/* Create write-cycle to dest by writing the same value */
2241 		ctxt->dst.val = ctxt->dst.orig_val;
2242 	}
2243 	return X86EMUL_CONTINUE;
2244 }
2245 
2246 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2247 {
2248 	int seg = ctxt->src2.val;
2249 	unsigned short sel;
2250 	int rc;
2251 
2252 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2253 
2254 	rc = load_segment_descriptor(ctxt, sel, seg);
2255 	if (rc != X86EMUL_CONTINUE)
2256 		return rc;
2257 
2258 	ctxt->dst.val = ctxt->src.val;
2259 	return rc;
2260 }
2261 
2262 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2263 {
2264 	u32 eax, ebx, ecx, edx;
2265 
2266 	eax = 0x80000001;
2267 	ecx = 0;
2268 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2269 	return edx & bit(X86_FEATURE_LM);
2270 }
2271 
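/*
 * Read one field of the SMRAM state-save area.  Note that the macro
 * returns X86EMUL_UNHANDLEABLE from the enclosing function when the
 * read fails.
 */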
2272 #define GET_SMSTATE(type, smbase, offset)				  \
2273 	({								  \
2274 	 type __val;							  \
2275 	 int r = ctxt->ops->read_std(ctxt, smbase + offset, &__val,       \
2276 				     sizeof(__val), NULL);		  \
2277 	 if (r != X86EMUL_CONTINUE)					  \
2278 		 return X86EMUL_UNHANDLEABLE;				  \
2279 	 __val;								  \
2280 	})
2281 
2282 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2283 {
2284 	desc->g    = (flags >> 23) & 1;
2285 	desc->d    = (flags >> 22) & 1;
2286 	desc->l    = (flags >> 21) & 1;
2287 	desc->avl  = (flags >> 20) & 1;
2288 	desc->p    = (flags >> 15) & 1;
2289 	desc->dpl  = (flags >> 13) & 3;
2290 	desc->s    = (flags >> 12) & 1;
2291 	desc->type = (flags >>  8) & 15;
2292 }
2293 
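/*
 * Restore one segment register from the 32-bit SMRAM state-save area;
 * the offsets below follow the layout that KVM itself saves on SMI.
 */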
2294 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2295 {
2296 	struct desc_struct desc;
2297 	int offset;
2298 	u16 selector;
2299 
2300 	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2301 
2302 	if (n < 3)
2303 		offset = 0x7f84 + n * 12;
2304 	else
2305 		offset = 0x7f2c + (n - 3) * 12;
2306 
2307 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2308 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2309 	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2310 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2311 	return X86EMUL_CONTINUE;
2312 }
2313 
2314 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2315 {
2316 	struct desc_struct desc;
2317 	int offset;
2318 	u16 selector;
2319 	u32 base3;
2320 
2321 	offset = 0x7e00 + n * 16;
2322 
2323 	selector =                GET_SMSTATE(u16, smbase, offset);
2324 	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2325 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2326 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2327 	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
2328 
2329 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2330 	return X86EMUL_CONTINUE;
2331 }
2332 
2333 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2334 				     u64 cr0, u64 cr4)
2335 {
2336 	int bad;
2337 
2338 	/*
2339 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2340 	 * Then enable protected mode.	However, PCID cannot be enabled
2341 	 * if EFER.LMA=0, so set it separately.
2342 	 */
2343 	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2344 	if (bad)
2345 		return X86EMUL_UNHANDLEABLE;
2346 
2347 	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2348 	if (bad)
2349 		return X86EMUL_UNHANDLEABLE;
2350 
2351 	if (cr4 & X86_CR4_PCIDE) {
2352 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2353 		if (bad)
2354 			return X86EMUL_UNHANDLEABLE;
2355 	}
2356 
2357 	return X86EMUL_CONTINUE;
2358 }
2359 
2360 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2361 {
2362 	struct desc_struct desc;
2363 	struct desc_ptr dt;
2364 	u16 selector;
2365 	u32 val, cr0, cr4;
2366 	int i;
2367 
2368 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
2369 	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2370 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2371 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
2372 
2373 	for (i = 0; i < 8; i++)
2374 		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2375 
2376 	val = GET_SMSTATE(u32, smbase, 0x7fcc);
2377 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2378 	val = GET_SMSTATE(u32, smbase, 0x7fc8);
2379 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2380 
2381 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
2382 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
2383 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
2384 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
2385 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2386 
2387 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
2388 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
2389 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
2390 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
2391 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2392 
2393 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
2394 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
2395 	ctxt->ops->set_gdt(ctxt, &dt);
2396 
2397 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
2398 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
2399 	ctxt->ops->set_idt(ctxt, &dt);
2400 
2401 	for (i = 0; i < 6; i++) {
2402 		int r = rsm_load_seg_32(ctxt, smbase, i);
2403 		if (r != X86EMUL_CONTINUE)
2404 			return r;
2405 	}
2406 
2407 	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2408 
2409 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2410 
2411 	return rsm_enter_protected_mode(ctxt, cr0, cr4);
2412 }
2413 
2414 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2415 {
2416 	struct desc_struct desc;
2417 	struct desc_ptr dt;
2418 	u64 val, cr0, cr4;
2419 	u32 base3;
2420 	u16 selector;
2421 	int i;
2422 
2423 	for (i = 0; i < 16; i++)
2424 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2425 
2426 	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
2427 	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2428 
2429 	val = GET_SMSTATE(u32, smbase, 0x7f68);
2430 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2431 	val = GET_SMSTATE(u32, smbase, 0x7f60);
2432 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2433 
2434 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
2435 	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
2436 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
2437 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2438 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
2439 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2440 
2441 	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
2442 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2443 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
2444 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
2445 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
2446 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2447 
2448 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
2449 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
2450 	ctxt->ops->set_idt(ctxt, &dt);
2451 
2452 	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
2453 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2454 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
2455 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
2456 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
2457 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2458 
2459 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
2460 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
2461 	ctxt->ops->set_gdt(ctxt, &dt);
2462 
2463 	for (i = 0; i < 6; i++) {
2464 		int r = rsm_load_seg_64(ctxt, smbase, i);
2465 		if (r != X86EMUL_CONTINUE)
2466 			return r;
2467 	}
2468 
2469 	return rsm_enter_protected_mode(ctxt, cr0, cr4);
2470 }
2471 
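/*
 * RSM: leave System Management Mode by restoring the register state
 * saved in SMRAM at smbase + 0x8000 and clearing the SMM flags.
 */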
2472 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2473 {
2474 	unsigned long cr0, cr4, efer;
2475 	u64 smbase;
2476 	int ret;
2477 
2478 	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
2479 		return emulate_ud(ctxt);
2480 
2481 	/*
2482 	 * Get back to real mode, to prepare a safe state in which to load
2483 	 * CR0/CR3/CR4/EFER.  This also ensures that the addresses passed
2484 	 * to read_std/write_std are not virtual.
2485 	 *
2486 	 * CR4.PCIDE must be zero, because it is a 64-bit mode only feature.
2487 	 */
2488 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2489 	if (cr0 & X86_CR0_PE)
2490 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2491 	cr4 = ctxt->ops->get_cr(ctxt, 4);
2492 	if (cr4 & X86_CR4_PAE)
2493 		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2494 	efer = 0;
2495 	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2496 
2497 	smbase = ctxt->ops->get_smbase(ctxt);
2498 	if (emulator_has_longmode(ctxt))
2499 		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2500 	else
2501 		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2502 
2503 	if (ret != X86EMUL_CONTINUE) {
2504 		/* FIXME: should triple fault */
2505 		return X86EMUL_UNHANDLEABLE;
2506 	}
2507 
2508 	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2509 		ctxt->ops->set_nmi_mask(ctxt, false);
2510 
2511 	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
2512 	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
2513 	return X86EMUL_CONTINUE;
2514 }
2515 
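/*
 * Initialize flat 4GB code and stack segments for SYSCALL/SYSENTER;
 * callers adjust DPL and the L/D bits as the target mode requires.
 */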
2516 static void
2517 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2518 			struct desc_struct *cs, struct desc_struct *ss)
2519 {
2520 	cs->l = 0;		/* will be adjusted later */
2521 	set_desc_base(cs, 0);	/* flat segment */
2522 	cs->g = 1;		/* 4kb granularity */
2523 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2524 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2525 	cs->s = 1;
2526 	cs->dpl = 0;		/* will be adjusted later */
2527 	cs->p = 1;
2528 	cs->d = 1;
2529 	cs->avl = 0;
2530 
2531 	set_desc_base(ss, 0);	/* flat segment */
2532 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2533 	ss->g = 1;		/* 4kb granularity */
2534 	ss->s = 1;
2535 	ss->type = 0x03;	/* Read/Write, Accessed */
2536 	ss->d = 1;		/* 32bit stack segment */
2537 	ss->dpl = 0;
2538 	ss->p = 1;
2539 	ss->l = 0;
2540 	ss->avl = 0;
2541 }
2542 
2543 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2544 {
2545 	u32 eax, ebx, ecx, edx;
2546 
2547 	eax = ecx = 0;
2548 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2549 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2550 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2551 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2552 }
2553 
2554 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2555 {
2556 	const struct x86_emulate_ops *ops = ctxt->ops;
2557 	u32 eax, ebx, ecx, edx;
2558 
2559 	/*
2560 	 * syscall is always enabled in long mode, so the vendor-specific
2561 	 * (cpuid) check is only needed for the other modes.
2562 	 */
2563 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2564 		return true;
2565 
2566 	eax = 0x00000000;
2567 	ecx = 0x00000000;
2568 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2569 	/*
2570 	 * Intel ("GenuineIntel")
2571 	 * remark: Intel CPUs only support "syscall" in 64bit
2572 	 * longmode. Also an 64bit guest with a
2573 	 * 32bit compat-app running will #UD !! While this
2574 	 * behaviour can be fixed (by emulating) into AMD
2575 	 * response - CPUs of AMD can't behave like Intel.
2576 	 */
2577 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2578 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2579 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2580 		return false;
2581 
2582 	/* AMD ("AuthenticAMD") */
2583 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2584 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2585 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2586 		return true;
2587 
2588 	/* AMD ("AMDisbetter!") */
2589 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2590 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2591 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2592 		return true;
2593 
2594 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2595 	return false;
2596 }
2597 
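/*
 * SYSCALL: load CS/SS from MSR_STAR[47:32], save the return RIP in RCX
 * (and RFLAGS in R11 in long mode), and continue at the entry point
 * taken from MSR_LSTAR/MSR_CSTAR or, in legacy mode, MSR_STAR.
 */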
2598 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2599 {
2600 	const struct x86_emulate_ops *ops = ctxt->ops;
2601 	struct desc_struct cs, ss;
2602 	u64 msr_data;
2603 	u16 cs_sel, ss_sel;
2604 	u64 efer = 0;
2605 
2606 	/* syscall is not available in real mode */
2607 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2608 	    ctxt->mode == X86EMUL_MODE_VM86)
2609 		return emulate_ud(ctxt);
2610 
2611 	if (!em_syscall_is_enabled(ctxt))
2612 		return emulate_ud(ctxt);
2613 
2614 	ops->get_msr(ctxt, MSR_EFER, &efer);
2615 	setup_syscalls_segments(ctxt, &cs, &ss);
2616 
2617 	if (!(efer & EFER_SCE))
2618 		return emulate_ud(ctxt);
2619 
2620 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2621 	msr_data >>= 32;
2622 	cs_sel = (u16)(msr_data & 0xfffc);
2623 	ss_sel = (u16)(msr_data + 8);
2624 
2625 	if (efer & EFER_LMA) {
2626 		cs.d = 0;
2627 		cs.l = 1;
2628 	}
2629 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2630 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2631 
2632 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2633 	if (efer & EFER_LMA) {
2634 #ifdef CONFIG_X86_64
2635 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2636 
2637 		ops->get_msr(ctxt,
2638 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2639 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2640 		ctxt->_eip = msr_data;
2641 
2642 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2643 		ctxt->eflags &= ~msr_data;
2644 		ctxt->eflags |= X86_EFLAGS_FIXED;
2645 #endif
2646 	} else {
2647 		/* legacy mode */
2648 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2649 		ctxt->_eip = (u32)msr_data;
2650 
2651 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2652 	}
2653 
2654 	return X86EMUL_CONTINUE;
2655 }
2656 
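/*
 * SYSENTER: enter the kernel at the flat CS:EIP and ESP described by
 * the MSR_IA32_SYSENTER_* MSRs, with VM and IF cleared.
 */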
2657 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2658 {
2659 	const struct x86_emulate_ops *ops = ctxt->ops;
2660 	struct desc_struct cs, ss;
2661 	u64 msr_data;
2662 	u16 cs_sel, ss_sel;
2663 	u64 efer = 0;
2664 
2665 	ops->get_msr(ctxt, MSR_EFER, &efer);
2666 	/* inject #GP if in real mode */
2667 	if (ctxt->mode == X86EMUL_MODE_REAL)
2668 		return emulate_gp(ctxt, 0);
2669 
2670 	/*
2671 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2672 	 * mode).
2673 	 */
2674 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2675 	    && !vendor_intel(ctxt))
2676 		return emulate_ud(ctxt);
2677 
2678 	/* sysenter/sysexit have not been tested in 64bit mode. */
2679 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2680 		return X86EMUL_UNHANDLEABLE;
2681 
2682 	setup_syscalls_segments(ctxt, &cs, &ss);
2683 
2684 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2685 	if ((msr_data & 0xfffc) == 0x0)
2686 		return emulate_gp(ctxt, 0);
2687 
2688 	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2689 	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2690 	ss_sel = cs_sel + 8;
2691 	if (efer & EFER_LMA) {
2692 		cs.d = 0;
2693 		cs.l = 1;
2694 	}
2695 
2696 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2697 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2698 
2699 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2700 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2701 
2702 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2703 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2704 							      (u32)msr_data;
2705 
2706 	return X86EMUL_CONTINUE;
2707 }
2708 
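/*
 * SYSEXIT: return to user mode at RDX (new RIP) with the stack taken
 * from RCX, using CPL-3 segments derived from MSR_IA32_SYSENTER_CS.
 */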
2709 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2710 {
2711 	const struct x86_emulate_ops *ops = ctxt->ops;
2712 	struct desc_struct cs, ss;
2713 	u64 msr_data, rcx, rdx;
2714 	int usermode;
2715 	u16 cs_sel = 0, ss_sel = 0;
2716 
2717 	/* inject #GP if in real mode or Virtual 8086 mode */
2718 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2719 	    ctxt->mode == X86EMUL_MODE_VM86)
2720 		return emulate_gp(ctxt, 0);
2721 
2722 	setup_syscalls_segments(ctxt, &cs, &ss);
2723 
2724 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2725 		usermode = X86EMUL_MODE_PROT64;
2726 	else
2727 		usermode = X86EMUL_MODE_PROT32;
2728 
2729 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2730 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2731 
2732 	cs.dpl = 3;
2733 	ss.dpl = 3;
2734 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2735 	switch (usermode) {
2736 	case X86EMUL_MODE_PROT32:
2737 		cs_sel = (u16)(msr_data + 16);
2738 		if ((msr_data & 0xfffc) == 0x0)
2739 			return emulate_gp(ctxt, 0);
2740 		ss_sel = (u16)(msr_data + 24);
2741 		rcx = (u32)rcx;
2742 		rdx = (u32)rdx;
2743 		break;
2744 	case X86EMUL_MODE_PROT64:
2745 		cs_sel = (u16)(msr_data + 32);
2746 		if (msr_data == 0x0)
2747 			return emulate_gp(ctxt, 0);
2748 		ss_sel = cs_sel + 8;
2749 		cs.d = 0;
2750 		cs.l = 1;
2751 		if (is_noncanonical_address(rcx) ||
2752 		    is_noncanonical_address(rdx))
2753 			return emulate_gp(ctxt, 0);
2754 		break;
2755 	}
2756 	cs_sel |= SEGMENT_RPL_MASK;
2757 	ss_sel |= SEGMENT_RPL_MASK;
2758 
2759 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2760 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2761 
2762 	ctxt->_eip = rdx;
2763 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2764 
2765 	return X86EMUL_CONTINUE;
2766 }
2767 
2768 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2769 {
2770 	int iopl;
2771 	if (ctxt->mode == X86EMUL_MODE_REAL)
2772 		return false;
2773 	if (ctxt->mode == X86EMUL_MODE_VM86)
2774 		return true;
2775 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2776 	return ctxt->ops->cpl(ctxt) > iopl;
2777 }
2778 
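/*
 * Consult the I/O permission bitmap in the TSS: access is allowed only
 * if every bit covering ports [port, port + len) is clear.
 */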
2779 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2780 					    u16 port, u16 len)
2781 {
2782 	const struct x86_emulate_ops *ops = ctxt->ops;
2783 	struct desc_struct tr_seg;
2784 	u32 base3;
2785 	int r;
2786 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2787 	unsigned mask = (1 << len) - 1;
2788 	unsigned long base;
2789 
2790 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2791 	if (!tr_seg.p)
2792 		return false;
2793 	if (desc_limit_scaled(&tr_seg) < 103)
2794 		return false;
2795 	base = get_desc_base(&tr_seg);
2796 #ifdef CONFIG_X86_64
2797 	base |= ((u64)base3) << 32;
2798 #endif
2799 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2800 	if (r != X86EMUL_CONTINUE)
2801 		return false;
2802 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2803 		return false;
2804 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2805 	if (r != X86EMUL_CONTINUE)
2806 		return false;
2807 	if ((perm >> bit_idx) & mask)
2808 		return false;
2809 	return true;
2810 }
2811 
2812 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2813 				 u16 port, u16 len)
2814 {
2815 	if (ctxt->perm_ok)
2816 		return true;
2817 
2818 	if (emulator_bad_iopl(ctxt))
2819 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2820 			return false;
2821 
2822 	ctxt->perm_ok = true;
2823 
2824 	return true;
2825 }
2826 
2827 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2828 {
2829 	/*
2830 	 * Intel CPUs mask the counter and pointers in a rather strange
2831 	 * manner when ECX is zero, due to REP-string optimizations.
2832 	 */
2833 #ifdef CONFIG_X86_64
2834 	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2835 		return;
2836 
2837 	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2838 
2839 	switch (ctxt->b) {
2840 	case 0xa4:	/* movsb */
2841 	case 0xa5:	/* movsd/w */
2842 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2843 		/* fall through */
2844 	case 0xaa:	/* stosb */
2845 	case 0xab:	/* stosd/w */
2846 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2847 	}
2848 #endif
2849 }
2850 
2851 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2852 				struct tss_segment_16 *tss)
2853 {
2854 	tss->ip = ctxt->_eip;
2855 	tss->flag = ctxt->eflags;
2856 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2857 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2858 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2859 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2860 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2861 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2862 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2863 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2864 
2865 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2866 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2867 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2868 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2869 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2870 }
2871 
2872 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2873 				 struct tss_segment_16 *tss)
2874 {
2875 	int ret;
2876 	u8 cpl;
2877 
2878 	ctxt->_eip = tss->ip;
2879 	ctxt->eflags = tss->flag | 2;
2880 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2881 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2882 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2883 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2884 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2885 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2886 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2887 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2888 
2889 	/*
2890 	 * SDM says that segment selectors are loaded before segment
2891 	 * descriptors
2892 	 */
2893 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2894 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2895 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2896 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2897 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2898 
2899 	cpl = tss->cs & 3;
2900 
2901 	/*
2902 	 * Now load segment descriptors. If a fault happens at this stage,
2903 	 * it is handled in the context of the new task.
2904 	 */
2905 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2906 					X86_TRANSFER_TASK_SWITCH, NULL);
2907 	if (ret != X86EMUL_CONTINUE)
2908 		return ret;
2909 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2910 					X86_TRANSFER_TASK_SWITCH, NULL);
2911 	if (ret != X86EMUL_CONTINUE)
2912 		return ret;
2913 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2914 					X86_TRANSFER_TASK_SWITCH, NULL);
2915 	if (ret != X86EMUL_CONTINUE)
2916 		return ret;
2917 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2918 					X86_TRANSFER_TASK_SWITCH, NULL);
2919 	if (ret != X86EMUL_CONTINUE)
2920 		return ret;
2921 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2922 					X86_TRANSFER_TASK_SWITCH, NULL);
2923 	if (ret != X86EMUL_CONTINUE)
2924 		return ret;
2925 
2926 	return X86EMUL_CONTINUE;
2927 }
2928 
2929 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2930 			  u16 tss_selector, u16 old_tss_sel,
2931 			  ulong old_tss_base, struct desc_struct *new_desc)
2932 {
2933 	const struct x86_emulate_ops *ops = ctxt->ops;
2934 	struct tss_segment_16 tss_seg;
2935 	int ret;
2936 	u32 new_tss_base = get_desc_base(new_desc);
2937 
2938 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2939 			    &ctxt->exception);
2940 	if (ret != X86EMUL_CONTINUE)
2941 		return ret;
2942 
2943 	save_state_to_tss16(ctxt, &tss_seg);
2944 
2945 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2946 			     &ctxt->exception);
2947 	if (ret != X86EMUL_CONTINUE)
2948 		return ret;
2949 
2950 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2951 			    &ctxt->exception);
2952 	if (ret != X86EMUL_CONTINUE)
2953 		return ret;
2954 
2955 	if (old_tss_sel != 0xffff) {
2956 		tss_seg.prev_task_link = old_tss_sel;
2957 
2958 		ret = ops->write_std(ctxt, new_tss_base,
2959 				     &tss_seg.prev_task_link,
2960 				     sizeof tss_seg.prev_task_link,
2961 				     &ctxt->exception);
2962 		if (ret != X86EMUL_CONTINUE)
2963 			return ret;
2964 	}
2965 
2966 	return load_state_from_tss16(ctxt, &tss_seg);
2967 }
2968 
2969 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2970 				struct tss_segment_32 *tss)
2971 {
2972 	/* CR3 and the LDT selector are intentionally not saved */
2973 	tss->eip = ctxt->_eip;
2974 	tss->eflags = ctxt->eflags;
2975 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2976 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2977 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2978 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2979 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2980 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2981 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2982 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2983 
2984 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2985 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2986 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2987 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2988 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2989 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2990 }
2991 
2992 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2993 				 struct tss_segment_32 *tss)
2994 {
2995 	int ret;
2996 	u8 cpl;
2997 
2998 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2999 		return emulate_gp(ctxt, 0);
3000 	ctxt->_eip = tss->eip;
3001 	ctxt->eflags = tss->eflags | 2;
3002 
3003 	/* General purpose registers */
3004 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3005 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3006 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3007 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3008 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3009 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3010 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3011 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3012 
3013 	/*
3014 	 * SDM says that segment selectors are loaded before segment
3015 	 * descriptors.  This is important because CPL checks will
3016 	 * use CS.RPL.
3017 	 */
3018 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3019 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3020 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3021 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3022 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3023 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3024 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3025 
3026 	/*
3027 	 * If we're switching between Protected Mode and VM86, we need to make
3028 	 * sure to update the mode before loading the segment descriptors so
3029 	 * that the selectors are interpreted correctly.
3030 	 */
3031 	if (ctxt->eflags & X86_EFLAGS_VM) {
3032 		ctxt->mode = X86EMUL_MODE_VM86;
3033 		cpl = 3;
3034 	} else {
3035 		ctxt->mode = X86EMUL_MODE_PROT32;
3036 		cpl = tss->cs & 3;
3037 	}
3038 
3039 	/*
3040 	 * Now load segment descriptors. If a fault happens at this stage,
3041 	 * it is handled in the context of the new task.
3042 	 */
3043 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3044 					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3045 	if (ret != X86EMUL_CONTINUE)
3046 		return ret;
3047 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3048 					X86_TRANSFER_TASK_SWITCH, NULL);
3049 	if (ret != X86EMUL_CONTINUE)
3050 		return ret;
3051 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3052 					X86_TRANSFER_TASK_SWITCH, NULL);
3053 	if (ret != X86EMUL_CONTINUE)
3054 		return ret;
3055 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3056 					X86_TRANSFER_TASK_SWITCH, NULL);
3057 	if (ret != X86EMUL_CONTINUE)
3058 		return ret;
3059 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3060 					X86_TRANSFER_TASK_SWITCH, NULL);
3061 	if (ret != X86EMUL_CONTINUE)
3062 		return ret;
3063 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3064 					X86_TRANSFER_TASK_SWITCH, NULL);
3065 	if (ret != X86EMUL_CONTINUE)
3066 		return ret;
3067 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3068 					X86_TRANSFER_TASK_SWITCH, NULL);
3069 
3070 	return ret;
3071 }
3072 
3073 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3074 			  u16 tss_selector, u16 old_tss_sel,
3075 			  ulong old_tss_base, struct desc_struct *new_desc)
3076 {
3077 	const struct x86_emulate_ops *ops = ctxt->ops;
3078 	struct tss_segment_32 tss_seg;
3079 	int ret;
3080 	u32 new_tss_base = get_desc_base(new_desc);
3081 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3082 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3083 
3084 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3085 			    &ctxt->exception);
3086 	if (ret != X86EMUL_CONTINUE)
3087 		return ret;
3088 
3089 	save_state_to_tss32(ctxt, &tss_seg);
3090 
3091 	/* Only GP registers and segment selectors are saved */
3092 	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3093 			     ldt_sel_offset - eip_offset, &ctxt->exception);
3094 	if (ret != X86EMUL_CONTINUE)
3095 		return ret;
3096 
3097 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3098 			    &ctxt->exception);
3099 	if (ret != X86EMUL_CONTINUE)
3100 		return ret;
3101 
3102 	if (old_tss_sel != 0xffff) {
3103 		tss_seg.prev_task_link = old_tss_sel;
3104 
3105 		ret = ops->write_std(ctxt, new_tss_base,
3106 				     &tss_seg.prev_task_link,
3107 				     sizeof tss_seg.prev_task_link,
3108 				     &ctxt->exception);
3109 		if (ret != X86EMUL_CONTINUE)
3110 			return ret;
3111 	}
3112 
3113 	return load_state_from_tss32(ctxt, &tss_seg);
3114 }
3115 
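/*
 * Common task-switch path: validate the new TSS, save the outgoing
 * state into the current TSS, load the incoming state, and update the
 * busy flag, NT, CR0.TS and TR as the switch reason requires.
 */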
3116 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3117 				   u16 tss_selector, int idt_index, int reason,
3118 				   bool has_error_code, u32 error_code)
3119 {
3120 	const struct x86_emulate_ops *ops = ctxt->ops;
3121 	struct desc_struct curr_tss_desc, next_tss_desc;
3122 	int ret;
3123 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3124 	ulong old_tss_base =
3125 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3126 	u32 desc_limit;
3127 	ulong desc_addr, dr7;
3128 
3129 	/* FIXME: old_tss_base == ~0 ? */
3130 
3131 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3132 	if (ret != X86EMUL_CONTINUE)
3133 		return ret;
3134 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3135 	if (ret != X86EMUL_CONTINUE)
3136 		return ret;
3137 
3138 	/* FIXME: check that next_tss_desc is tss */
3139 
3140 	/*
3141 	 * Check privileges. The three cases are task switch caused by...
3142 	 *
3143 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3144 	 * 2. Exception/IRQ/iret: No check is performed
3145 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3146 	 *    hardware checks it before exiting.
3147 	 */
3148 	if (reason == TASK_SWITCH_GATE) {
3149 		if (idt_index != -1) {
3150 			/* Software interrupts */
3151 			struct desc_struct task_gate_desc;
3152 			int dpl;
3153 
3154 			ret = read_interrupt_descriptor(ctxt, idt_index,
3155 							&task_gate_desc);
3156 			if (ret != X86EMUL_CONTINUE)
3157 				return ret;
3158 
3159 			dpl = task_gate_desc.dpl;
3160 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3161 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3162 		}
3163 	}
3164 
3165 	desc_limit = desc_limit_scaled(&next_tss_desc);
3166 	if (!next_tss_desc.p ||
3167 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3168 	     desc_limit < 0x2b)) {
3169 		return emulate_ts(ctxt, tss_selector & 0xfffc);
3170 	}
3171 
3172 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3173 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3174 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3175 	}
3176 
3177 	if (reason == TASK_SWITCH_IRET)
3178 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3179 
3180 	/* Set back link to prev task only if NT bit is set in eflags;
3181 	   note that old_tss_sel is not used after this point. */
3182 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3183 		old_tss_sel = 0xffff;
3184 
3185 	if (next_tss_desc.type & 8)
3186 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3187 				     old_tss_base, &next_tss_desc);
3188 	else
3189 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3190 				     old_tss_base, &next_tss_desc);
3191 	if (ret != X86EMUL_CONTINUE)
3192 		return ret;
3193 
3194 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3195 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3196 
3197 	if (reason != TASK_SWITCH_IRET) {
3198 		next_tss_desc.type |= (1 << 1); /* set busy flag */
3199 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3200 	}
3201 
3202 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3203 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3204 
3205 	if (has_error_code) {
3206 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3207 		ctxt->lock_prefix = 0;
3208 		ctxt->src.val = (unsigned long) error_code;
3209 		ret = em_push(ctxt);
3210 	}
3211 
3212 	ops->get_dr(ctxt, 7, &dr7);
3213 	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3214 
3215 	return ret;
3216 }
3217 
3218 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3219 			 u16 tss_selector, int idt_index, int reason,
3220 			 bool has_error_code, u32 error_code)
3221 {
3222 	int rc;
3223 
3224 	invalidate_registers(ctxt);
3225 	ctxt->_eip = ctxt->eip;
3226 	ctxt->dst.type = OP_NONE;
3227 
3228 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3229 				     has_error_code, error_code);
3230 
3231 	if (rc == X86EMUL_CONTINUE) {
3232 		ctxt->eip = ctxt->_eip;
3233 		writeback_registers(ctxt);
3234 	}
3235 
3236 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3237 }
3238 
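/*
 * Advance a string instruction's index register past the elements just
 * processed, moving backwards when EFLAGS.DF is set.
 */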
3239 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3240 		struct operand *op)
3241 {
3242 	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3243 
3244 	register_address_increment(ctxt, reg, df * op->bytes);
3245 	op->addr.mem.ea = register_address(ctxt, reg);
3246 }
3247 
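/*
 * DAS: decimal-adjust AL after subtraction; PF, ZF and SF are then
 * recomputed via a dummy OR with zero.
 */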
3248 static int em_das(struct x86_emulate_ctxt *ctxt)
3249 {
3250 	u8 al, old_al;
3251 	bool af, cf, old_cf;
3252 
3253 	cf = ctxt->eflags & X86_EFLAGS_CF;
3254 	al = ctxt->dst.val;
3255 
3256 	old_al = al;
3257 	old_cf = cf;
3258 	cf = false;
3259 	af = ctxt->eflags & X86_EFLAGS_AF;
3260 	if ((al & 0x0f) > 9 || af) {
3261 		al -= 6;
3262 		cf = old_cf | (al >= 250);
3263 		af = true;
3264 	} else {
3265 		af = false;
3266 	}
3267 	if (old_al > 0x99 || old_cf) {
3268 		al -= 0x60;
3269 		cf = true;
3270 	}
3271 
3272 	ctxt->dst.val = al;
3273 	/* Set PF, ZF, SF */
3274 	ctxt->src.type = OP_IMM;
3275 	ctxt->src.val = 0;
3276 	ctxt->src.bytes = 1;
3277 	fastop(ctxt, em_or);
3278 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3279 	if (cf)
3280 		ctxt->eflags |= X86_EFLAGS_CF;
3281 	if (af)
3282 		ctxt->eflags |= X86_EFLAGS_AF;
3283 	return X86EMUL_CONTINUE;
3284 }
3285 
3286 static int em_aam(struct x86_emulate_ctxt *ctxt)
3287 {
3288 	u8 al, ah;
3289 
3290 	if (ctxt->src.val == 0)
3291 		return emulate_de(ctxt);
3292 
3293 	al = ctxt->dst.val & 0xff;
3294 	ah = al / ctxt->src.val;
3295 	al %= ctxt->src.val;
3296 
3297 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3298 
3299 	/* Set PF, ZF, SF */
3300 	ctxt->src.type = OP_IMM;
3301 	ctxt->src.val = 0;
3302 	ctxt->src.bytes = 1;
3303 	fastop(ctxt, em_or);
3304 
3305 	return X86EMUL_CONTINUE;
3306 }
3307 
3308 static int em_aad(struct x86_emulate_ctxt *ctxt)
3309 {
3310 	u8 al = ctxt->dst.val & 0xff;
3311 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3312 
3313 	al = (al + (ah * ctxt->src.val)) & 0xff;
3314 
3315 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3316 
3317 	/* Set PF, ZF, SF */
3318 	ctxt->src.type = OP_IMM;
3319 	ctxt->src.val = 0;
3320 	ctxt->src.bytes = 1;
3321 	fastop(ctxt, em_or);
3322 
3323 	return X86EMUL_CONTINUE;
3324 }
3325 
3326 static int em_call(struct x86_emulate_ctxt *ctxt)
3327 {
3328 	int rc;
3329 	long rel = ctxt->src.val;
3330 
3331 	ctxt->src.val = (unsigned long)ctxt->_eip;
3332 	rc = jmp_rel(ctxt, rel);
3333 	if (rc != X86EMUL_CONTINUE)
3334 		return rc;
3335 	return em_push(ctxt);
3336 }
3337 
3338 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3339 {
3340 	u16 sel, old_cs;
3341 	ulong old_eip;
3342 	int rc;
3343 	struct desc_struct old_desc, new_desc;
3344 	const struct x86_emulate_ops *ops = ctxt->ops;
3345 	int cpl = ctxt->ops->cpl(ctxt);
3346 	enum x86emul_mode prev_mode = ctxt->mode;
3347 
3348 	old_eip = ctxt->_eip;
3349 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3350 
3351 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3352 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3353 				       X86_TRANSFER_CALL_JMP, &new_desc);
3354 	if (rc != X86EMUL_CONTINUE)
3355 		return rc;
3356 
3357 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3358 	if (rc != X86EMUL_CONTINUE)
3359 		goto fail;
3360 
3361 	ctxt->src.val = old_cs;
3362 	rc = em_push(ctxt);
3363 	if (rc != X86EMUL_CONTINUE)
3364 		goto fail;
3365 
3366 	ctxt->src.val = old_eip;
3367 	rc = em_push(ctxt);
3368 	/* If we failed, we tainted the memory, but at the very least we
3369 	   should restore cs. */
3370 	if (rc != X86EMUL_CONTINUE) {
3371 		pr_warn_once("faulting far call emulation tainted memory\n");
3372 		goto fail;
3373 	}
3374 	return rc;
3375 fail:
3376 	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3377 	ctxt->mode = prev_mode;
3378 	return rc;
3379 
3380 }
3381 
3382 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3383 {
3384 	int rc;
3385 	unsigned long eip;
3386 
3387 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3388 	if (rc != X86EMUL_CONTINUE)
3389 		return rc;
3390 	rc = assign_eip_near(ctxt, eip);
3391 	if (rc != X86EMUL_CONTINUE)
3392 		return rc;
3393 	rsp_increment(ctxt, ctxt->src.val);
3394 	return X86EMUL_CONTINUE;
3395 }
3396 
3397 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3398 {
3399 	/* Write back the register source. */
3400 	ctxt->src.val = ctxt->dst.val;
3401 	write_register_operand(&ctxt->src);
3402 
3403 	/* Write back the memory destination with implicit LOCK prefix. */
3404 	ctxt->dst.val = ctxt->src.orig_val;
3405 	ctxt->lock_prefix = 1;
3406 	return X86EMUL_CONTINUE;
3407 }
3408 
3409 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3410 {
3411 	ctxt->dst.val = ctxt->src2.val;
3412 	return fastop(ctxt, em_imul);
3413 }
3414 
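/*
 * CWD/CDQ/CQO: replicate the sign bit of the source accumulator into
 * every bit of the destination (DX/EDX/RDX).
 */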
3415 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3416 {
3417 	ctxt->dst.type = OP_REG;
3418 	ctxt->dst.bytes = ctxt->src.bytes;
3419 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3420 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3421 
3422 	return X86EMUL_CONTINUE;
3423 }
3424 
3425 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3426 {
3427 	u64 tsc = 0;
3428 
3429 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3430 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3431 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3432 	return X86EMUL_CONTINUE;
3433 }
3434 
3435 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3436 {
3437 	u64 pmc;
3438 
3439 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3440 		return emulate_gp(ctxt, 0);
3441 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3442 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3443 	return X86EMUL_CONTINUE;
3444 }
3445 
3446 static int em_mov(struct x86_emulate_ctxt *ctxt)
3447 {
3448 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3449 	return X86EMUL_CONTINUE;
3450 }
3451 
3452 #define FFL(x) bit(X86_FEATURE_##x)
3453 
3454 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3455 {
3456 	u32 ebx, ecx, edx, eax = 1;
3457 	u16 tmp;
3458 
3459 	/*
3460 	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3461 	 */
3462 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3463 	if (!(ecx & FFL(MOVBE)))
3464 		return emulate_ud(ctxt);
3465 
3466 	switch (ctxt->op_bytes) {
3467 	case 2:
3468 		/*
3469 		 * From MOVBE definition: "...When the operand size is 16 bits,
3470 		 * the upper word of the destination register remains unchanged
3471 		 * ..."
3472 		 *
3473 		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3474 		 * rules so we have to do the operation almost by hand.
3475 		 */
3476 		tmp = (u16)ctxt->src.val;
3477 		ctxt->dst.val &= ~0xffffUL;
3478 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3479 		break;
3480 	case 4:
3481 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3482 		break;
3483 	case 8:
3484 		ctxt->dst.val = swab64(ctxt->src.val);
3485 		break;
3486 	default:
3487 		BUG();
3488 	}
3489 	return X86EMUL_CONTINUE;
3490 }
3491 
3492 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3493 {
3494 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3495 		return emulate_gp(ctxt, 0);
3496 
3497 	/* Disable writeback. */
3498 	ctxt->dst.type = OP_NONE;
3499 	return X86EMUL_CONTINUE;
3500 }
3501 
3502 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3503 {
3504 	unsigned long val;
3505 
3506 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3507 		val = ctxt->src.val & ~0ULL;
3508 	else
3509 		val = ctxt->src.val & ~0U;
3510 
3511 	/* #UD condition is already handled. */
3512 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3513 		return emulate_gp(ctxt, 0);
3514 
3515 	/* Disable writeback. */
3516 	ctxt->dst.type = OP_NONE;
3517 	return X86EMUL_CONTINUE;
3518 }
3519 
3520 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3521 {
3522 	u64 msr_data;
3523 
3524 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3525 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3526 	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3527 		return emulate_gp(ctxt, 0);
3528 
3529 	return X86EMUL_CONTINUE;
3530 }
3531 
3532 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3533 {
3534 	u64 msr_data;
3535 
3536 	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3537 		return emulate_gp(ctxt, 0);
3538 
3539 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3540 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3541 	return X86EMUL_CONTINUE;
3542 }
3543 
3544 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3545 {
3546 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3547 		return emulate_ud(ctxt);
3548 
3549 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3550 	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3551 		ctxt->dst.bytes = 2;
3552 	return X86EMUL_CONTINUE;
3553 }
3554 
3555 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3556 {
3557 	u16 sel = ctxt->src.val;
3558 
3559 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3560 		return emulate_ud(ctxt);
3561 
3562 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3563 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3564 
3565 	/* Disable writeback. */
3566 	ctxt->dst.type = OP_NONE;
3567 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3568 }
3569 
3570 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3571 {
3572 	u16 sel = ctxt->src.val;
3573 
3574 	/* Disable writeback. */
3575 	ctxt->dst.type = OP_NONE;
3576 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3577 }
3578 
3579 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3580 {
3581 	u16 sel = ctxt->src.val;
3582 
3583 	/* Disable writeback. */
3584 	ctxt->dst.type = OP_NONE;
3585 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3586 }
3587 
3588 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3589 {
3590 	int rc;
3591 	ulong linear;
3592 
3593 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3594 	if (rc == X86EMUL_CONTINUE)
3595 		ctxt->ops->invlpg(ctxt, linear);
3596 	/* Disable writeback. */
3597 	ctxt->dst.type = OP_NONE;
3598 	return X86EMUL_CONTINUE;
3599 }
3600 
3601 static int em_clts(struct x86_emulate_ctxt *ctxt)
3602 {
3603 	ulong cr0;
3604 
3605 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3606 	cr0 &= ~X86_CR0_TS;
3607 	ctxt->ops->set_cr(ctxt, 0, cr0);
3608 	return X86EMUL_CONTINUE;
3609 }
3610 
3611 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3612 {
3613 	int rc = ctxt->ops->fix_hypercall(ctxt);
3614 
3615 	if (rc != X86EMUL_CONTINUE)
3616 		return rc;
3617 
3618 	/* Let the processor re-execute the fixed hypercall */
3619 	ctxt->_eip = ctxt->eip;
3620 	/* Disable writeback. */
3621 	ctxt->dst.type = OP_NONE;
3622 	return X86EMUL_CONTINUE;
3623 }
3624 
3625 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3626 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3627 					      struct desc_ptr *ptr))
3628 {
3629 	struct desc_ptr desc_ptr;
3630 
3631 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3632 		ctxt->op_bytes = 8;
3633 	get(ctxt, &desc_ptr);
3634 	if (ctxt->op_bytes == 2) {
3635 		ctxt->op_bytes = 4;
3636 		desc_ptr.address &= 0x00ffffff;
3637 	}
3638 	/* Disable writeback. */
3639 	ctxt->dst.type = OP_NONE;
3640 	return segmented_write(ctxt, ctxt->dst.addr.mem,
3641 			       &desc_ptr, 2 + ctxt->op_bytes);
3642 }
3643 
3644 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3645 {
3646 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3647 }
3648 
3649 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3650 {
3651 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3652 }
3653 
3654 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3655 {
3656 	struct desc_ptr desc_ptr;
3657 	int rc;
3658 
3659 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3660 		ctxt->op_bytes = 8;
3661 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3662 			     &desc_ptr.size, &desc_ptr.address,
3663 			     ctxt->op_bytes);
3664 	if (rc != X86EMUL_CONTINUE)
3665 		return rc;
3666 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3667 	    is_noncanonical_address(desc_ptr.address))
3668 		return emulate_gp(ctxt, 0);
3669 	if (lgdt)
3670 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3671 	else
3672 		ctxt->ops->set_idt(ctxt, &desc_ptr);
3673 	/* Disable writeback. */
3674 	ctxt->dst.type = OP_NONE;
3675 	return X86EMUL_CONTINUE;
3676 }
3677 
3678 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3679 {
3680 	return em_lgdt_lidt(ctxt, true);
3681 }
3682 
3683 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3684 {
3685 	return em_lgdt_lidt(ctxt, false);
3686 }
3687 
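/*
 * SMSW stores the low word of CR0.  LMSW below replaces only CR0 bits
 * 1-3 unconditionally; bit 0 (PE) can be set but never cleared.
 */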
3688 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3689 {
3690 	if (ctxt->dst.type == OP_MEM)
3691 		ctxt->dst.bytes = 2;
3692 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3693 	return X86EMUL_CONTINUE;
3694 }
3695 
3696 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3697 {
3698 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3699 			  | (ctxt->src.val & 0x0f));
3700 	ctxt->dst.type = OP_NONE;
3701 	return X86EMUL_CONTINUE;
3702 }
3703 
3704 static int em_loop(struct x86_emulate_ctxt *ctxt)
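/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0) decrement *CX first; for the
 * conditional forms, (ctxt->b ^ 0x5) maps 0xe1/0xe0 onto the JE/JNE
 * condition codes understood by test_cc().
 */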
3705 {
3706 	int rc = X86EMUL_CONTINUE;
3707 
3708 	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3709 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3710 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3711 		rc = jmp_rel(ctxt, ctxt->src.val);
3712 
3713 	return rc;
3714 }
3715 
3716 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3717 {
3718 	int rc = X86EMUL_CONTINUE;
3719 
3720 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3721 		rc = jmp_rel(ctxt, ctxt->src.val);
3722 
3723 	return rc;
3724 }
3725 
3726 static int em_in(struct x86_emulate_ctxt *ctxt)
3727 {
3728 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3729 			     &ctxt->dst.val))
3730 		return X86EMUL_IO_NEEDED;
3731 
3732 	return X86EMUL_CONTINUE;
3733 }
3734 
3735 static int em_out(struct x86_emulate_ctxt *ctxt)
3736 {
3737 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3738 				    &ctxt->src.val, 1);
3739 	/* Disable writeback. */
3740 	ctxt->dst.type = OP_NONE;
3741 	return X86EMUL_CONTINUE;
3742 }
3743 
3744 static int em_cli(struct x86_emulate_ctxt *ctxt)
3745 {
3746 	if (emulator_bad_iopl(ctxt))
3747 		return emulate_gp(ctxt, 0);
3748 
3749 	ctxt->eflags &= ~X86_EFLAGS_IF;
3750 	return X86EMUL_CONTINUE;
3751 }
3752 
3753 static int em_sti(struct x86_emulate_ctxt *ctxt)
3754 {
3755 	if (emulator_bad_iopl(ctxt))
3756 		return emulate_gp(ctxt, 0);
3757 
3758 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3759 	ctxt->eflags |= X86_EFLAGS_IF;
3760 	return X86EMUL_CONTINUE;
3761 }
3762 
3763 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3764 {
3765 	u32 eax, ebx, ecx, edx;
3766 
3767 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3768 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3769 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3770 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3771 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3772 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3773 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3774 	return X86EMUL_CONTINUE;
3775 }
3776 
3777 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3778 {
3779 	u32 flags;
3780 
3781 	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3782 		X86_EFLAGS_SF;
3783 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3784 
3785 	ctxt->eflags &= ~0xffUL;
3786 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3787 	return X86EMUL_CONTINUE;
3788 }
3789 
3790 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3791 {
3792 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3793 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3794 	return X86EMUL_CONTINUE;
3795 }
3796 
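/*
 * BSWAP is architecturally defined only for 32- and 64-bit operands;
 * the default case also covers the undefined 16-bit encoding by
 * swapping the low 32 bits.
 */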
3797 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3798 {
3799 	switch (ctxt->op_bytes) {
3800 #ifdef CONFIG_X86_64
3801 	case 8:
3802 		asm("bswap %0" : "+r"(ctxt->dst.val));
3803 		break;
3804 #endif
3805 	default:
3806 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3807 		break;
3808 	}
3809 	return X86EMUL_CONTINUE;
3810 }
3811 
3812 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3813 {
3814 	/* emulate CLFLUSH as a no-op, regardless of CPUID feature bits */
3815 	return X86EMUL_CONTINUE;
3816 }
3817 
3818 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3819 {
3820 	ctxt->dst.val = (s32) ctxt->src.val;
3821 	return X86EMUL_CONTINUE;
3822 }
3823 
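/* Only CR0, CR2-CR4 and CR8 exist; accessing any other CR is #UD. */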
3824 static bool valid_cr(int nr)
3825 {
3826 	switch (nr) {
3827 	case 0:
3828 	case 2 ... 4:
3829 	case 8:
3830 		return true;
3831 	default:
3832 		return false;
3833 	}
3834 }
3835 
3836 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3837 {
3838 	if (!valid_cr(ctxt->modrm_reg))
3839 		return emulate_ud(ctxt);
3840 
3841 	return X86EMUL_CONTINUE;
3842 }
3843 
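/*
 * MOV to CRn: #GP(0) if any reserved bit would be set, plus the
 * architectural CR0.PG/PE, CR0.NW/CD and long-mode PAE consistency
 * checks below.
 */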
3844 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3845 {
3846 	u64 new_val = ctxt->src.val64;
3847 	int cr = ctxt->modrm_reg;
3848 	u64 efer = 0;
3849 
3850 	static u64 cr_reserved_bits[] = {
3851 		0xffffffff00000000ULL,
3852 		0, 0, 0, /* CR3 checked later */
3853 		CR4_RESERVED_BITS,
3854 		0, 0, 0,
3855 		CR8_RESERVED_BITS,
3856 	};
3857 
3858 	if (!valid_cr(cr))
3859 		return emulate_ud(ctxt);
3860 
3861 	if (new_val & cr_reserved_bits[cr])
3862 		return emulate_gp(ctxt, 0);
3863 
3864 	switch (cr) {
3865 	case 0: {
3866 		u64 cr4;
3867 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3868 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3869 			return emulate_gp(ctxt, 0);
3870 
3871 		cr4 = ctxt->ops->get_cr(ctxt, 4);
3872 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3873 
3874 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3875 		    !(cr4 & X86_CR4_PAE))
3876 			return emulate_gp(ctxt, 0);
3877 
3878 		break;
3879 		}
3880 	case 3: {
3881 		u64 rsvd = 0;
3882 
3883 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3884 		if (efer & EFER_LMA)
3885 			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3886 
3887 		if (new_val & rsvd)
3888 			return emulate_gp(ctxt, 0);
3889 
3890 		break;
3891 		}
3892 	case 4: {
3893 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3894 
3895 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3896 			return emulate_gp(ctxt, 0);
3897 
3898 		break;
3899 		}
3900 	}
3901 
3902 	return X86EMUL_CONTINUE;
3903 }
3904 
3905 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3906 {
3907 	unsigned long dr7;
3908 
3909 	ctxt->ops->get_dr(ctxt, 7, &dr7);
3910 
3911 	/* Check if DR7.GD (general detect enable, bit 13) is set */
3912 	return dr7 & (1 << 13);
3913 }
3914 
3915 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3916 {
3917 	int dr = ctxt->modrm_reg;
3918 	u64 cr4;
3919 
3920 	if (dr > 7)
3921 		return emulate_ud(ctxt);
3922 
3923 	cr4 = ctxt->ops->get_cr(ctxt, 4);
3924 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3925 		return emulate_ud(ctxt);
3926 
3927 	if (check_dr7_gd(ctxt)) {
3928 		ulong dr6;
3929 
3930 		ctxt->ops->get_dr(ctxt, 6, &dr6);
3931 		dr6 &= ~15;
3932 		dr6 |= DR6_BD | DR6_RTM;
3933 		ctxt->ops->set_dr(ctxt, 6, dr6);
3934 		return emulate_db(ctxt);
3935 	}
3936 
3937 	return X86EMUL_CONTINUE;
3938 }
3939 
3940 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3941 {
3942 	u64 new_val = ctxt->src.val64;
3943 	int dr = ctxt->modrm_reg;
3944 
3945 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3946 		return emulate_gp(ctxt, 0);
3947 
3948 	return check_dr_read(ctxt);
3949 }
3950 
3951 static int check_svme(struct x86_emulate_ctxt *ctxt)
3952 {
3953 	u64 efer;
3954 
3955 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3956 
3957 	if (!(efer & EFER_SVME))
3958 		return emulate_ud(ctxt);
3959 
3960 	return X86EMUL_CONTINUE;
3961 }
3962 
3963 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3964 {
3965 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3966 
3967 	/* Valid physical address? */
3968 	if (rax & 0xffff000000000000ULL)
3969 		return emulate_gp(ctxt, 0);
3970 
3971 	return check_svme(ctxt);
3972 }
3973 
3974 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3975 {
3976 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3977 
3978 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3979 		return emulate_ud(ctxt);
3980 
3981 	return X86EMUL_CONTINUE;
3982 }
3983 
3984 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3985 {
3986 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3987 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3988 
3989 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3990 	    ctxt->ops->check_pmc(ctxt, rcx))
3991 		return emulate_gp(ctxt, 0);
3992 
3993 	return X86EMUL_CONTINUE;
3994 }
3995 
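/*
 * IN/OUT transfer at most four bytes; clamp the size before consulting
 * the I/O permission bitmap via emulator_io_permited().
 */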
3996 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3997 {
3998 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3999 	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4000 		return emulate_gp(ctxt, 0);
4001 
4002 	return X86EMUL_CONTINUE;
4003 }
4004 
4005 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4006 {
4007 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4008 	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4009 		return emulate_gp(ctxt, 0);
4010 
4011 	return X86EMUL_CONTINUE;
4012 }
4013 
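/*
 * Shorthand constructors for opcode-table entries: D() carries decode
 * flags only, I()/F() attach an execute/fastop callback, DI()/II() add
 * an intercept, the *IP variants add a permission check, and the
 * G/GD/ID/MD/E/GP forms select sub-tables via ModRM fields, CPU mode
 * or mandatory prefix.
 */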
4014 #define D(_y) { .flags = (_y) }
4015 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4016 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4017 		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4018 #define N    D(NotImpl)
4019 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4020 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4021 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4022 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4023 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4024 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4025 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4026 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4027 #define II(_f, _e, _i) \
4028 	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4029 #define IIP(_f, _e, _i, _p) \
4030 	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4031 	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4032 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4033 
4034 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
4035 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4036 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4037 #define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4038 #define I2bvIP(_f, _e, _i, _p) \
4039 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4040 
4041 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4042 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4043 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4044 
4045 static const struct opcode group7_rm0[] = {
4046 	N,
4047 	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4048 	N, N, N, N, N, N,
4049 };
4050 
4051 static const struct opcode group7_rm1[] = {
4052 	DI(SrcNone | Priv, monitor),
4053 	DI(SrcNone | Priv, mwait),
4054 	N, N, N, N, N, N,
4055 };
4056 
4057 static const struct opcode group7_rm3[] = {
4058 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4059 	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4060 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4061 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4062 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4063 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4064 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4065 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4066 };
4067 
4068 static const struct opcode group7_rm7[] = {
4069 	N,
4070 	DIP(SrcNone, rdtscp, check_rdtsc),
4071 	N, N, N, N, N, N,
4072 };
4073 
4074 static const struct opcode group1[] = {
4075 	F(Lock, em_add),
4076 	F(Lock | PageTable, em_or),
4077 	F(Lock, em_adc),
4078 	F(Lock, em_sbb),
4079 	F(Lock | PageTable, em_and),
4080 	F(Lock, em_sub),
4081 	F(Lock, em_xor),
4082 	F(NoWrite, em_cmp),
4083 };
4084 
4085 static const struct opcode group1A[] = {
4086 	I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
4087 };
4088 
4089 static const struct opcode group2[] = {
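/* Group 2 rotates/shifts; /6 is the undocumented alias of shl/sal. */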
4090 	F(DstMem | ModRM, em_rol),
4091 	F(DstMem | ModRM, em_ror),
4092 	F(DstMem | ModRM, em_rcl),
4093 	F(DstMem | ModRM, em_rcr),
4094 	F(DstMem | ModRM, em_shl),
4095 	F(DstMem | ModRM, em_shr),
4096 	F(DstMem | ModRM, em_shl),
4097 	F(DstMem | ModRM, em_sar),
4098 };
4099 
4100 static const struct opcode group3[] = {
4101 	F(DstMem | SrcImm | NoWrite, em_test),
4102 	F(DstMem | SrcImm | NoWrite, em_test),
4103 	F(DstMem | SrcNone | Lock, em_not),
4104 	F(DstMem | SrcNone | Lock, em_neg),
4105 	F(DstXacc | Src2Mem, em_mul_ex),
4106 	F(DstXacc | Src2Mem, em_imul_ex),
4107 	F(DstXacc | Src2Mem, em_div_ex),
4108 	F(DstXacc | Src2Mem, em_idiv_ex),
4109 };
4110 
4111 static const struct opcode group4[] = {
4112 	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4113 	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4114 	N, N, N, N, N, N,
4115 };
4116 
4117 static const struct opcode group5[] = {
4118 	F(DstMem | SrcNone | Lock,		em_inc),
4119 	F(DstMem | SrcNone | Lock,		em_dec),
4120 	I(SrcMem | NearBranch,			em_call_near_abs),
4121 	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4122 	I(SrcMem | NearBranch,			em_jmp_abs),
4123 	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4124 	I(SrcMem | Stack,			em_push), D(Undefined),
4125 };
4126 
4127 static const struct opcode group6[] = {
4128 	DI(Prot | DstMem,	sldt),
4129 	DI(Prot | DstMem,	str),
4130 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4131 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4132 	N, N, N, N,
4133 };
4134 
4135 static const struct group_dual group7 = { {
4136 	II(Mov | DstMem,			em_sgdt, sgdt),
4137 	II(Mov | DstMem,			em_sidt, sidt),
4138 	II(SrcMem | Priv,			em_lgdt, lgdt),
4139 	II(SrcMem | Priv,			em_lidt, lidt),
4140 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4141 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4142 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4143 }, {
4144 	EXT(0, group7_rm0),
4145 	EXT(0, group7_rm1),
4146 	N, EXT(0, group7_rm3),
4147 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4148 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4149 	EXT(0, group7_rm7),
4150 } };
4151 
4152 static const struct opcode group8[] = {
4153 	N, N, N, N,
4154 	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4155 	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4156 	F(DstMem | SrcImmByte | Lock,			em_btr),
4157 	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4158 };
4159 
4160 static const struct group_dual group9 = { {
4161 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4162 }, {
4163 	N, N, N, N, N, N, N, N,
4164 } };
4165 
4166 static const struct opcode group11[] = {
4167 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4168 	X7(D(Undefined)),
4169 };
4170 
4171 static const struct gprefix pfx_0f_ae_7 = {
4172 	I(SrcMem | ByteOp, em_clflush), N, N, N,
4173 };
4174 
4175 static const struct group_dual group15 = { {
4176 	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4177 }, {
4178 	N, N, N, N, N, N, N, N,
4179 } };
4180 
4181 static const struct gprefix pfx_0f_6f_0f_7f = {
4182 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4183 };
4184 
4185 static const struct instr_dual instr_dual_0f_2b = {
4186 	I(0, em_mov), N
4187 };
4188 
4189 static const struct gprefix pfx_0f_2b = {
4190 	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4191 };
4192 
4193 static const struct gprefix pfx_0f_28_0f_29 = {
4194 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4195 };
4196 
4197 static const struct gprefix pfx_0f_e7 = {
4198 	N, I(Sse, em_mov), N, N,
4199 };
4200 
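/*
 * x87 escape opcodes: the first table is indexed by the ModRM reg
 * field when mod != 3, the second by the full ModRM byte (0xc0-0xff).
 */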
4201 static const struct escape escape_d9 = { {
4202 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4203 }, {
4204 	/* 0xC0 - 0xC7 */
4205 	N, N, N, N, N, N, N, N,
4206 	/* 0xC8 - 0xCF */
4207 	N, N, N, N, N, N, N, N,
4208 	/* 0xD0 - 0xD7 */
4209 	N, N, N, N, N, N, N, N,
4210 	/* 0xD8 - 0xDF */
4211 	N, N, N, N, N, N, N, N,
4212 	/* 0xE0 - 0xE7 */
4213 	N, N, N, N, N, N, N, N,
4214 	/* 0xE8 - 0xEF */
4215 	N, N, N, N, N, N, N, N,
4216 	/* 0xF0 - 0xF7 */
4217 	N, N, N, N, N, N, N, N,
4218 	/* 0xF8 - 0xFF */
4219 	N, N, N, N, N, N, N, N,
4220 } };
4221 
4222 static const struct escape escape_db = { {
4223 	N, N, N, N, N, N, N, N,
4224 }, {
4225 	/* 0xC0 - 0xC7 */
4226 	N, N, N, N, N, N, N, N,
4227 	/* 0xC8 - 0xCF */
4228 	N, N, N, N, N, N, N, N,
4229 	/* 0xD0 - 0xD7 */
4230 	N, N, N, N, N, N, N, N,
4231 	/* 0xD8 - 0xDF */
4232 	N, N, N, N, N, N, N, N,
4233 	/* 0xE0 - 0xE7 */
4234 	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4235 	/* 0xE8 - 0xEF */
4236 	N, N, N, N, N, N, N, N,
4237 	/* 0xF0 - 0xF7 */
4238 	N, N, N, N, N, N, N, N,
4239 	/* 0xF8 - 0xFF */
4240 	N, N, N, N, N, N, N, N,
4241 } };
4242 
4243 static const struct escape escape_dd = { {
4244 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4245 }, {
4246 	/* 0xC0 - 0xC7 */
4247 	N, N, N, N, N, N, N, N,
4248 	/* 0xC8 - 0xCF */
4249 	N, N, N, N, N, N, N, N,
4250 	/* 0xD0 - 0xD7 */
4251 	N, N, N, N, N, N, N, N,
4252 	/* 0xD8 - 0xDF */
4253 	N, N, N, N, N, N, N, N,
4254 	/* 0xE0 - 0xE7 */
4255 	N, N, N, N, N, N, N, N,
4256 	/* 0xE8 - 0xEF */
4257 	N, N, N, N, N, N, N, N,
4258 	/* 0xF0 - 0xF7 */
4259 	N, N, N, N, N, N, N, N,
4260 	/* 0xF8 - 0xFF */
4261 	N, N, N, N, N, N, N, N,
4262 } };
4263 
4264 static const struct instr_dual instr_dual_0f_c3 = {
4265 	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4266 };
4267 
4268 static const struct mode_dual mode_dual_63 = {
4269 	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4270 };
4271 
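/*
 * Main dispatch table, indexed by the first opcode byte; the two-byte
 * (0x0f) and three-byte (0x0f 0x38) maps follow.
 */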
4272 static const struct opcode opcode_table[256] = {
4273 	/* 0x00 - 0x07 */
4274 	F6ALU(Lock, em_add),
4275 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4276 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4277 	/* 0x08 - 0x0F */
4278 	F6ALU(Lock | PageTable, em_or),
4279 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4280 	N,
4281 	/* 0x10 - 0x17 */
4282 	F6ALU(Lock, em_adc),
4283 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4284 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4285 	/* 0x18 - 0x1F */
4286 	F6ALU(Lock, em_sbb),
4287 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4288 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4289 	/* 0x20 - 0x27 */
4290 	F6ALU(Lock | PageTable, em_and), N, N,
4291 	/* 0x28 - 0x2F */
4292 	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4293 	/* 0x30 - 0x37 */
4294 	F6ALU(Lock, em_xor), N, N,
4295 	/* 0x38 - 0x3F */
4296 	F6ALU(NoWrite, em_cmp), N, N,
4297 	/* 0x40 - 0x4F */
4298 	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4299 	/* 0x50 - 0x57 */
4300 	X8(I(SrcReg | Stack, em_push)),
4301 	/* 0x58 - 0x5F */
4302 	X8(I(DstReg | Stack, em_pop)),
4303 	/* 0x60 - 0x67 */
4304 	I(ImplicitOps | Stack | No64, em_pusha),
4305 	I(ImplicitOps | Stack | No64, em_popa),
4306 	N, MD(ModRM, &mode_dual_63),
4307 	N, N, N, N,
4308 	/* 0x68 - 0x6F */
4309 	I(SrcImm | Mov | Stack, em_push),
4310 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4311 	I(SrcImmByte | Mov | Stack, em_push),
4312 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4313 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4314 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4315 	/* 0x70 - 0x7F */
4316 	X16(D(SrcImmByte | NearBranch)),
4317 	/* 0x80 - 0x87 */
4318 	G(ByteOp | DstMem | SrcImm, group1),
4319 	G(DstMem | SrcImm, group1),
4320 	G(ByteOp | DstMem | SrcImm | No64, group1),
4321 	G(DstMem | SrcImmByte, group1),
4322 	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4323 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4324 	/* 0x88 - 0x8F */
4325 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4326 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4327 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4328 	D(ModRM | SrcMem | NoAccess | DstReg),
4329 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4330 	G(0, group1A),
4331 	/* 0x90 - 0x97 */
4332 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4333 	/* 0x98 - 0x9F */
4334 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4335 	I(SrcImmFAddr | No64, em_call_far), N,
4336 	II(ImplicitOps | Stack, em_pushf, pushf),
4337 	II(ImplicitOps | Stack, em_popf, popf),
4338 	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4339 	/* 0xA0 - 0xA7 */
4340 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4341 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4342 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
4343 	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4344 	/* 0xA8 - 0xAF */
4345 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4346 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4347 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4348 	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4349 	/* 0xB0 - 0xB7 */
4350 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4351 	/* 0xB8 - 0xBF */
4352 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4353 	/* 0xC0 - 0xC7 */
4354 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4355 	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4356 	I(ImplicitOps | NearBranch, em_ret),
4357 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4358 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4359 	G(ByteOp, group11), G(0, group11),
4360 	/* 0xC8 - 0xCF */
4361 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4362 	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4363 	I(ImplicitOps, em_ret_far),
4364 	D(ImplicitOps), DI(SrcImmByte, intn),
4365 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4366 	/* 0xD0 - 0xD7 */
4367 	G(Src2One | ByteOp, group2), G(Src2One, group2),
4368 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4369 	I(DstAcc | SrcImmUByte | No64, em_aam),
4370 	I(DstAcc | SrcImmUByte | No64, em_aad),
4371 	F(DstAcc | ByteOp | No64, em_salc),
4372 	I(DstAcc | SrcXLat | ByteOp, em_mov),
4373 	/* 0xD8 - 0xDF */
4374 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4375 	/* 0xE0 - 0xE7 */
4376 	X3(I(SrcImmByte | NearBranch, em_loop)),
4377 	I(SrcImmByte | NearBranch, em_jcxz),
4378 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4379 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4380 	/* 0xE8 - 0xEF */
4381 	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4382 	I(SrcImmFAddr | No64, em_jmp_far),
4383 	D(SrcImmByte | ImplicitOps | NearBranch),
4384 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4385 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4386 	/* 0xF0 - 0xF7 */
4387 	N, DI(ImplicitOps, icebp), N, N,
4388 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4389 	G(ByteOp, group3), G(0, group3),
4390 	/* 0xF8 - 0xFF */
4391 	D(ImplicitOps), D(ImplicitOps),
4392 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4393 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4394 };
4395 
4396 static const struct opcode twobyte_table[256] = {
4397 	/* 0x00 - 0x0F */
4398 	G(0, group6), GD(0, &group7), N, N,
4399 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4400 	II(ImplicitOps | Priv, em_clts, clts), N,
4401 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4402 	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4403 	/* 0x10 - 0x1F */
4404 	N, N, N, N, N, N, N, N,
4405 	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4406 	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4407 	/* 0x20 - 0x2F */
4408 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4409 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4410 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4411 						check_cr_write),
4412 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4413 						check_dr_write),
4414 	N, N, N, N,
4415 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4416 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4417 	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4418 	N, N, N, N,
4419 	/* 0x30 - 0x3F */
4420 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4421 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4422 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4423 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4424 	I(ImplicitOps | EmulateOnUD, em_sysenter),
4425 	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4426 	N, N,
4427 	N, N, N, N, N, N, N, N,
4428 	/* 0x40 - 0x4F */
4429 	X16(D(DstReg | SrcMem | ModRM)),
4430 	/* 0x50 - 0x5F */
4431 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4432 	/* 0x60 - 0x6F */
4433 	N, N, N, N,
4434 	N, N, N, N,
4435 	N, N, N, N,
4436 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4437 	/* 0x70 - 0x7F */
4438 	N, N, N, N,
4439 	N, N, N, N,
4440 	N, N, N, N,
4441 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4442 	/* 0x80 - 0x8F */
4443 	X16(D(SrcImm | NearBranch)),
4444 	/* 0x90 - 0x9F */
4445 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4446 	/* 0xA0 - 0xA7 */
4447 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4448 	II(ImplicitOps, em_cpuid, cpuid),
4449 	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4450 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4451 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4452 	/* 0xA8 - 0xAF */
4453 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4454 	II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
4455 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4456 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4457 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4458 	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4459 	/* 0xB0 - 0xB7 */
4460 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4461 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4462 	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4463 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4464 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4465 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4466 	/* 0xB8 - 0xBF */
4467 	N, N,
4468 	G(BitOp, group8),
4469 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4470 	I(DstReg | SrcMem | ModRM, em_bsf_c),
4471 	I(DstReg | SrcMem | ModRM, em_bsr_c),
4472 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4473 	/* 0xC0 - 0xC7 */
4474 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4475 	N, ID(0, &instr_dual_0f_c3),
4476 	N, N, N, GD(0, &group9),
4477 	/* 0xC8 - 0xCF */
4478 	X8(I(DstReg, em_bswap)),
4479 	/* 0xD0 - 0xDF */
4480 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4481 	/* 0xE0 - 0xEF */
4482 	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4483 	N, N, N, N, N, N, N, N,
4484 	/* 0xF0 - 0xFF */
4485 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4486 };
4487 
4488 static const struct instr_dual instr_dual_0f_38_f0 = {
4489 	I(DstReg | SrcMem | Mov, em_movbe), N
4490 };
4491 
4492 static const struct instr_dual instr_dual_0f_38_f1 = {
4493 	I(DstMem | SrcReg | Mov, em_movbe), N
4494 };
4495 
4496 static const struct gprefix three_byte_0f_38_f0 = {
4497 	ID(0, &instr_dual_0f_38_f0), N, N, N
4498 };
4499 
4500 static const struct gprefix three_byte_0f_38_f1 = {
4501 	ID(0, &instr_dual_0f_38_f1), N, N, N
4502 };
4503 
4504 /*
4505  * The instructions below are selected by the mandatory (SIMD) prefix;
4506  * the table itself is indexed by the third opcode byte.
4507  */
4508 static const struct opcode opcode_map_0f_38[256] = {
4509 	/* 0x00 - 0x7f */
4510 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4511 	/* 0x80 - 0xef */
4512 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4513 	/* 0xf0 - 0xf1 */
4514 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4515 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4516 	/* 0xf2 - 0xff */
4517 	N, N, X4(N), X8(N)
4518 };
4519 
4520 #undef D
4521 #undef N
4522 #undef G
4523 #undef GD
4524 #undef I
4525 #undef GP
4526 #undef EXT
4527 #undef MD
4528 #undef ID
4529 
4530 #undef D2bv
4531 #undef D2bvIP
4532 #undef I2bv
4533 #undef I2bvIP
4534 #undef F6ALU
4535 
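/*
 * Except for MOV r64, imm64 (OpImm64), immediates never exceed four
 * bytes; a 64-bit operand size therefore implies a sign-extended
 * 32-bit immediate here.
 */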
4536 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4537 {
4538 	unsigned size;
4539 
4540 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4541 	if (size == 8)
4542 		size = 4;
4543 	return size;
4544 }
4545 
4546 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4547 		      unsigned size, bool sign_extension)
4548 {
4549 	int rc = X86EMUL_CONTINUE;
4550 
4551 	op->type = OP_IMM;
4552 	op->bytes = size;
4553 	op->addr.mem.ea = ctxt->_eip;
4554 	/* NB. Immediates are sign-extended as necessary. */
4555 	switch (op->bytes) {
4556 	case 1:
4557 		op->val = insn_fetch(s8, ctxt);
4558 		break;
4559 	case 2:
4560 		op->val = insn_fetch(s16, ctxt);
4561 		break;
4562 	case 4:
4563 		op->val = insn_fetch(s32, ctxt);
4564 		break;
4565 	case 8:
4566 		op->val = insn_fetch(s64, ctxt);
4567 		break;
4568 	}
4569 	if (!sign_extension) {
4570 		switch (op->bytes) {
4571 		case 1:
4572 			op->val &= 0xff;
4573 			break;
4574 		case 2:
4575 			op->val &= 0xffff;
4576 			break;
4577 		case 4:
4578 			op->val &= 0xffffffff;
4579 			break;
4580 		}
4581 	}
4582 done:
4583 	return rc;
4584 }
4585 
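/*
 * Materialize one operand from its 5-bit OpXxx descriptor (packed into
 * ctxt->d); memory-like operands share a common tail via mem_common.
 */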
4586 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4587 			  unsigned d)
4588 {
4589 	int rc = X86EMUL_CONTINUE;
4590 
4591 	switch (d) {
4592 	case OpReg:
4593 		decode_register_operand(ctxt, op);
4594 		break;
4595 	case OpImmUByte:
4596 		rc = decode_imm(ctxt, op, 1, false);
4597 		break;
4598 	case OpMem:
4599 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4600 	mem_common:
4601 		*op = ctxt->memop;
4602 		ctxt->memopp = op;
4603 		if (ctxt->d & BitOp)
4604 			fetch_bit_operand(ctxt);
4605 		op->orig_val = op->val;
4606 		break;
4607 	case OpMem64:
4608 		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4609 		goto mem_common;
4610 	case OpAcc:
4611 		op->type = OP_REG;
4612 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4613 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4614 		fetch_register_operand(op);
4615 		op->orig_val = op->val;
4616 		break;
4617 	case OpAccLo:
4618 		op->type = OP_REG;
4619 		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4620 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4621 		fetch_register_operand(op);
4622 		op->orig_val = op->val;
4623 		break;
4624 	case OpAccHi:
4625 		if (ctxt->d & ByteOp) {
4626 			op->type = OP_NONE;
4627 			break;
4628 		}
4629 		op->type = OP_REG;
4630 		op->bytes = ctxt->op_bytes;
4631 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4632 		fetch_register_operand(op);
4633 		op->orig_val = op->val;
4634 		break;
4635 	case OpDI:
4636 		op->type = OP_MEM;
4637 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4638 		op->addr.mem.ea =
4639 			register_address(ctxt, VCPU_REGS_RDI);
4640 		op->addr.mem.seg = VCPU_SREG_ES;
4641 		op->val = 0;
4642 		op->count = 1;
4643 		break;
4644 	case OpDX:
4645 		op->type = OP_REG;
4646 		op->bytes = 2;
4647 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4648 		fetch_register_operand(op);
4649 		break;
4650 	case OpCL:
4651 		op->type = OP_IMM;
4652 		op->bytes = 1;
4653 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4654 		break;
4655 	case OpImmByte:
4656 		rc = decode_imm(ctxt, op, 1, true);
4657 		break;
4658 	case OpOne:
4659 		op->type = OP_IMM;
4660 		op->bytes = 1;
4661 		op->val = 1;
4662 		break;
4663 	case OpImm:
4664 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4665 		break;
4666 	case OpImm64:
4667 		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4668 		break;
4669 	case OpMem8:
4670 		ctxt->memop.bytes = 1;
4671 		if (ctxt->memop.type == OP_REG) {
4672 			ctxt->memop.addr.reg = decode_register(ctxt,
4673 					ctxt->modrm_rm, true);
4674 			fetch_register_operand(&ctxt->memop);
4675 		}
4676 		goto mem_common;
4677 	case OpMem16:
4678 		ctxt->memop.bytes = 2;
4679 		goto mem_common;
4680 	case OpMem32:
4681 		ctxt->memop.bytes = 4;
4682 		goto mem_common;
4683 	case OpImmU16:
4684 		rc = decode_imm(ctxt, op, 2, false);
4685 		break;
4686 	case OpImmU:
4687 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4688 		break;
4689 	case OpSI:
4690 		op->type = OP_MEM;
4691 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4692 		op->addr.mem.ea =
4693 			register_address(ctxt, VCPU_REGS_RSI);
4694 		op->addr.mem.seg = ctxt->seg_override;
4695 		op->val = 0;
4696 		op->count = 1;
4697 		break;
4698 	case OpXLat:
4699 		op->type = OP_MEM;
4700 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4701 		op->addr.mem.ea =
4702 			address_mask(ctxt,
4703 				reg_read(ctxt, VCPU_REGS_RBX) +
4704 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4705 		op->addr.mem.seg = ctxt->seg_override;
4706 		op->val = 0;
4707 		break;
4708 	case OpImmFAddr:
4709 		op->type = OP_IMM;
4710 		op->addr.mem.ea = ctxt->_eip;
4711 		op->bytes = ctxt->op_bytes + 2;
4712 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4713 		break;
4714 	case OpMemFAddr:
4715 		ctxt->memop.bytes = ctxt->op_bytes + 2;
4716 		goto mem_common;
4717 	case OpES:
4718 		op->type = OP_IMM;
4719 		op->val = VCPU_SREG_ES;
4720 		break;
4721 	case OpCS:
4722 		op->type = OP_IMM;
4723 		op->val = VCPU_SREG_CS;
4724 		break;
4725 	case OpSS:
4726 		op->type = OP_IMM;
4727 		op->val = VCPU_SREG_SS;
4728 		break;
4729 	case OpDS:
4730 		op->type = OP_IMM;
4731 		op->val = VCPU_SREG_DS;
4732 		break;
4733 	case OpFS:
4734 		op->type = OP_IMM;
4735 		op->val = VCPU_SREG_FS;
4736 		break;
4737 	case OpGS:
4738 		op->type = OP_IMM;
4739 		op->val = VCPU_SREG_GS;
4740 		break;
4741 	case OpImplicit:
4742 		/* Special instructions do their own operand decoding. */
4743 	default:
4744 		op->type = OP_NONE; /* Disable writeback. */
4745 		break;
4746 	}
4747 
4748 done:
4749 	return rc;
4750 }
4751 
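/*
 * Main decode entry point: fetch legacy/REX prefixes and the opcode
 * byte(s), resolve group/dual/prefix-selected table entries, then
 * decode ModRM/SIB and up to three operands.
 */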
4752 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4753 {
4754 	int rc = X86EMUL_CONTINUE;
4755 	int mode = ctxt->mode;
4756 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4757 	bool op_prefix = false;
4758 	bool has_seg_override = false;
4759 	struct opcode opcode;
4760 
4761 	ctxt->memop.type = OP_NONE;
4762 	ctxt->memopp = NULL;
4763 	ctxt->_eip = ctxt->eip;
4764 	ctxt->fetch.ptr = ctxt->fetch.data;
4765 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4766 	ctxt->opcode_len = 1;
4767 	if (insn_len > 0)
4768 		memcpy(ctxt->fetch.data, insn, insn_len);
4769 	else {
4770 		rc = __do_insn_fetch_bytes(ctxt, 1);
4771 		if (rc != X86EMUL_CONTINUE)
4772 			return rc;
4773 	}
4774 
4775 	switch (mode) {
4776 	case X86EMUL_MODE_REAL:
4777 	case X86EMUL_MODE_VM86:
4778 	case X86EMUL_MODE_PROT16:
4779 		def_op_bytes = def_ad_bytes = 2;
4780 		break;
4781 	case X86EMUL_MODE_PROT32:
4782 		def_op_bytes = def_ad_bytes = 4;
4783 		break;
4784 #ifdef CONFIG_X86_64
4785 	case X86EMUL_MODE_PROT64:
4786 		def_op_bytes = 4;
4787 		def_ad_bytes = 8;
4788 		break;
4789 #endif
4790 	default:
4791 		return EMULATION_FAILED;
4792 	}
4793 
4794 	ctxt->op_bytes = def_op_bytes;
4795 	ctxt->ad_bytes = def_ad_bytes;
4796 
4797 	/* Legacy prefixes. */
4798 	for (;;) {
4799 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4800 		case 0x66:	/* operand-size override */
4801 			op_prefix = true;
4802 			/* switch between 2/4 bytes */
4803 			ctxt->op_bytes = def_op_bytes ^ 6;
4804 			break;
4805 		case 0x67:	/* address-size override */
4806 			if (mode == X86EMUL_MODE_PROT64)
4807 				/* switch between 4/8 bytes */
4808 				ctxt->ad_bytes = def_ad_bytes ^ 12;
4809 			else
4810 				/* switch between 2/4 bytes */
4811 				ctxt->ad_bytes = def_ad_bytes ^ 6;
4812 			break;
4813 		case 0x26:	/* ES override */
4814 		case 0x2e:	/* CS override */
4815 		case 0x36:	/* SS override */
4816 		case 0x3e:	/* DS override */
4817 			has_seg_override = true;
4818 			ctxt->seg_override = (ctxt->b >> 3) & 3;
4819 			break;
4820 		case 0x64:	/* FS override */
4821 		case 0x65:	/* GS override */
4822 			has_seg_override = true;
4823 			ctxt->seg_override = ctxt->b & 7;
4824 			break;
4825 		case 0x40 ... 0x4f: /* REX */
4826 			if (mode != X86EMUL_MODE_PROT64)
4827 				goto done_prefixes;
4828 			ctxt->rex_prefix = ctxt->b;
4829 			continue;
4830 		case 0xf0:	/* LOCK */
4831 			ctxt->lock_prefix = 1;
4832 			break;
4833 		case 0xf2:	/* REPNE/REPNZ */
4834 		case 0xf3:	/* REP/REPE/REPZ */
4835 			ctxt->rep_prefix = ctxt->b;
4836 			break;
4837 		default:
4838 			goto done_prefixes;
4839 		}
4840 
4841 		/* Any legacy prefix after a REX prefix nullifies its effect. */
4842 		ctxt->rex_prefix = 0;
4844 	}
4845 
4846 done_prefixes:
4847 
4848 	/* REX prefix. */
4849 	if (ctxt->rex_prefix & 8)
4850 		ctxt->op_bytes = 8;	/* REX.W */
4851 
4852 	/* Opcode byte(s). */
4853 	opcode = opcode_table[ctxt->b];
4854 	/* Two-byte opcode? */
4855 	if (ctxt->b == 0x0f) {
4856 		ctxt->opcode_len = 2;
4857 		ctxt->b = insn_fetch(u8, ctxt);
4858 		opcode = twobyte_table[ctxt->b];
4859 
4860 		/* 0F_38 opcode map */
4861 		if (ctxt->b == 0x38) {
4862 			ctxt->opcode_len = 3;
4863 			ctxt->b = insn_fetch(u8, ctxt);
4864 			opcode = opcode_map_0f_38[ctxt->b];
4865 		}
4866 	}
4867 	ctxt->d = opcode.flags;
4868 
4869 	if (ctxt->d & ModRM)
4870 		ctxt->modrm = insn_fetch(u8, ctxt);
4871 
4872 	/* VEX-prefixed instructions are not implemented */
4873 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4874 	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4875 		ctxt->d = NotImpl;
4876 	}
4877 
4878 	while (ctxt->d & GroupMask) {
4879 		switch (ctxt->d & GroupMask) {
4880 		case Group:
4881 			goffset = (ctxt->modrm >> 3) & 7;
4882 			opcode = opcode.u.group[goffset];
4883 			break;
4884 		case GroupDual:
4885 			goffset = (ctxt->modrm >> 3) & 7;
4886 			if ((ctxt->modrm >> 6) == 3)
4887 				opcode = opcode.u.gdual->mod3[goffset];
4888 			else
4889 				opcode = opcode.u.gdual->mod012[goffset];
4890 			break;
4891 		case RMExt:
4892 			goffset = ctxt->modrm & 7;
4893 			opcode = opcode.u.group[goffset];
4894 			break;
4895 		case Prefix:
4896 			if (ctxt->rep_prefix && op_prefix)
4897 				return EMULATION_FAILED;
4898 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4899 			switch (simd_prefix) {
4900 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4901 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4902 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4903 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4904 			}
4905 			break;
4906 		case Escape:
4907 			if (ctxt->modrm > 0xbf)
4908 				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4909 			else
4910 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4911 			break;
4912 		case InstrDual:
4913 			if ((ctxt->modrm >> 6) == 3)
4914 				opcode = opcode.u.idual->mod3;
4915 			else
4916 				opcode = opcode.u.idual->mod012;
4917 			break;
4918 		case ModeDual:
4919 			if (ctxt->mode == X86EMUL_MODE_PROT64)
4920 				opcode = opcode.u.mdual->mode64;
4921 			else
4922 				opcode = opcode.u.mdual->mode32;
4923 			break;
4924 		default:
4925 			return EMULATION_FAILED;
4926 		}
4927 
4928 		ctxt->d &= ~(u64)GroupMask;
4929 		ctxt->d |= opcode.flags;
4930 	}
4931 
4932 	/* Unrecognised? */
4933 	if (ctxt->d == 0)
4934 		return EMULATION_FAILED;
4935 
4936 	ctxt->execute = opcode.u.execute;
4937 
4938 	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4939 		return EMULATION_FAILED;
4940 
4941 	if (unlikely(ctxt->d &
4942 	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4943 	     No16))) {
4944 		/*
4945 		 * These are copied unconditionally here, and checked unconditionally
4946 		 * in x86_emulate_insn.
4947 		 */
4948 		ctxt->check_perm = opcode.check_perm;
4949 		ctxt->intercept = opcode.intercept;
4950 
4951 		if (ctxt->d & NotImpl)
4952 			return EMULATION_FAILED;
4953 
4954 		if (mode == X86EMUL_MODE_PROT64) {
4955 			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4956 				ctxt->op_bytes = 8;
4957 			else if (ctxt->d & NearBranch)
4958 				ctxt->op_bytes = 8;
4959 		}
4960 
4961 		if (ctxt->d & Op3264) {
4962 			if (mode == X86EMUL_MODE_PROT64)
4963 				ctxt->op_bytes = 8;
4964 			else
4965 				ctxt->op_bytes = 4;
4966 		}
4967 
4968 		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4969 			ctxt->op_bytes = 4;
4970 
4971 		if (ctxt->d & Sse)
4972 			ctxt->op_bytes = 16;
4973 		else if (ctxt->d & Mmx)
4974 			ctxt->op_bytes = 8;
4975 	}
4976 
4977 	/* ModRM and SIB bytes. */
4978 	if (ctxt->d & ModRM) {
4979 		rc = decode_modrm(ctxt, &ctxt->memop);
4980 		if (!has_seg_override) {
4981 			has_seg_override = true;
4982 			ctxt->seg_override = ctxt->modrm_seg;
4983 		}
4984 	} else if (ctxt->d & MemAbs)
4985 		rc = decode_abs(ctxt, &ctxt->memop);
4986 	if (rc != X86EMUL_CONTINUE)
4987 		goto done;
4988 
4989 	if (!has_seg_override)
4990 		ctxt->seg_override = VCPU_SREG_DS;
4991 
4992 	ctxt->memop.addr.mem.seg = ctxt->seg_override;
4993 
4994 	/*
4995 	 * Decode and fetch the source operand: register, memory
4996 	 * or immediate.
4997 	 */
4998 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4999 	if (rc != X86EMUL_CONTINUE)
5000 		goto done;
5001 
5002 	/*
5003 	 * Decode and fetch the second source operand: register, memory
5004 	 * or immediate.
5005 	 */
5006 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5007 	if (rc != X86EMUL_CONTINUE)
5008 		goto done;
5009 
5010 	/* Decode and fetch the destination operand: register or memory. */
5011 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5012 
5013 	if (ctxt->rip_relative)
5014 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5015 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5016 
5017 done:
5018 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5019 }
5020 
5021 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5022 {
5023 	return ctxt->d & PageTable;
5024 }
5025 
5026 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5027 {
5028 	/*
5029 	 * The second termination condition applies only to REPE and REPNE:
5030 	 * if the repeat prefix is REPE/REPZ or REPNE/REPNZ, test the
5031 	 * corresponding termination condition:
5032 	 * 	- if REPE/REPZ and ZF = 0 then done
5033 	 * 	- if REPNE/REPNZ and ZF = 1 then done
5034 	 */
5035 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5036 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5037 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5038 		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5039 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5040 		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5041 		return true;
5042 
5043 	return false;
5044 }
5045 
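/*
 * fwait forces delivery of any pending x87 exception; the fixup entry
 * catches it here so it can be reflected to the guest as #MF.
 */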
5046 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5047 {
5048 	bool fault = false;
5049 
5050 	ctxt->ops->get_fpu(ctxt);
5051 	asm volatile("1: fwait \n\t"
5052 		     "2: \n\t"
5053 		     ".pushsection .fixup,\"ax\" \n\t"
5054 		     "3: \n\t"
5055 		     "movb $1, %[fault] \n\t"
5056 		     "jmp 2b \n\t"
5057 		     ".popsection \n\t"
5058 		     _ASM_EXTABLE(1b, 3b)
5059 		     : [fault]"+qm"(fault));
5060 	ctxt->ops->put_fpu(ctxt);
5061 
5062 	if (unlikely(fault))
5063 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5064 
5065 	return X86EMUL_CONTINUE;
5066 }
5067 
5068 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5069 				       struct operand *op)
5070 {
5071 	if (op->type == OP_MM)
5072 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5073 }
5074 
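/*
 * Dispatch to a flag-computing stub: the stubs for each operand size
 * are laid out FASTOP_SIZE bytes apart, so the destination size selects
 * the entry point.  Guest flags are installed around the call and read
 * back afterwards; a NULL fop on return signals a divide error (#DE).
 */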
5075 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5076 {
5077 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5078 	if (!(ctxt->d & ByteOp))
5079 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5080 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5081 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5082 	      [fastop]"+S"(fop)
5083 	    : "c"(ctxt->src2.val));
5084 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5085 	if (!fop) /* exception is returned in fop variable */
5086 		return emulate_de(ctxt);
5087 	return X86EMUL_CONTINUE;
5088 }
5089 
5090 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5091 {
5092 	memset(&ctxt->rip_relative, 0,
5093 	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5094 
5095 	ctxt->io_read.pos = 0;
5096 	ctxt->io_read.end = 0;
5097 	ctxt->mem_read.end = 0;
5098 }
5099 
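/*
 * Main execution entry point: perform the per-instruction privilege,
 * permission and intercept checks, read memory operands, execute, then
 * write back results and advance RIP (restarting unfinished string
 * instructions).
 */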
5100 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5101 {
5102 	const struct x86_emulate_ops *ops = ctxt->ops;
5103 	int rc = X86EMUL_CONTINUE;
5104 	int saved_dst_type = ctxt->dst.type;
5105 
5106 	ctxt->mem_read.pos = 0;
5107 
5108 	/* LOCK prefix is allowed only with some instructions */
5109 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5110 		rc = emulate_ud(ctxt);
5111 		goto done;
5112 	}
5113 
5114 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5115 		rc = emulate_ud(ctxt);
5116 		goto done;
5117 	}
5118 
5119 	if (unlikely(ctxt->d &
5120 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5121 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5122 				(ctxt->d & Undefined)) {
5123 			rc = emulate_ud(ctxt);
5124 			goto done;
5125 		}
5126 
5127 		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5128 		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5129 			rc = emulate_ud(ctxt);
5130 			goto done;
5131 		}
5132 
5133 		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5134 			rc = emulate_nm(ctxt);
5135 			goto done;
5136 		}
5137 
5138 		if (ctxt->d & Mmx) {
5139 			rc = flush_pending_x87_faults(ctxt);
5140 			if (rc != X86EMUL_CONTINUE)
5141 				goto done;
5142 			/*
5143 			 * Now that we know the FPU is exception-safe, we can
5144 			 * fetch operands from it.
5145 			 */
5146 			fetch_possible_mmx_operand(ctxt, &ctxt->src);
5147 			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5148 			if (!(ctxt->d & Mov))
5149 				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5150 		}
5151 
5152 		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5153 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5154 						      X86_ICPT_PRE_EXCEPT);
5155 			if (rc != X86EMUL_CONTINUE)
5156 				goto done;
5157 		}
5158 
5159 		/* Instruction can only be executed in protected mode */
5160 		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5161 			rc = emulate_ud(ctxt);
5162 			goto done;
5163 		}
5164 
5165 		/* Privileged instructions can be executed only at CPL 0 */
5166 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5167 			if (ctxt->d & PrivUD)
5168 				rc = emulate_ud(ctxt);
5169 			else
5170 				rc = emulate_gp(ctxt, 0);
5171 			goto done;
5172 		}
5173 
5174 		/* Do instruction specific permission checks */
5175 		if (ctxt->d & CheckPerm) {
5176 			rc = ctxt->check_perm(ctxt);
5177 			if (rc != X86EMUL_CONTINUE)
5178 				goto done;
5179 		}
5180 
5181 		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5182 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5183 						      X86_ICPT_POST_EXCEPT);
5184 			if (rc != X86EMUL_CONTINUE)
5185 				goto done;
5186 		}
5187 
5188 		if (ctxt->rep_prefix && (ctxt->d & String)) {
5189 			/* All REP prefixes have the same first termination condition */
5190 			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5191 				string_registers_quirk(ctxt);
5192 				ctxt->eip = ctxt->_eip;
5193 				ctxt->eflags &= ~X86_EFLAGS_RF;
5194 				goto done;
5195 			}
5196 		}
5197 	}
5198 
5199 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5200 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5201 				    ctxt->src.valptr, ctxt->src.bytes);
5202 		if (rc != X86EMUL_CONTINUE)
5203 			goto done;
5204 		ctxt->src.orig_val64 = ctxt->src.val64;
5205 	}
5206 
5207 	if (ctxt->src2.type == OP_MEM) {
5208 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5209 				    &ctxt->src2.val, ctxt->src2.bytes);
5210 		if (rc != X86EMUL_CONTINUE)
5211 			goto done;
5212 	}
5213 
5214 	if ((ctxt->d & DstMask) == ImplicitOps)
5215 		goto special_insn;
5216 
5218 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5219 		/* optimisation - avoid slow emulated read if Mov */
5220 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5221 				   &ctxt->dst.val, ctxt->dst.bytes);
5222 		if (rc != X86EMUL_CONTINUE) {
5223 			if (!(ctxt->d & NoWrite) &&
5224 			    rc == X86EMUL_PROPAGATE_FAULT &&
5225 			    ctxt->exception.vector == PF_VECTOR)
5226 				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5227 			goto done;
5228 		}
5229 	}
5230 	/* Copy full 64-bit value for CMPXCHG8B.  */
5231 	ctxt->dst.orig_val64 = ctxt->dst.val64;
5232 
5233 special_insn:
5234 
5235 	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5236 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5237 					      X86_ICPT_POST_MEMACCESS);
5238 		if (rc != X86EMUL_CONTINUE)
5239 			goto done;
5240 	}
5241 
5242 	if (ctxt->rep_prefix && (ctxt->d & String))
5243 		ctxt->eflags |= X86_EFLAGS_RF;
5244 	else
5245 		ctxt->eflags &= ~X86_EFLAGS_RF;
5246 
5247 	if (ctxt->execute) {
5248 		if (ctxt->d & Fastop) {
5249 			void (*fop)(struct fastop *) = (void *)ctxt->execute;
5250 			rc = fastop(ctxt, fop);
5251 			if (rc != X86EMUL_CONTINUE)
5252 				goto done;
5253 			goto writeback;
5254 		}
5255 		rc = ctxt->execute(ctxt);
5256 		if (rc != X86EMUL_CONTINUE)
5257 			goto done;
5258 		goto writeback;
5259 	}
5260 
5261 	if (ctxt->opcode_len == 2)
5262 		goto twobyte_insn;
5263 	else if (ctxt->opcode_len == 3)
5264 		goto threebyte_insn;
5265 
5266 	switch (ctxt->b) {
5267 	case 0x70 ... 0x7f: /* jcc (short) */
5268 		if (test_cc(ctxt->b, ctxt->eflags))
5269 			rc = jmp_rel(ctxt, ctxt->src.val);
5270 		break;
5271 	case 0x8d: /* lea r16/r32, m */
5272 		ctxt->dst.val = ctxt->src.addr.mem.ea;
5273 		break;
5274 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5275 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5276 			ctxt->dst.type = OP_NONE;
5277 		else
5278 			rc = em_xchg(ctxt);
5279 		break;
5280 	case 0x98: /* cbw/cwde/cdqe */
5281 		switch (ctxt->op_bytes) {
5282 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5283 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5284 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5285 		}
5286 		break;
5287 	case 0xcc:		/* int3 */
5288 		rc = emulate_int(ctxt, 3);
5289 		break;
5290 	case 0xcd:		/* int n */
5291 		rc = emulate_int(ctxt, ctxt->src.val);
5292 		break;
5293 	case 0xce:		/* into */
5294 		if (ctxt->eflags & X86_EFLAGS_OF)
5295 			rc = emulate_int(ctxt, 4);
5296 		break;
5297 	case 0xe9: /* jmp rel */
5298 	case 0xeb: /* jmp rel short */
5299 		rc = jmp_rel(ctxt, ctxt->src.val);
5300 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5301 		break;
5302 	case 0xf4:              /* hlt */
5303 		ctxt->ops->halt(ctxt);
5304 		break;
5305 	case 0xf5:	/* cmc */
5306 		/* complement carry flag from eflags reg */
5307 		ctxt->eflags ^= X86_EFLAGS_CF;
5308 		break;
5309 	case 0xf8: /* clc */
5310 		ctxt->eflags &= ~X86_EFLAGS_CF;
5311 		break;
5312 	case 0xf9: /* stc */
5313 		ctxt->eflags |= X86_EFLAGS_CF;
5314 		break;
5315 	case 0xfc: /* cld */
5316 		ctxt->eflags &= ~X86_EFLAGS_DF;
5317 		break;
5318 	case 0xfd: /* std */
5319 		ctxt->eflags |= X86_EFLAGS_DF;
5320 		break;
5321 	default:
5322 		goto cannot_emulate;
5323 	}
5324 
5325 	if (rc != X86EMUL_CONTINUE)
5326 		goto done;
5327 
5328 writeback:
5329 	if (ctxt->d & SrcWrite) {
5330 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5331 		rc = writeback(ctxt, &ctxt->src);
5332 		if (rc != X86EMUL_CONTINUE)
5333 			goto done;
5334 	}
5335 	if (!(ctxt->d & NoWrite)) {
5336 		rc = writeback(ctxt, &ctxt->dst);
5337 		if (rc != X86EMUL_CONTINUE)
5338 			goto done;
5339 	}
5340 
5341 	/*
5342 	 * Restore dst type in case the decode is reused
5343 	 * (happens for string instructions).
5344 	 */
5345 	ctxt->dst.type = saved_dst_type;
5346 
5347 	if ((ctxt->d & SrcMask) == SrcSI)
5348 		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5349 
5350 	if ((ctxt->d & DstMask) == DstDI)
5351 		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5352 
5353 	if (ctxt->rep_prefix && (ctxt->d & String)) {
5354 		unsigned int count;
5355 		struct read_cache *r = &ctxt->io_read;
5356 		if ((ctxt->d & SrcMask) == SrcSI)
5357 			count = ctxt->src.count;
5358 		else
5359 			count = ctxt->dst.count;
5360 		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5361 
5362 		if (!string_insn_completed(ctxt)) {
5363 			/*
5364 			 * Re-enter the guest when the PIO read-ahead buffer is
5365 			 * empty or, if it is not used, after every 1024 iterations.
5366 			 */
5367 			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5368 			    (r->end == 0 || r->end != r->pos)) {
5369 				/*
5370 				 * Reset the read cache. This usually happens
5371 				 * before decode, but since the instruction is
5372 				 * restarted we have to do it here.
5373 				 */
5374 				ctxt->mem_read.end = 0;
5375 				writeback_registers(ctxt);
5376 				return EMULATION_RESTART;
5377 			}
5378 			goto done; /* skip rip writeback */
5379 		}
5380 		ctxt->eflags &= ~X86_EFLAGS_RF;
5381 	}
5382 
5383 	ctxt->eip = ctxt->_eip;
5384 
5385 done:
5386 	if (rc == X86EMUL_PROPAGATE_FAULT) {
5387 		WARN_ON(ctxt->exception.vector > 0x1f);
5388 		ctxt->have_exception = true;
5389 	}
5390 	if (rc == X86EMUL_INTERCEPTED)
5391 		return EMULATION_INTERCEPTED;
5392 
5393 	if (rc == X86EMUL_CONTINUE)
5394 		writeback_registers(ctxt);
5395 
5396 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5397 
5398 twobyte_insn:
5399 	switch (ctxt->b) {
5400 	case 0x09:		/* wbinvd */
5401 		(ctxt->ops->wbinvd)(ctxt);
5402 		break;
5403 	case 0x08:		/* invd */
5404 	case 0x0d:		/* GrpP (prefetch) */
5405 	case 0x18:		/* Grp16 (prefetch/nop) */
5406 	case 0x1f:		/* nop */
5407 		break;
5408 	case 0x20: /* mov cr, reg */
5409 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5410 		break;
5411 	case 0x21: /* mov from dr to reg */
5412 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5413 		break;
5414 	case 0x40 ... 0x4f:	/* cmov */
5415 		if (test_cc(ctxt->b, ctxt->eflags))
5416 			ctxt->dst.val = ctxt->src.val;
5417 		else if (ctxt->op_bytes != 4)
5418 			ctxt->dst.type = OP_NONE; /* no writeback */
5419 		break;
5420 	case 0x80 ... 0x8f: /* jcc (near) */
5421 		if (test_cc(ctxt->b, ctxt->eflags))
5422 			rc = jmp_rel(ctxt, ctxt->src.val);
5423 		break;
5424 	case 0x90 ... 0x9f:     /* setcc r/m8 */
5425 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5426 		break;
5427 	case 0xb6 ... 0xb7:	/* movzx */
5428 		ctxt->dst.bytes = ctxt->op_bytes;
5429 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5430 						       : (u16) ctxt->src.val;
5431 		break;
5432 	case 0xbe ... 0xbf:	/* movsx */
5433 		ctxt->dst.bytes = ctxt->op_bytes;
5434 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5435 							(s16) ctxt->src.val;
5436 		break;
5437 	default:
5438 		goto cannot_emulate;
5439 	}
5440 
5441 threebyte_insn:
5442 
5443 	if (rc != X86EMUL_CONTINUE)
5444 		goto done;
5445 
5446 	goto writeback;
5447 
5448 cannot_emulate:
5449 	return EMULATION_FAILED;
5450 }
5451 
5452 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5453 {
5454 	invalidate_registers(ctxt);
5455 }
5456 
5457 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5458 {
5459 	writeback_registers(ctxt);
5460 }
5461