xref: /openbmc/linux/arch/x86/kvm/emulate.c (revision 70a59dd8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3  * emulate.c
4  *
5  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6  *
7  * Copyright (c) 2005 Keir Fraser
8  *
9  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
10  * privileged instructions:
11  *
12  * Copyright (C) 2006 Qumranet
13  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14  *
15  *   Avi Kivity <avi@qumranet.com>
16  *   Yaniv Kamay <yaniv@qumranet.com>
17  *
18  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
19  */
20 
21 #include <linux/kvm_host.h>
22 #include "kvm_cache_regs.h"
23 #include "kvm_emulate.h"
24 #include <linux/stringify.h>
25 #include <asm/fpu/api.h>
26 #include <asm/debugreg.h>
27 #include <asm/nospec-branch.h>
28 
29 #include "x86.h"
30 #include "tss.h"
31 #include "mmu.h"
32 #include "pmu.h"
33 
34 /*
35  * Operand types
36  */
37 #define OpNone             0ull
38 #define OpImplicit         1ull  /* No generic decode */
39 #define OpReg              2ull  /* Register */
40 #define OpMem              3ull  /* Memory */
41 #define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
42 #define OpDI               5ull  /* ES:DI/EDI/RDI */
43 #define OpMem64            6ull  /* Memory, 64-bit */
44 #define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
45 #define OpDX               8ull  /* DX register */
46 #define OpCL               9ull  /* CL register (for shifts) */
47 #define OpImmByte         10ull  /* 8-bit sign extended immediate */
48 #define OpOne             11ull  /* Implied 1 */
49 #define OpImm             12ull  /* Sign extended up to 32-bit immediate */
50 #define OpMem16           13ull  /* Memory operand (16-bit). */
51 #define OpMem32           14ull  /* Memory operand (32-bit). */
52 #define OpImmU            15ull  /* Immediate operand, zero extended */
53 #define OpSI              16ull  /* SI/ESI/RSI */
54 #define OpImmFAddr        17ull  /* Immediate far address */
55 #define OpMemFAddr        18ull  /* Far address in memory */
56 #define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
57 #define OpES              20ull  /* ES */
58 #define OpCS              21ull  /* CS */
59 #define OpSS              22ull  /* SS */
60 #define OpDS              23ull  /* DS */
61 #define OpFS              24ull  /* FS */
62 #define OpGS              25ull  /* GS */
63 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
64 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
65 #define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
66 #define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
67 #define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
68 
69 #define OpBits             5  /* Width of operand field */
70 #define OpMask             ((1ull << OpBits) - 1)
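/*
 * Each operand field is OpBits wide and packed into the 64-bit flags word
 * at the shifts defined below; e.g. the destination type is recovered as
 * (ctxt->d >> DstShift) & OpMask.
 */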
71 
72 /*
73  * Opcode effective-address decode tables.
74  * Note that we only emulate instructions that have at least one memory
75  * operand (excluding implicit stack references). We assume that stack
76  * references and instruction fetches will never occur in special memory
77  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
78  * not be handled.
79  */
80 
81 /* Operand sizes: 8-bit operands or specified/overridden size. */
82 #define ByteOp      (1<<0)	/* 8-bit operands. */
83 /* Destination operand type. */
84 #define DstShift    1
85 #define ImplicitOps (OpImplicit << DstShift)
86 #define DstReg      (OpReg << DstShift)
87 #define DstMem      (OpMem << DstShift)
88 #define DstAcc      (OpAcc << DstShift)
89 #define DstDI       (OpDI << DstShift)
90 #define DstMem64    (OpMem64 << DstShift)
91 #define DstMem16    (OpMem16 << DstShift)
92 #define DstImmUByte (OpImmUByte << DstShift)
93 #define DstDX       (OpDX << DstShift)
94 #define DstAccLo    (OpAccLo << DstShift)
95 #define DstMask     (OpMask << DstShift)
96 /* Source operand type. */
97 #define SrcShift    6
98 #define SrcNone     (OpNone << SrcShift)
99 #define SrcReg      (OpReg << SrcShift)
100 #define SrcMem      (OpMem << SrcShift)
101 #define SrcMem16    (OpMem16 << SrcShift)
102 #define SrcMem32    (OpMem32 << SrcShift)
103 #define SrcImm      (OpImm << SrcShift)
104 #define SrcImmByte  (OpImmByte << SrcShift)
105 #define SrcOne      (OpOne << SrcShift)
106 #define SrcImmUByte (OpImmUByte << SrcShift)
107 #define SrcImmU     (OpImmU << SrcShift)
108 #define SrcSI       (OpSI << SrcShift)
109 #define SrcXLat     (OpXLat << SrcShift)
110 #define SrcImmFAddr (OpImmFAddr << SrcShift)
111 #define SrcMemFAddr (OpMemFAddr << SrcShift)
112 #define SrcAcc      (OpAcc << SrcShift)
113 #define SrcImmU16   (OpImmU16 << SrcShift)
114 #define SrcImm64    (OpImm64 << SrcShift)
115 #define SrcDX       (OpDX << SrcShift)
116 #define SrcMem8     (OpMem8 << SrcShift)
117 #define SrcAccHi    (OpAccHi << SrcShift)
118 #define SrcMask     (OpMask << SrcShift)
119 #define BitOp       (1<<11)
120 #define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
121 #define String      (1<<13)     /* String instruction (rep capable) */
122 #define Stack       (1<<14)     /* Stack instruction (push/pop) */
123 #define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
124 #define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
125 #define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
126 #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
127 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
128 #define Escape      (5<<15)     /* Escape to coprocessor instruction */
129 #define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
130 #define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
131 #define Sse         (1<<18)     /* SSE Vector instruction */
132 /* Generic ModRM decode. */
133 #define ModRM       (1<<19)
134 /* Destination is only written; never read. */
135 #define Mov         (1<<20)
136 /* Misc flags */
137 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
138 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
139 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
140 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
141 #define Undefined   (1<<25) /* No Such Instruction */
142 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
143 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
144 #define No64	    (1<<28)
145 #define PageTable   (1 << 29)   /* instruction used to write page table */
146 #define NotImpl     (1 << 30)   /* instruction is not implemented */
147 /* Source 2 operand type */
148 #define Src2Shift   (31)
149 #define Src2None    (OpNone << Src2Shift)
150 #define Src2Mem     (OpMem << Src2Shift)
151 #define Src2CL      (OpCL << Src2Shift)
152 #define Src2ImmByte (OpImmByte << Src2Shift)
153 #define Src2One     (OpOne << Src2Shift)
154 #define Src2Imm     (OpImm << Src2Shift)
155 #define Src2ES      (OpES << Src2Shift)
156 #define Src2CS      (OpCS << Src2Shift)
157 #define Src2SS      (OpSS << Src2Shift)
158 #define Src2DS      (OpDS << Src2Shift)
159 #define Src2FS      (OpFS << Src2Shift)
160 #define Src2GS      (OpGS << Src2Shift)
161 #define Src2Mask    (OpMask << Src2Shift)
162 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
163 #define AlignMask   ((u64)7 << 41)
164 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
165 #define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
166 #define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
167 #define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
168 #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
169 #define NoWrite     ((u64)1 << 45)  /* No writeback */
170 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
171 #define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
172 #define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
173 #define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
174 #define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
175 #define NearBranch  ((u64)1 << 52)  /* Near branches */
176 #define No16	    ((u64)1 << 53)  /* No 16 bit operand */
177 #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
178 #define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
179 
180 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
181 
182 #define X2(x...) x, x
183 #define X3(x...) X2(x), x
184 #define X4(x...) X2(x), X2(x)
185 #define X5(x...) X4(x), x
186 #define X6(x...) X4(x), X2(x)
187 #define X7(x...) X4(x), X3(x)
188 #define X8(x...) X4(x), X4(x)
189 #define X16(x...) X8(x), X8(x)
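/*
 * e.g. X4(0) expands to "0, 0, 0, 0"; the opcode tables later in this file
 * use these to stamp out runs of identical entries.
 */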
190 
191 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192 #define FASTOP_SIZE 8
193 
194 struct opcode {
195 	u64 flags : 56;
196 	u64 intercept : 8;
197 	union {
198 		int (*execute)(struct x86_emulate_ctxt *ctxt);
199 		const struct opcode *group;
200 		const struct group_dual *gdual;
201 		const struct gprefix *gprefix;
202 		const struct escape *esc;
203 		const struct instr_dual *idual;
204 		const struct mode_dual *mdual;
205 		void (*fastop)(struct fastop *fake);
206 	} u;
207 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
208 };
209 
210 struct group_dual {
211 	struct opcode mod012[8];
212 	struct opcode mod3[8];
213 };
214 
215 struct gprefix {
216 	struct opcode pfx_no;
217 	struct opcode pfx_66;
218 	struct opcode pfx_f2;
219 	struct opcode pfx_f3;
220 };
221 
222 struct escape {
223 	struct opcode op[8];
224 	struct opcode high[64];
225 };
226 
227 struct instr_dual {
228 	struct opcode mod012;
229 	struct opcode mod3;
230 };
231 
232 struct mode_dual {
233 	struct opcode mode32;
234 	struct opcode mode64;
235 };
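/*
 * Sketch of how the decoder walks these: a table entry whose flags contain
 * GroupDual points at a struct group_dual via u.gdual, and ModRM.mod == 3
 * selects gdual->mod3[ModRM.reg] while any other mod selects
 * mod012[ModRM.reg].  The remaining union members pair up with their
 * GroupMask values in the same way.
 */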
236 
237 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
238 
239 enum x86_transfer_type {
240 	X86_TRANSFER_NONE,
241 	X86_TRANSFER_CALL_JMP,
242 	X86_TRANSFER_RET,
243 	X86_TRANSFER_TASK_SWITCH,
244 };
245 
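/*
 * Lazily cached guest GPRs: reg_read() faults a register into _regs[] on
 * first use, reg_write() additionally marks it dirty, and
 * writeback_registers() below flushes only the dirty ones via ->write_gpr().
 */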
246 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
247 {
248 	if (!(ctxt->regs_valid & (1 << nr))) {
249 		ctxt->regs_valid |= 1 << nr;
250 		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
251 	}
252 	return ctxt->_regs[nr];
253 }
254 
255 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
256 {
257 	ctxt->regs_valid |= 1 << nr;
258 	ctxt->regs_dirty |= 1 << nr;
259 	return &ctxt->_regs[nr];
260 }
261 
262 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
263 {
264 	reg_read(ctxt, nr);
265 	return reg_write(ctxt, nr);
266 }
267 
268 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
269 {
270 	unsigned reg;
271 
272 	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
273 		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
274 }
275 
276 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
277 {
278 	ctxt->regs_dirty = 0;
279 	ctxt->regs_valid = 0;
280 }
281 
282 /*
283  * These EFLAGS bits are restored from saved value during emulation, and
284  * any changes are written back to the saved value after emulation.
285  */
286 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
287 		     X86_EFLAGS_PF|X86_EFLAGS_CF)
288 
289 #ifdef CONFIG_X86_64
290 #define ON64(x) x
291 #else
292 #define ON64(x)
293 #endif
294 
295 /*
296  * fastop functions have a special calling convention:
297  *
298  * dst:    rax        (in/out)
299  * src:    rdx        (in/out)
300  * src2:   rcx        (in)
301  * flags:  rflags     (in/out)
302  * ex:     rsi        (in:fastop pointer, out:zero if exception)
303  *
304  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
305  * different operand sizes can be reached by calculation, rather than a jump
306  * table (which would be bigger than the code).
307  */
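/*
 * E.g. with FASTOP_SIZE == 8, em_add + 0/8/16/24 are the byte/word/long/quad
 * variants; for non-byte ops the dispatcher advances the pointer by
 * __ffs(ctxt->dst.bytes) * FASTOP_SIZE (see fastop(), defined later in this
 * file).
 */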
308 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
309 
310 #define __FOP_FUNC(name) \
311 	".align " __stringify(FASTOP_SIZE) " \n\t" \
312 	".type " name ", @function \n\t" \
313 	name ":\n\t"
314 
315 #define FOP_FUNC(name) \
316 	__FOP_FUNC(#name)
317 
318 #define __FOP_RET(name) \
319 	"ret \n\t" \
320 	".size " name ", .-" name "\n\t"
321 
322 #define FOP_RET(name) \
323 	__FOP_RET(#name)
324 
325 #define FOP_START(op) \
326 	extern void em_##op(struct fastop *fake); \
327 	asm(".pushsection .text, \"ax\" \n\t" \
328 	    ".global em_" #op " \n\t" \
329 	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
330 	    "em_" #op ":\n\t"
331 
332 #define FOP_END \
333 	    ".popsection")
334 
335 #define __FOPNOP(name) \
336 	__FOP_FUNC(name) \
337 	__FOP_RET(name)
338 
339 #define FOPNOP() \
340 	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
341 
342 #define FOP1E(op,  dst) \
343 	__FOP_FUNC(#op "_" #dst) \
344 	"10: " #op " %" #dst " \n\t" \
345 	__FOP_RET(#op "_" #dst)
346 
347 #define FOP1EEX(op,  dst) \
348 	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
349 
350 #define FASTOP1(op) \
351 	FOP_START(op) \
352 	FOP1E(op##b, al) \
353 	FOP1E(op##w, ax) \
354 	FOP1E(op##l, eax) \
355 	ON64(FOP1E(op##q, rax))	\
356 	FOP_END
357 
358 /* 1-operand, using src2 (for MUL/DIV r/m) */
359 #define FASTOP1SRC2(op, name) \
360 	FOP_START(name) \
361 	FOP1E(op, cl) \
362 	FOP1E(op, cx) \
363 	FOP1E(op, ecx) \
364 	ON64(FOP1E(op, rcx)) \
365 	FOP_END
366 
367 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
368 #define FASTOP1SRC2EX(op, name) \
369 	FOP_START(name) \
370 	FOP1EEX(op, cl) \
371 	FOP1EEX(op, cx) \
372 	FOP1EEX(op, ecx) \
373 	ON64(FOP1EEX(op, rcx)) \
374 	FOP_END
375 
376 #define FOP2E(op,  dst, src)	   \
377 	__FOP_FUNC(#op "_" #dst "_" #src) \
378 	#op " %" #src ", %" #dst " \n\t" \
379 	__FOP_RET(#op "_" #dst "_" #src)
380 
381 #define FASTOP2(op) \
382 	FOP_START(op) \
383 	FOP2E(op##b, al, dl) \
384 	FOP2E(op##w, ax, dx) \
385 	FOP2E(op##l, eax, edx) \
386 	ON64(FOP2E(op##q, rax, rdx)) \
387 	FOP_END
388 
389 /* 2 operand, word only */
390 #define FASTOP2W(op) \
391 	FOP_START(op) \
392 	FOPNOP() \
393 	FOP2E(op##w, ax, dx) \
394 	FOP2E(op##l, eax, edx) \
395 	ON64(FOP2E(op##q, rax, rdx)) \
396 	FOP_END
397 
398 /* 2 operand, src is CL */
399 #define FASTOP2CL(op) \
400 	FOP_START(op) \
401 	FOP2E(op##b, al, cl) \
402 	FOP2E(op##w, ax, cl) \
403 	FOP2E(op##l, eax, cl) \
404 	ON64(FOP2E(op##q, rax, cl)) \
405 	FOP_END
406 
407 /* 2 operand, src and dest are reversed */
408 #define FASTOP2R(op, name) \
409 	FOP_START(name) \
410 	FOP2E(op##b, dl, al) \
411 	FOP2E(op##w, dx, ax) \
412 	FOP2E(op##l, edx, eax) \
413 	ON64(FOP2E(op##q, rdx, rax)) \
414 	FOP_END
415 
416 #define FOP3E(op,  dst, src, src2) \
417 	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
418 	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
419 	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
420 
421 /* 3-operand, word-only, src2=cl */
422 #define FASTOP3WCL(op) \
423 	FOP_START(op) \
424 	FOPNOP() \
425 	FOP3E(op##w, ax, dx, cl) \
426 	FOP3E(op##l, eax, edx, cl) \
427 	ON64(FOP3E(op##q, rax, rdx, cl)) \
428 	FOP_END
429 
430 /* Special case for SETcc - 1 instruction per cc */
431 #define FOP_SETCC(op) \
432 	".align 4 \n\t" \
433 	".type " #op ", @function \n\t" \
434 	#op ": \n\t" \
435 	#op " %al \n\t" \
436 	__FOP_RET(#op)
437 
438 asm(".pushsection .fixup, \"ax\"\n"
439     ".global kvm_fastop_exception \n"
440     "kvm_fastop_exception: xor %esi, %esi; ret\n"
441     ".popsection");
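/*
 * Faulting fastops branch here via their _ASM_EXTABLE entries; zeroing %esi
 * reports the fault in the "ex" register described in the calling
 * convention above.
 */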
442 
443 FOP_START(setcc)
444 FOP_SETCC(seto)
445 FOP_SETCC(setno)
446 FOP_SETCC(setc)
447 FOP_SETCC(setnc)
448 FOP_SETCC(setz)
449 FOP_SETCC(setnz)
450 FOP_SETCC(setbe)
451 FOP_SETCC(setnbe)
452 FOP_SETCC(sets)
453 FOP_SETCC(setns)
454 FOP_SETCC(setp)
455 FOP_SETCC(setnp)
456 FOP_SETCC(setl)
457 FOP_SETCC(setnl)
458 FOP_SETCC(setle)
459 FOP_SETCC(setnle)
460 FOP_END;
461 
462 FOP_START(salc)
463 FOP_FUNC(salc)
464 "pushf; sbb %al, %al; popf \n\t"
465 FOP_RET(salc)
466 FOP_END;
467 
468 /*
469  * XXX: inoutclob user must know where the argument is being expanded.
470  *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
471  */
472 #define asm_safe(insn, inoutclob...) \
473 ({ \
474 	int _fault = 0; \
475  \
476 	asm volatile("1:" insn "\n" \
477 	             "2:\n" \
478 	             ".pushsection .fixup, \"ax\"\n" \
479 	             "3: movl $1, %[_fault]\n" \
480 	             "   jmp  2b\n" \
481 	             ".popsection\n" \
482 	             _ASM_EXTABLE(1b, 3b) \
483 	             : [_fault] "+qm"(_fault) inoutclob ); \
484  \
485 	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
486 })
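/*
 * For instance (sketch), a caller with no extra operands can simply do:
 *	rc = asm_safe("fwait");
 */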
487 
488 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
489 				    enum x86_intercept intercept,
490 				    enum x86_intercept_stage stage)
491 {
492 	struct x86_instruction_info info = {
493 		.intercept  = intercept,
494 		.rep_prefix = ctxt->rep_prefix,
495 		.modrm_mod  = ctxt->modrm_mod,
496 		.modrm_reg  = ctxt->modrm_reg,
497 		.modrm_rm   = ctxt->modrm_rm,
498 		.src_val    = ctxt->src.val64,
499 		.dst_val    = ctxt->dst.val64,
500 		.src_bytes  = ctxt->src.bytes,
501 		.dst_bytes  = ctxt->dst.bytes,
502 		.ad_bytes   = ctxt->ad_bytes,
503 		.next_rip   = ctxt->eip,
504 	};
505 
506 	return ctxt->ops->intercept(ctxt, &info, stage);
507 }
508 
509 static void assign_masked(ulong *dest, ulong src, ulong mask)
510 {
511 	*dest = (*dest & ~mask) | (src & mask);
512 }
513 
514 static void assign_register(unsigned long *reg, u64 val, int bytes)
515 {
516 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
517 	switch (bytes) {
518 	case 1:
519 		*(u8 *)reg = (u8)val;
520 		break;
521 	case 2:
522 		*(u16 *)reg = (u16)val;
523 		break;
524 	case 4:
525 		*reg = (u32)val;
526 		break;	/* 64b: zero-extend */
527 	case 8:
528 		*reg = val;
529 		break;
530 	}
531 }
532 
533 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
534 {
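	/* ad_bytes == 2 yields 0xffff, 4 yields 0xffffffff. */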
535 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
536 }
537 
538 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
539 {
540 	u16 sel;
541 	struct desc_struct ss;
542 
543 	if (ctxt->mode == X86EMUL_MODE_PROT64)
544 		return ~0UL;
545 	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
546 	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
547 }
548 
549 static int stack_size(struct x86_emulate_ctxt *ctxt)
550 {
551 	return (__fls(stack_mask(ctxt)) + 1) >> 3;
552 }
553 
554 /* Access/update address held in a register, based on addressing mode. */
555 static inline unsigned long
556 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
557 {
558 	if (ctxt->ad_bytes == sizeof(unsigned long))
559 		return reg;
560 	else
561 		return reg & ad_mask(ctxt);
562 }
563 
564 static inline unsigned long
565 register_address(struct x86_emulate_ctxt *ctxt, int reg)
566 {
567 	return address_mask(ctxt, reg_read(ctxt, reg));
568 }
569 
570 static void masked_increment(ulong *reg, ulong mask, int inc)
571 {
572 	assign_masked(reg, *reg + inc, mask);
573 }
574 
575 static inline void
576 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
577 {
578 	ulong *preg = reg_rmw(ctxt, reg);
579 
580 	assign_register(preg, *preg + inc, ctxt->ad_bytes);
581 }
582 
583 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
584 {
585 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
586 }
587 
588 static u32 desc_limit_scaled(struct desc_struct *desc)
589 {
590 	u32 limit = get_desc_limit(desc);
591 
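	/* With G set, the 20-bit limit counts 4K pages: 0xfffff scales to 0xffffffff. */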
592 	return desc->g ? (limit << 12) | 0xfff : limit;
593 }
594 
595 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
596 {
597 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
598 		return 0;
599 
600 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
601 }
602 
603 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
604 			     u32 error, bool valid)
605 {
606 	WARN_ON(vec > 0x1f);
607 	ctxt->exception.vector = vec;
608 	ctxt->exception.error_code = error;
609 	ctxt->exception.error_code_valid = valid;
610 	return X86EMUL_PROPAGATE_FAULT;
611 }
612 
613 static int emulate_db(struct x86_emulate_ctxt *ctxt)
614 {
615 	return emulate_exception(ctxt, DB_VECTOR, 0, false);
616 }
617 
618 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
619 {
620 	return emulate_exception(ctxt, GP_VECTOR, err, true);
621 }
622 
623 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
624 {
625 	return emulate_exception(ctxt, SS_VECTOR, err, true);
626 }
627 
628 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
629 {
630 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
631 }
632 
633 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
634 {
635 	return emulate_exception(ctxt, TS_VECTOR, err, true);
636 }
637 
638 static int emulate_de(struct x86_emulate_ctxt *ctxt)
639 {
640 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
641 }
642 
643 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
644 {
645 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
646 }
647 
648 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
649 {
650 	u16 selector;
651 	struct desc_struct desc;
652 
653 	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
654 	return selector;
655 }
656 
657 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
658 				 unsigned seg)
659 {
660 	u16 dummy;
661 	u32 base3;
662 	struct desc_struct desc;
663 
664 	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
665 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
666 }
667 
668 static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
669 {
670 	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
671 }
672 
673 static inline bool emul_is_noncanonical_address(u64 la,
674 						struct x86_emulate_ctxt *ctxt)
675 {
676 	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
677 }
678 
679 /*
680  * x86 defines three classes of vector instructions: explicitly
681  * aligned, explicitly unaligned, and the rest, which change behaviour
682  * depending on whether they're AVX encoded or not.
683  *
684  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
685  * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
686  * 512 bytes of data must be aligned to a 16 byte boundary.
687  */
688 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
689 {
690 	u64 alignment = ctxt->d & AlignMask;
691 
692 	if (likely(size < 16))
693 		return 1;
694 
695 	switch (alignment) {
696 	case Unaligned:
697 	case Avx:
698 		return 1;
699 	case Aligned16:
700 		return 16;
701 	case Aligned:
702 	default:
703 		return size;
704 	}
705 }
706 
707 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
708 				       struct segmented_address addr,
709 				       unsigned *max_size, unsigned size,
710 				       bool write, bool fetch,
711 				       enum x86emul_mode mode, ulong *linear)
712 {
713 	struct desc_struct desc;
714 	bool usable;
715 	ulong la;
716 	u32 lim;
717 	u16 sel;
718 	u8  va_bits;
719 
720 	la = seg_base(ctxt, addr.seg) + addr.ea;
721 	*max_size = 0;
722 	switch (mode) {
723 	case X86EMUL_MODE_PROT64:
724 		*linear = la;
725 		va_bits = ctxt_virt_addr_bits(ctxt);
726 		if (get_canonical(la, va_bits) != la)
727 			goto bad;
728 
729 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
730 		if (size > *max_size)
731 			goto bad;
732 		break;
733 	default:
734 		*linear = la = (u32)la;
735 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
736 						addr.seg);
737 		if (!usable)
738 			goto bad;
739 		/* code segment in protected mode or read-only data segment */
740 		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
741 					|| !(desc.type & 2)) && write)
742 			goto bad;
743 		/* unreadable code segment */
744 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
745 			goto bad;
746 		lim = desc_limit_scaled(&desc);
747 		if (!(desc.type & 8) && (desc.type & 4)) {
748 			/* expand-down segment */
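		/* Valid offsets lie above the limit: (lim, 0xffff] or (lim, 0xffffffff]. */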
749 			if (addr.ea <= lim)
750 				goto bad;
751 			lim = desc.d ? 0xffffffff : 0xffff;
752 		}
753 		if (addr.ea > lim)
754 			goto bad;
755 		if (lim == 0xffffffff)
756 			*max_size = ~0u;
757 		else {
758 			*max_size = (u64)lim + 1 - addr.ea;
759 			if (size > *max_size)
760 				goto bad;
761 		}
762 		break;
763 	}
764 	if (la & (insn_alignment(ctxt, size) - 1))
765 		return emulate_gp(ctxt, 0);
766 	return X86EMUL_CONTINUE;
767 bad:
768 	if (addr.seg == VCPU_SREG_SS)
769 		return emulate_ss(ctxt, 0);
770 	else
771 		return emulate_gp(ctxt, 0);
772 }
773 
774 static int linearize(struct x86_emulate_ctxt *ctxt,
775 		     struct segmented_address addr,
776 		     unsigned size, bool write,
777 		     ulong *linear)
778 {
779 	unsigned max_size;
780 	return __linearize(ctxt, addr, &max_size, size, write, false,
781 			   ctxt->mode, linear);
782 }
783 
784 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
785 			     enum x86emul_mode mode)
786 {
787 	ulong linear;
788 	int rc;
789 	unsigned max_size;
790 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
791 					   .ea = dst };
792 
793 	if (ctxt->op_bytes != sizeof(unsigned long))
794 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
795 	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
796 	if (rc == X86EMUL_CONTINUE)
797 		ctxt->_eip = addr.ea;
798 	return rc;
799 }
800 
801 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
802 {
803 	return assign_eip(ctxt, dst, ctxt->mode);
804 }
805 
806 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
807 			  const struct desc_struct *cs_desc)
808 {
809 	enum x86emul_mode mode = ctxt->mode;
810 	int rc;
811 
812 #ifdef CONFIG_X86_64
813 	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
814 		if (cs_desc->l) {
815 			u64 efer = 0;
816 
817 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
818 			if (efer & EFER_LMA)
819 				mode = X86EMUL_MODE_PROT64;
820 		} else
821 			mode = X86EMUL_MODE_PROT32; /* temporary value */
822 	}
823 #endif
824 	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
825 		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
826 	rc = assign_eip(ctxt, dst, mode);
827 	if (rc == X86EMUL_CONTINUE)
828 		ctxt->mode = mode;
829 	return rc;
830 }
831 
832 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
833 {
834 	return assign_eip_near(ctxt, ctxt->_eip + rel);
835 }
836 
837 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
838 			      void *data, unsigned size)
839 {
840 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
841 }
842 
843 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
844 			       ulong linear, void *data,
845 			       unsigned int size)
846 {
847 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
848 }
849 
850 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
851 			      struct segmented_address addr,
852 			      void *data,
853 			      unsigned size)
854 {
855 	int rc;
856 	ulong linear;
857 
858 	rc = linearize(ctxt, addr, size, false, &linear);
859 	if (rc != X86EMUL_CONTINUE)
860 		return rc;
861 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
862 }
863 
864 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
865 			       struct segmented_address addr,
866 			       void *data,
867 			       unsigned int size)
868 {
869 	int rc;
870 	ulong linear;
871 
872 	rc = linearize(ctxt, addr, size, true, &linear);
873 	if (rc != X86EMUL_CONTINUE)
874 		return rc;
875 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
876 }
877 
878 /*
879  * Prefetch the remaining bytes of the instruction without crossing page
880  * boundary if they are not in fetch_cache yet.
881  */
882 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
883 {
884 	int rc;
885 	unsigned size, max_size;
886 	unsigned long linear;
887 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
888 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
889 					   .ea = ctxt->eip + cur_size };
890 
891 	/*
892 	 * We do not know exactly how many bytes will be needed, and
893 	 * __linearize is expensive, so fetch as much as possible.  We
894 	 * just have to avoid going beyond the 15 byte limit, the end
895 	 * of the segment, or the end of the page.
896 	 *
897 	 * __linearize is called with size 0 so that it does not do any
898 	 * boundary check itself.  Instead, we use max_size to check
899 	 * against op_size.
900 	 */
901 	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
902 			 &linear);
903 	if (unlikely(rc != X86EMUL_CONTINUE))
904 		return rc;
905 
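	/* 15UL ^ cur_size == 15 - cur_size here, as cur_size never exceeds 15. */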
906 	size = min_t(unsigned, 15UL ^ cur_size, max_size);
907 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
908 
909 	/*
910 	 * One instruction can only straddle two pages,
911 	 * and one has been loaded at the beginning of
912 	 * x86_decode_insn.  So, if we still do not have
913 	 * enough bytes, we must have hit the 15-byte limit.
914 	 */
915 	if (unlikely(size < op_size))
916 		return emulate_gp(ctxt, 0);
917 
918 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
919 			      size, &ctxt->exception);
920 	if (unlikely(rc != X86EMUL_CONTINUE))
921 		return rc;
922 	ctxt->fetch.end += size;
923 	return X86EMUL_CONTINUE;
924 }
925 
926 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
927 					       unsigned size)
928 {
929 	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
930 
931 	if (unlikely(done_size < size))
932 		return __do_insn_fetch_bytes(ctxt, size - done_size);
933 	else
934 		return X86EMUL_CONTINUE;
935 }
936 
937 /* Fetch next part of the instruction being emulated. */
938 #define insn_fetch(_type, _ctxt)					\
939 ({	_type _x;							\
940 									\
941 	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
942 	if (rc != X86EMUL_CONTINUE)					\
943 		goto done;						\
944 	ctxt->_eip += sizeof(_type);					\
945 	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
946 	ctxt->fetch.ptr += sizeof(_type);				\
947 	_x;								\
948 })
949 
950 #define insn_fetch_arr(_arr, _size, _ctxt)				\
951 ({									\
952 	rc = do_insn_fetch_bytes(_ctxt, _size);				\
953 	if (rc != X86EMUL_CONTINUE)					\
954 		goto done;						\
955 	ctxt->_eip += (_size);						\
956 	memcpy(_arr, ctxt->fetch.ptr, _size);				\
957 	ctxt->fetch.ptr += (_size);					\
958 })
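/*
 * Both macros assume the caller declared "int rc" and a "done:" label;
 * typical use (as in decode_modrm() below): sib = insn_fetch(u8, ctxt);
 */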
959 
960 /*
961  * Given the 'reg' portion of a ModRM byte, and a register block, return a
962  * pointer into the block that addresses the relevant register.
963  * @byteop: with no REX prefix, byte encodings 4..7 decode as AH,CH,DH,BH.
964  */
965 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
966 			     int byteop)
967 {
968 	void *p;
969 	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
970 
971 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
972 		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
973 	else
974 		p = reg_rmw(ctxt, modrm_reg);
975 	return p;
976 }
977 
978 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
979 			   struct segmented_address addr,
980 			   u16 *size, unsigned long *address, int op_bytes)
981 {
982 	int rc;
983 
984 	if (op_bytes == 2)
985 		op_bytes = 3;
986 	*address = 0;
987 	rc = segmented_read_std(ctxt, addr, size, 2);
988 	if (rc != X86EMUL_CONTINUE)
989 		return rc;
990 	addr.ea += 2;
991 	rc = segmented_read_std(ctxt, addr, address, op_bytes);
992 	return rc;
993 }
994 
995 FASTOP2(add);
996 FASTOP2(or);
997 FASTOP2(adc);
998 FASTOP2(sbb);
999 FASTOP2(and);
1000 FASTOP2(sub);
1001 FASTOP2(xor);
1002 FASTOP2(cmp);
1003 FASTOP2(test);
1004 
1005 FASTOP1SRC2(mul, mul_ex);
1006 FASTOP1SRC2(imul, imul_ex);
1007 FASTOP1SRC2EX(div, div_ex);
1008 FASTOP1SRC2EX(idiv, idiv_ex);
1009 
1010 FASTOP3WCL(shld);
1011 FASTOP3WCL(shrd);
1012 
1013 FASTOP2W(imul);
1014 
1015 FASTOP1(not);
1016 FASTOP1(neg);
1017 FASTOP1(inc);
1018 FASTOP1(dec);
1019 
1020 FASTOP2CL(rol);
1021 FASTOP2CL(ror);
1022 FASTOP2CL(rcl);
1023 FASTOP2CL(rcr);
1024 FASTOP2CL(shl);
1025 FASTOP2CL(shr);
1026 FASTOP2CL(sar);
1027 
1028 FASTOP2W(bsf);
1029 FASTOP2W(bsr);
1030 FASTOP2W(bt);
1031 FASTOP2W(bts);
1032 FASTOP2W(btr);
1033 FASTOP2W(btc);
1034 
1035 FASTOP2(xadd);
1036 
1037 FASTOP2R(cmp, cmp_r);
1038 
1039 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1040 {
1041 	/* If src is zero, do not write back, but still update flags */
1042 	if (ctxt->src.val == 0)
1043 		ctxt->dst.type = OP_NONE;
1044 	return fastop(ctxt, em_bsf);
1045 }
1046 
1047 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1048 {
1049 	/* If src is zero, do not writeback, but update flags */
1050 	/* If src is zero, do not write back, but still update flags */
1051 		ctxt->dst.type = OP_NONE;
1052 	return fastop(ctxt, em_bsr);
1053 }
1054 
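/*
 * Each FOP_SETCC stub above is ".align 4" and exactly 4 bytes long (3-byte
 * setcc %al plus 1-byte ret), so the stub for condition code cc sits at
 * em_setcc + 4 * cc; that is how the table is indexed below.
 */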
1055 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1056 {
1057 	u8 rc;
1058 	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1059 
1060 	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1061 	asm("push %[flags]; popf; " CALL_NOSPEC
1062 	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1063 	return rc;
1064 }
1065 
1066 static void fetch_register_operand(struct operand *op)
1067 {
1068 	switch (op->bytes) {
1069 	case 1:
1070 		op->val = *(u8 *)op->addr.reg;
1071 		break;
1072 	case 2:
1073 		op->val = *(u16 *)op->addr.reg;
1074 		break;
1075 	case 4:
1076 		op->val = *(u32 *)op->addr.reg;
1077 		break;
1078 	case 8:
1079 		op->val = *(u64 *)op->addr.reg;
1080 		break;
1081 	}
1082 }
1083 
1084 static void emulator_get_fpu(void)
1085 {
1086 	fpregs_lock();
1087 
1088 	fpregs_assert_state_consistent();
1089 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
1090 		switch_fpu_return();
1091 }
1092 
1093 static void emulator_put_fpu(void)
1094 {
1095 	fpregs_unlock();
1096 }
1097 
1098 static void read_sse_reg(sse128_t *data, int reg)
1099 {
1100 	emulator_get_fpu();
1101 	switch (reg) {
1102 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1103 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1104 	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1105 	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1106 	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1107 	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1108 	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1109 	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1110 #ifdef CONFIG_X86_64
1111 	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1112 	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1113 	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1114 	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1115 	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1116 	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1117 	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1118 	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1119 #endif
1120 	default: BUG();
1121 	}
1122 	emulator_put_fpu();
1123 }
1124 
1125 static void write_sse_reg(sse128_t *data, int reg)
1126 {
1127 	emulator_get_fpu();
1128 	switch (reg) {
1129 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1130 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1131 	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1132 	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1133 	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1134 	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1135 	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1136 	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1137 #ifdef CONFIG_X86_64
1138 	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1139 	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1140 	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1141 	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1142 	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1143 	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1144 	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1145 	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1146 #endif
1147 	default: BUG();
1148 	}
1149 	emulator_put_fpu();
1150 }
1151 
1152 static void read_mmx_reg(u64 *data, int reg)
1153 {
1154 	emulator_get_fpu();
1155 	switch (reg) {
1156 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1157 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1158 	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1159 	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1160 	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1161 	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1162 	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1163 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1164 	default: BUG();
1165 	}
1166 	emulator_put_fpu();
1167 }
1168 
1169 static void write_mmx_reg(u64 *data, int reg)
1170 {
1171 	emulator_get_fpu();
1172 	switch (reg) {
1173 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1174 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1175 	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1176 	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1177 	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1178 	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1179 	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1180 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1181 	default: BUG();
1182 	}
1183 	emulator_put_fpu();
1184 }
1185 
1186 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1187 {
1188 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1189 		return emulate_nm(ctxt);
1190 
1191 	emulator_get_fpu();
1192 	asm volatile("fninit");
1193 	emulator_put_fpu();
1194 	return X86EMUL_CONTINUE;
1195 }
1196 
1197 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1198 {
1199 	u16 fcw;
1200 
1201 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1202 		return emulate_nm(ctxt);
1203 
1204 	emulator_get_fpu();
1205 	asm volatile("fnstcw %0": "+m"(fcw));
1206 	emulator_put_fpu();
1207 
1208 	ctxt->dst.val = fcw;
1209 
1210 	return X86EMUL_CONTINUE;
1211 }
1212 
1213 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1214 {
1215 	u16 fsw;
1216 
1217 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1218 		return emulate_nm(ctxt);
1219 
1220 	emulator_get_fpu();
1221 	asm volatile("fnstsw %0": "+m"(fsw));
1222 	emulator_put_fpu();
1223 
1224 	ctxt->dst.val = fsw;
1225 
1226 	return X86EMUL_CONTINUE;
1227 }
1228 
1229 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1230 				    struct operand *op)
1231 {
1232 	unsigned reg = ctxt->modrm_reg;
1233 
1234 	if (!(ctxt->d & ModRM))
1235 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1236 
1237 	if (ctxt->d & Sse) {
1238 		op->type = OP_XMM;
1239 		op->bytes = 16;
1240 		op->addr.xmm = reg;
1241 		read_sse_reg(&op->vec_val, reg);
1242 		return;
1243 	}
1244 	if (ctxt->d & Mmx) {
1245 		reg &= 7;
1246 		op->type = OP_MM;
1247 		op->bytes = 8;
1248 		op->addr.mm = reg;
1249 		return;
1250 	}
1251 
1252 	op->type = OP_REG;
1253 	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1254 	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1255 
1256 	fetch_register_operand(op);
1257 	op->orig_val = op->val;
1258 }
1259 
1260 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1261 {
1262 	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1263 		ctxt->modrm_seg = VCPU_SREG_SS;
1264 }
1265 
1266 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1267 			struct operand *op)
1268 {
1269 	u8 sib;
1270 	int index_reg, base_reg, scale;
1271 	int rc = X86EMUL_CONTINUE;
1272 	ulong modrm_ea = 0;
1273 
1274 	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1275 	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1276 	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1277 
1278 	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1279 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1280 	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1281 	ctxt->modrm_seg = VCPU_SREG_DS;
1282 
1283 	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1284 		op->type = OP_REG;
1285 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1286 		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1287 				ctxt->d & ByteOp);
1288 		if (ctxt->d & Sse) {
1289 			op->type = OP_XMM;
1290 			op->bytes = 16;
1291 			op->addr.xmm = ctxt->modrm_rm;
1292 			read_sse_reg(&op->vec_val, ctxt->modrm_rm);
1293 			return rc;
1294 		}
1295 		if (ctxt->d & Mmx) {
1296 			op->type = OP_MM;
1297 			op->bytes = 8;
1298 			op->addr.mm = ctxt->modrm_rm & 7;
1299 			return rc;
1300 		}
1301 		fetch_register_operand(op);
1302 		return rc;
1303 	}
1304 
1305 	op->type = OP_MEM;
1306 
1307 	if (ctxt->ad_bytes == 2) {
1308 		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1309 		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1310 		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1311 		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1312 
1313 		/* 16-bit ModR/M decode. */
1314 		switch (ctxt->modrm_mod) {
1315 		case 0:
1316 			if (ctxt->modrm_rm == 6)
1317 				modrm_ea += insn_fetch(u16, ctxt);
1318 			break;
1319 		case 1:
1320 			modrm_ea += insn_fetch(s8, ctxt);
1321 			break;
1322 		case 2:
1323 			modrm_ea += insn_fetch(u16, ctxt);
1324 			break;
1325 		}
1326 		switch (ctxt->modrm_rm) {
1327 		case 0:
1328 			modrm_ea += bx + si;
1329 			break;
1330 		case 1:
1331 			modrm_ea += bx + di;
1332 			break;
1333 		case 2:
1334 			modrm_ea += bp + si;
1335 			break;
1336 		case 3:
1337 			modrm_ea += bp + di;
1338 			break;
1339 		case 4:
1340 			modrm_ea += si;
1341 			break;
1342 		case 5:
1343 			modrm_ea += di;
1344 			break;
1345 		case 6:
1346 			if (ctxt->modrm_mod != 0)
1347 				modrm_ea += bp;
1348 			break;
1349 		case 7:
1350 			modrm_ea += bx;
1351 			break;
1352 		}
1353 		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1354 		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1355 			ctxt->modrm_seg = VCPU_SREG_SS;
1356 		modrm_ea = (u16)modrm_ea;
1357 	} else {
1358 		/* 32/64-bit ModR/M decode. */
1359 		if ((ctxt->modrm_rm & 7) == 4) {
1360 			sib = insn_fetch(u8, ctxt);
1361 			index_reg |= (sib >> 3) & 7;
1362 			base_reg |= sib & 7;
1363 			scale = sib >> 6;
1364 
1365 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1366 				modrm_ea += insn_fetch(s32, ctxt);
1367 			else {
1368 				modrm_ea += reg_read(ctxt, base_reg);
1369 				adjust_modrm_seg(ctxt, base_reg);
1370 				/* Increment ESP on POP [ESP] */
1371 				if ((ctxt->d & IncSP) &&
1372 				    base_reg == VCPU_REGS_RSP)
1373 					modrm_ea += ctxt->op_bytes;
1374 			}
1375 			if (index_reg != 4)
1376 				modrm_ea += reg_read(ctxt, index_reg) << scale;
1377 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1378 			modrm_ea += insn_fetch(s32, ctxt);
1379 			if (ctxt->mode == X86EMUL_MODE_PROT64)
1380 				ctxt->rip_relative = 1;
1381 		} else {
1382 			base_reg = ctxt->modrm_rm;
1383 			modrm_ea += reg_read(ctxt, base_reg);
1384 			adjust_modrm_seg(ctxt, base_reg);
1385 		}
1386 		switch (ctxt->modrm_mod) {
1387 		case 1:
1388 			modrm_ea += insn_fetch(s8, ctxt);
1389 			break;
1390 		case 2:
1391 			modrm_ea += insn_fetch(s32, ctxt);
1392 			break;
1393 		}
1394 	}
1395 	op->addr.mem.ea = modrm_ea;
1396 	if (ctxt->ad_bytes != 8)
1397 		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1398 
1399 done:
1400 	return rc;
1401 }
1402 
1403 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1404 		      struct operand *op)
1405 {
1406 	int rc = X86EMUL_CONTINUE;
1407 
1408 	op->type = OP_MEM;
1409 	switch (ctxt->ad_bytes) {
1410 	case 2:
1411 		op->addr.mem.ea = insn_fetch(u16, ctxt);
1412 		break;
1413 	case 4:
1414 		op->addr.mem.ea = insn_fetch(u32, ctxt);
1415 		break;
1416 	case 8:
1417 		op->addr.mem.ea = insn_fetch(u64, ctxt);
1418 		break;
1419 	}
1420 done:
1421 	return rc;
1422 }
1423 
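/*
 * For BT/BTS/BTR/BTC with a register bit offset, the offset may exceed the
 * operand width; e.g. "bt %ecx, (mem)" with ECX == 100 and a 32-bit operand
 * touches mem + 12, bit 4 (100 == 12 * 8 + 4).
 */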
1424 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1425 {
1426 	long sv = 0, mask;
1427 
1428 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1429 		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1430 
1431 		if (ctxt->src.bytes == 2)
1432 			sv = (s16)ctxt->src.val & (s16)mask;
1433 		else if (ctxt->src.bytes == 4)
1434 			sv = (s32)ctxt->src.val & (s32)mask;
1435 		else
1436 			sv = (s64)ctxt->src.val & (s64)mask;
1437 
1438 		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1439 					   ctxt->dst.addr.mem.ea + (sv >> 3));
1440 	}
1441 
1442 	/* only subword offset */
1443 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1444 }
1445 
1446 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1447 			 unsigned long addr, void *dest, unsigned size)
1448 {
1449 	int rc;
1450 	struct read_cache *mc = &ctxt->mem_read;
1451 
1452 	if (mc->pos < mc->end)
1453 		goto read_cached;
1454 
1455 	WARN_ON((mc->end + size) >= sizeof(mc->data));
1456 
1457 	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1458 				      &ctxt->exception);
1459 	if (rc != X86EMUL_CONTINUE)
1460 		return rc;
1461 
1462 	mc->end += size;
1463 
1464 read_cached:
1465 	memcpy(dest, mc->data + mc->pos, size);
1466 	mc->pos += size;
1467 	return X86EMUL_CONTINUE;
1468 }
1469 
1470 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1471 			  struct segmented_address addr,
1472 			  void *data,
1473 			  unsigned size)
1474 {
1475 	int rc;
1476 	ulong linear;
1477 
1478 	rc = linearize(ctxt, addr, size, false, &linear);
1479 	if (rc != X86EMUL_CONTINUE)
1480 		return rc;
1481 	return read_emulated(ctxt, linear, data, size);
1482 }
1483 
1484 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1485 			   struct segmented_address addr,
1486 			   const void *data,
1487 			   unsigned size)
1488 {
1489 	int rc;
1490 	ulong linear;
1491 
1492 	rc = linearize(ctxt, addr, size, true, &linear);
1493 	if (rc != X86EMUL_CONTINUE)
1494 		return rc;
1495 	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1496 					 &ctxt->exception);
1497 }
1498 
1499 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1500 			     struct segmented_address addr,
1501 			     const void *orig_data, const void *data,
1502 			     unsigned size)
1503 {
1504 	int rc;
1505 	ulong linear;
1506 
1507 	rc = linearize(ctxt, addr, size, true, &linear);
1508 	if (rc != X86EMUL_CONTINUE)
1509 		return rc;
1510 	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1511 					   size, &ctxt->exception);
1512 }
1513 
1514 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1515 			   unsigned int size, unsigned short port,
1516 			   void *dest)
1517 {
1518 	struct read_cache *rc = &ctxt->io_read;
1519 
1520 	if (rc->pos == rc->end) { /* refill pio read ahead */
1521 		unsigned int in_page, n;
1522 		unsigned int count = ctxt->rep_prefix ?
1523 			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1524 		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1525 			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1526 			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1527 		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1528 		if (n == 0)
1529 			n = 1;
1530 		rc->pos = rc->end = 0;
1531 		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1532 			return 0;
1533 		rc->end = n * size;
1534 	}
1535 
1536 	if (ctxt->rep_prefix && (ctxt->d & String) &&
1537 	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1538 		ctxt->dst.data = rc->data + rc->pos;
1539 		ctxt->dst.type = OP_MEM_STR;
1540 		ctxt->dst.count = (rc->end - rc->pos) / size;
1541 		rc->pos = rc->end;
1542 	} else {
1543 		memcpy(dest, rc->data + rc->pos, size);
1544 		rc->pos += size;
1545 	}
1546 	return 1;
1547 }
1548 
1549 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1550 				     u16 index, struct desc_struct *desc)
1551 {
1552 	struct desc_ptr dt;
1553 	ulong addr;
1554 
1555 	ctxt->ops->get_idt(ctxt, &dt);
1556 
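	/*
	 * A vector beyond the IDT limit faults with error code
	 * (vector << 3) | 2; bit 1 flags an IDT reference.
	 */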
1557 	if (dt.size < index * 8 + 7)
1558 		return emulate_gp(ctxt, index << 3 | 0x2);
1559 
1560 	addr = dt.address + index * 8;
1561 	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1562 }
1563 
1564 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1565 				     u16 selector, struct desc_ptr *dt)
1566 {
1567 	const struct x86_emulate_ops *ops = ctxt->ops;
1568 	u32 base3 = 0;
1569 
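	/* Bit 2 of a selector is the TI flag: set means it indexes the LDT. */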
1570 	if (selector & 1 << 2) {
1571 		struct desc_struct desc;
1572 		u16 sel;
1573 
1574 		memset(dt, 0, sizeof(*dt));
1575 		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1576 				      VCPU_SREG_LDTR))
1577 			return;
1578 
1579 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1580 		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1581 	} else
1582 		ops->get_gdt(ctxt, dt);
1583 }
1584 
1585 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1586 			      u16 selector, ulong *desc_addr_p)
1587 {
1588 	struct desc_ptr dt;
1589 	u16 index = selector >> 3;
1590 	ulong addr;
1591 
1592 	get_descriptor_table_ptr(ctxt, selector, &dt);
1593 
1594 	if (dt.size < index * 8 + 7)
1595 		return emulate_gp(ctxt, selector & 0xfffc);
1596 
1597 	addr = dt.address + index * 8;
1598 
1599 #ifdef CONFIG_X86_64
1600 	if (addr >> 32 != 0) {
1601 		u64 efer = 0;
1602 
1603 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1604 		if (!(efer & EFER_LMA))
1605 			addr &= (u32)-1;
1606 	}
1607 #endif
1608 
1609 	*desc_addr_p = addr;
1610 	return X86EMUL_CONTINUE;
1611 }
1612 
1613 /* allowed just for 8-byte segment descriptors */
1614 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1615 				   u16 selector, struct desc_struct *desc,
1616 				   ulong *desc_addr_p)
1617 {
1618 	int rc;
1619 
1620 	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1621 	if (rc != X86EMUL_CONTINUE)
1622 		return rc;
1623 
1624 	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1625 }
1626 
1627 /* allowed just for 8-byte segment descriptors */
1628 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1629 				    u16 selector, struct desc_struct *desc)
1630 {
1631 	int rc;
1632 	ulong addr;
1633 
1634 	rc = get_descriptor_ptr(ctxt, selector, &addr);
1635 	if (rc != X86EMUL_CONTINUE)
1636 		return rc;
1637 
1638 	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1639 }
1640 
1641 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1642 				     u16 selector, int seg, u8 cpl,
1643 				     enum x86_transfer_type transfer,
1644 				     struct desc_struct *desc)
1645 {
1646 	struct desc_struct seg_desc, old_desc;
1647 	u8 dpl, rpl;
1648 	unsigned err_vec = GP_VECTOR;
1649 	u32 err_code = 0;
1650 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1651 	ulong desc_addr;
1652 	int ret;
1653 	u16 dummy;
1654 	u32 base3 = 0;
1655 
1656 	memset(&seg_desc, 0, sizeof(seg_desc));
1657 
1658 	if (ctxt->mode == X86EMUL_MODE_REAL) {
1659 		/* set real mode segment descriptor (keep limit etc. for
1660 		 * unreal mode) */
1661 		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1662 		set_desc_base(&seg_desc, selector << 4);
1663 		goto load;
1664 	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1665 		/* VM86 needs a clean new segment descriptor */
1666 		set_desc_base(&seg_desc, selector << 4);
1667 		set_desc_limit(&seg_desc, 0xffff);
1668 		seg_desc.type = 3;
1669 		seg_desc.p = 1;
1670 		seg_desc.s = 1;
1671 		seg_desc.dpl = 3;
1672 		goto load;
1673 	}
1674 
1675 	rpl = selector & 3;
1676 
1677 	/* TR should be in GDT only */
1678 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1679 		goto exception;
1680 
1681 	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1682 	if (null_selector) {
1683 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1684 			goto exception;
1685 
1686 		if (seg == VCPU_SREG_SS) {
1687 			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1688 				goto exception;
1689 
1690 			/*
1691 			 * ctxt->ops->set_segment expects the CPL to be in
1692 			 * SS.DPL, so fake an expand-up 32-bit data segment.
1693 			 */
1694 			seg_desc.type = 3;
1695 			seg_desc.p = 1;
1696 			seg_desc.s = 1;
1697 			seg_desc.dpl = cpl;
1698 			seg_desc.d = 1;
1699 			seg_desc.g = 1;
1700 		}
1701 
1702 		/* Skip all following checks */
1703 		goto load;
1704 	}
1705 
1706 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1707 	if (ret != X86EMUL_CONTINUE)
1708 		return ret;
1709 
1710 	err_code = selector & 0xfffc;
1711 	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1712 							   GP_VECTOR;
1713 
1714 	/* can't load a system descriptor into an ordinary segment register */
1715 	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1716 		if (transfer == X86_TRANSFER_CALL_JMP)
1717 			return X86EMUL_UNHANDLEABLE;
1718 		goto exception;
1719 	}
1720 
1721 	if (!seg_desc.p) {
1722 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1723 		goto exception;
1724 	}
1725 
1726 	dpl = seg_desc.dpl;
1727 
1728 	switch (seg) {
1729 	case VCPU_SREG_SS:
1730 		/*
1731 		 * segment is not a writable data segment, or the segment
1732 		 * selector's RPL != CPL, or the segment's DPL != CPL
1733 		 */
1734 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1735 			goto exception;
1736 		break;
1737 	case VCPU_SREG_CS:
1738 		if (!(seg_desc.type & 8))
1739 			goto exception;
1740 
1741 		if (seg_desc.type & 4) {
1742 			/* conforming */
1743 			if (dpl > cpl)
1744 				goto exception;
1745 		} else {
1746 			/* nonconforming */
1747 			if (rpl > cpl || dpl != cpl)
1748 				goto exception;
1749 		}
1750 		/* in long-mode d/b must be clear if l is set */
1751 		if (seg_desc.d && seg_desc.l) {
1752 			u64 efer = 0;
1753 
1754 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1755 			if (efer & EFER_LMA)
1756 				goto exception;
1757 		}
1758 
1759 		/* CS(RPL) <- CPL */
1760 		selector = (selector & 0xfffc) | cpl;
1761 		break;
1762 	case VCPU_SREG_TR:
1763 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1764 			goto exception;
1765 		old_desc = seg_desc;
1766 		seg_desc.type |= 2; /* busy */
1767 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1768 						  sizeof(seg_desc), &ctxt->exception);
1769 		if (ret != X86EMUL_CONTINUE)
1770 			return ret;
1771 		break;
1772 	case VCPU_SREG_LDTR:
1773 		if (seg_desc.s || seg_desc.type != 2)
1774 			goto exception;
1775 		break;
1776 	default: /*  DS, ES, FS, or GS */
1777 		/*
1778 		 * segment is not a data or readable code segment or
1779 		 * ((segment is a data or nonconforming code segment)
1780 		 * and (both RPL and CPL > DPL))
1781 		 */
1782 		if ((seg_desc.type & 0xa) == 0x8 ||
1783 		    (((seg_desc.type & 0xc) != 0xc) &&
1784 		     (rpl > dpl && cpl > dpl)))
1785 			goto exception;
1786 		break;
1787 	}
1788 
1789 	if (seg_desc.s) {
1790 		/* mark segment as accessed */
1791 		if (!(seg_desc.type & 1)) {
1792 			seg_desc.type |= 1;
1793 			ret = write_segment_descriptor(ctxt, selector,
1794 						       &seg_desc);
1795 			if (ret != X86EMUL_CONTINUE)
1796 				return ret;
1797 		}
1798 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1799 		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1800 		if (ret != X86EMUL_CONTINUE)
1801 			return ret;
1802 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1803 				((u64)base3 << 32), ctxt))
1804 			return emulate_gp(ctxt, 0);
1805 	}
1806 load:
1807 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1808 	if (desc)
1809 		*desc = seg_desc;
1810 	return X86EMUL_CONTINUE;
1811 exception:
1812 	return emulate_exception(ctxt, err_vec, err_code, true);
1813 }
1814 
1815 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1816 				   u16 selector, int seg)
1817 {
1818 	u8 cpl = ctxt->ops->cpl(ctxt);
1819 
1820 	/*
1821 	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1822 	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1823 	 * but it's wrong).
1824 	 *
1825 	 * However, the Intel manual says that putting IST=1/DPL=3 in
1826 	 * an interrupt gate will result in SS=3 (the AMD manual instead
1827 	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1828 	 * and only forbid it here.
1829 	 */
1830 	if (seg == VCPU_SREG_SS && selector == 3 &&
1831 	    ctxt->mode == X86EMUL_MODE_PROT64)
1832 		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1833 
1834 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1835 					 X86_TRANSFER_NONE, NULL);
1836 }
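
/*
 * E.g. in 64-bit mode "mov $3, %ax; mov %ax, %ss" must #GP(0) (selector
 * 3 is the NULL selector with RPL=3), while loading the plain NULL
 * selector (0) into SS is accepted at CPL < 3.
 */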
1837 
1838 static void write_register_operand(struct operand *op)
1839 {
1840 	return assign_register(op->addr.reg, op->val, op->bytes);
1841 }
1842 
1843 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1844 {
1845 	switch (op->type) {
1846 	case OP_REG:
1847 		write_register_operand(op);
1848 		break;
1849 	case OP_MEM:
1850 		if (ctxt->lock_prefix)
1851 			return segmented_cmpxchg(ctxt,
1852 						 op->addr.mem,
1853 						 &op->orig_val,
1854 						 &op->val,
1855 						 op->bytes);
1856 		else
1857 			return segmented_write(ctxt,
1858 					       op->addr.mem,
1859 					       &op->val,
1860 					       op->bytes);
1861 		break;
1862 	case OP_MEM_STR:
1863 		return segmented_write(ctxt,
1864 				       op->addr.mem,
1865 				       op->data,
1866 				       op->bytes * op->count);
1867 		break;
1868 	case OP_XMM:
1869 		write_sse_reg(&op->vec_val, op->addr.xmm);
1870 		break;
1871 	case OP_MM:
1872 		write_mmx_reg(&op->mm_val, op->addr.mm);
1873 		break;
1874 	case OP_NONE:
1875 		/* no writeback */
1876 		break;
1877 	default:
1878 		break;
1879 	}
1880 	return X86EMUL_CONTINUE;
1881 }
1882 
1883 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1884 {
1885 	struct segmented_address addr;
1886 
1887 	rsp_increment(ctxt, -bytes);
1888 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1889 	addr.seg = VCPU_SREG_SS;
1890 
1891 	return segmented_write(ctxt, addr, data, bytes);
1892 }
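
/*
 * Example: with a 16-bit stack and SP = 0, pushing two bytes wraps to
 * SP = 0xfffe, since rsp_increment() and stack_mask() confine the
 * update to the low 16 bits; the data is then written at SS:0xfffe.
 */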
1893 
1894 static int em_push(struct x86_emulate_ctxt *ctxt)
1895 {
1896 	/* Disable writeback. */
1897 	ctxt->dst.type = OP_NONE;
1898 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1899 }
1900 
1901 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1902 		       void *dest, int len)
1903 {
1904 	int rc;
1905 	struct segmented_address addr;
1906 
1907 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1908 	addr.seg = VCPU_SREG_SS;
1909 	rc = segmented_read(ctxt, addr, dest, len);
1910 	if (rc != X86EMUL_CONTINUE)
1911 		return rc;
1912 
1913 	rsp_increment(ctxt, len);
1914 	return rc;
1915 }
1916 
1917 static int em_pop(struct x86_emulate_ctxt *ctxt)
1918 {
1919 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1920 }
1921 
1922 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1923 			void *dest, int len)
1924 {
1925 	int rc;
1926 	unsigned long val, change_mask;
1927 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1928 	int cpl = ctxt->ops->cpl(ctxt);
1929 
1930 	rc = emulate_pop(ctxt, &val, len);
1931 	if (rc != X86EMUL_CONTINUE)
1932 		return rc;
1933 
1934 	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1935 		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1936 		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1937 		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1938 
1939 	switch (ctxt->mode) {
1940 	case X86EMUL_MODE_PROT64:
1941 	case X86EMUL_MODE_PROT32:
1942 	case X86EMUL_MODE_PROT16:
1943 		if (cpl == 0)
1944 			change_mask |= X86_EFLAGS_IOPL;
1945 		if (cpl <= iopl)
1946 			change_mask |= X86_EFLAGS_IF;
1947 		break;
1948 	case X86EMUL_MODE_VM86:
1949 		if (iopl < 3)
1950 			return emulate_gp(ctxt, 0);
1951 		change_mask |= X86_EFLAGS_IF;
1952 		break;
1953 	default: /* real mode */
1954 		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1955 		break;
1956 	}
1957 
1958 	*(unsigned long *)dest =
1959 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1960 
1961 	return rc;
1962 }
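
/*
 * Example: at CPL=3 with IOPL=0, a POPF that tries to set IF and raise
 * IOPL leaves both flags unchanged; neither is in change_mask, and no
 * fault is raised (matching hardware behaviour).
 */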
1963 
1964 static int em_popf(struct x86_emulate_ctxt *ctxt)
1965 {
1966 	ctxt->dst.type = OP_REG;
1967 	ctxt->dst.addr.reg = &ctxt->eflags;
1968 	ctxt->dst.bytes = ctxt->op_bytes;
1969 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1970 }
1971 
1972 static int em_enter(struct x86_emulate_ctxt *ctxt)
1973 {
1974 	int rc;
1975 	unsigned frame_size = ctxt->src.val;
1976 	unsigned nesting_level = ctxt->src2.val & 31;
1977 	ulong rbp;
1978 
1979 	if (nesting_level)
1980 		return X86EMUL_UNHANDLEABLE;
1981 
1982 	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1983 	rc = push(ctxt, &rbp, stack_size(ctxt));
1984 	if (rc != X86EMUL_CONTINUE)
1985 		return rc;
1986 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1987 		      stack_mask(ctxt));
1988 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1989 		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1990 		      stack_mask(ctxt));
1991 	return X86EMUL_CONTINUE;
1992 }
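
/*
 * Note: only "enter <frame_size>, 0" is handled.  A non-zero nesting
 * level would also require copying the enclosing frame pointers into
 * the new frame, which compiler-generated code practically never uses.
 */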
1993 
1994 static int em_leave(struct x86_emulate_ctxt *ctxt)
1995 {
1996 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1997 		      stack_mask(ctxt));
1998 	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1999 }
2000 
2001 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
2002 {
2003 	int seg = ctxt->src2.val;
2004 
2005 	ctxt->src.val = get_segment_selector(ctxt, seg);
2006 	if (ctxt->op_bytes == 4) {
2007 		rsp_increment(ctxt, -2);
2008 		ctxt->op_bytes = 2;
2009 	}
2010 
2011 	return em_push(ctxt);
2012 }
2013 
2014 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
2015 {
2016 	int seg = ctxt->src2.val;
2017 	unsigned long selector;
2018 	int rc;
2019 
2020 	rc = emulate_pop(ctxt, &selector, 2);
2021 	if (rc != X86EMUL_CONTINUE)
2022 		return rc;
2023 
2024 	if (ctxt->modrm_reg == VCPU_SREG_SS)
2025 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2026 	if (ctxt->op_bytes > 2)
2027 		rsp_increment(ctxt, ctxt->op_bytes - 2);
2028 
2029 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
2030 	return rc;
2031 }
2032 
2033 static int em_pusha(struct x86_emulate_ctxt *ctxt)
2034 {
2035 	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2036 	int rc = X86EMUL_CONTINUE;
2037 	int reg = VCPU_REGS_RAX;
2038 
2039 	while (reg <= VCPU_REGS_RDI) {
2040 		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
2041 							 reg_read(ctxt, reg);
2042 
2043 		rc = em_push(ctxt);
2044 		if (rc != X86EMUL_CONTINUE)
2045 			return rc;
2046 
2047 		++reg;
2048 	}
2049 
2050 	return rc;
2051 }
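
/*
 * Note that PUSHA pushes the value SP/ESP had before the instruction
 * started (old_esp), not the successively decremented value, matching
 * the architected behaviour.
 */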
2052 
2053 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2054 {
2055 	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2056 	return em_push(ctxt);
2057 }
2058 
2059 static int em_popa(struct x86_emulate_ctxt *ctxt)
2060 {
2061 	int rc = X86EMUL_CONTINUE;
2062 	int reg = VCPU_REGS_RDI;
2063 	u32 val;
2064 
2065 	while (reg >= VCPU_REGS_RAX) {
2066 		if (reg == VCPU_REGS_RSP) {
2067 			rsp_increment(ctxt, ctxt->op_bytes);
2068 			--reg;
2069 		}
2070 
2071 		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2072 		if (rc != X86EMUL_CONTINUE)
2073 			break;
2074 		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2075 		--reg;
2076 	}
2077 	return rc;
2078 }
2079 
2080 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081 {
2082 	const struct x86_emulate_ops *ops = ctxt->ops;
2083 	int rc;
2084 	struct desc_ptr dt;
2085 	gva_t cs_addr;
2086 	gva_t eip_addr;
2087 	u16 cs, eip;
2088 
2089 	/* TODO: Add limit checks */
2090 	ctxt->src.val = ctxt->eflags;
2091 	rc = em_push(ctxt);
2092 	if (rc != X86EMUL_CONTINUE)
2093 		return rc;
2094 
2095 	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2096 
2097 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2098 	rc = em_push(ctxt);
2099 	if (rc != X86EMUL_CONTINUE)
2100 		return rc;
2101 
2102 	ctxt->src.val = ctxt->_eip;
2103 	rc = em_push(ctxt);
2104 	if (rc != X86EMUL_CONTINUE)
2105 		return rc;
2106 
2107 	ops->get_idt(ctxt, &dt);
2108 
2109 	eip_addr = dt.address + (irq << 2);
2110 	cs_addr = dt.address + (irq << 2) + 2;
2111 
2112 	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2113 	if (rc != X86EMUL_CONTINUE)
2114 		return rc;
2115 
2116 	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2117 	if (rc != X86EMUL_CONTINUE)
2118 		return rc;
2119 
2120 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2121 	if (rc != X86EMUL_CONTINUE)
2122 		return rc;
2123 
2124 	ctxt->_eip = eip;
2125 
2126 	return rc;
2127 }
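
/*
 * Worked example: for INT 0x10 the IVT entry is at 0x10 << 2 = 0x40
 * from the real-mode IDT base, with the new IP at offset 0x40 and the
 * new CS at offset 0x42.
 */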
2128 
2129 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2130 {
2131 	int rc;
2132 
2133 	invalidate_registers(ctxt);
2134 	rc = __emulate_int_real(ctxt, irq);
2135 	if (rc == X86EMUL_CONTINUE)
2136 		writeback_registers(ctxt);
2137 	return rc;
2138 }
2139 
2140 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2141 {
2142 	switch (ctxt->mode) {
2143 	case X86EMUL_MODE_REAL:
2144 		return __emulate_int_real(ctxt, irq);
2145 	case X86EMUL_MODE_VM86:
2146 	case X86EMUL_MODE_PROT16:
2147 	case X86EMUL_MODE_PROT32:
2148 	case X86EMUL_MODE_PROT64:
2149 	default:
2150 		/* Protected mode interrupts are not implemented yet */
2151 		return X86EMUL_UNHANDLEABLE;
2152 	}
2153 }
2154 
2155 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2156 {
2157 	int rc = X86EMUL_CONTINUE;
2158 	unsigned long temp_eip = 0;
2159 	unsigned long temp_eflags = 0;
2160 	unsigned long cs = 0;
2161 	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2162 			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2163 			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2164 			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2165 			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2166 			     X86_EFLAGS_FIXED;
2167 	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2168 				  X86_EFLAGS_VIP;
2169 
2170 	/* TODO: Add stack limit check */
2171 
2172 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2173 
2174 	if (rc != X86EMUL_CONTINUE)
2175 		return rc;
2176 
2177 	if (temp_eip & ~0xffff)
2178 		return emulate_gp(ctxt, 0);
2179 
2180 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2181 
2182 	if (rc != X86EMUL_CONTINUE)
2183 		return rc;
2184 
2185 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2186 
2187 	if (rc != X86EMUL_CONTINUE)
2188 		return rc;
2189 
2190 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2191 
2192 	if (rc != X86EMUL_CONTINUE)
2193 		return rc;
2194 
2195 	ctxt->_eip = temp_eip;
2196 
2197 	if (ctxt->op_bytes == 4) {
2198 		ctxt->eflags = (temp_eflags & mask) | (ctxt->eflags & vm86_mask);
2199 	} else if (ctxt->op_bytes == 2) {
2200 		ctxt->eflags &= ~0xffff;
2201 		ctxt->eflags |= temp_eflags;
2202 	}
2203 
2204 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2205 	ctxt->eflags |= X86_EFLAGS_FIXED;
2206 	ctxt->ops->set_nmi_mask(ctxt, false);
2207 
2208 	return rc;
2209 }
2210 
2211 static int em_iret(struct x86_emulate_ctxt *ctxt)
2212 {
2213 	switch (ctxt->mode) {
2214 	case X86EMUL_MODE_REAL:
2215 		return emulate_iret_real(ctxt);
2216 	case X86EMUL_MODE_VM86:
2217 	case X86EMUL_MODE_PROT16:
2218 	case X86EMUL_MODE_PROT32:
2219 	case X86EMUL_MODE_PROT64:
2220 	default:
2221 		/* iret from protected mode is not implemented yet */
2222 		return X86EMUL_UNHANDLEABLE;
2223 	}
2224 }
2225 
2226 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2227 {
2228 	int rc;
2229 	unsigned short sel;
2230 	struct desc_struct new_desc;
2231 	u8 cpl = ctxt->ops->cpl(ctxt);
2232 
2233 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2234 
2235 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2236 				       X86_TRANSFER_CALL_JMP,
2237 				       &new_desc);
2238 	if (rc != X86EMUL_CONTINUE)
2239 		return rc;
2240 
2241 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2242 	/* Error handling is not implemented. */
2243 	if (rc != X86EMUL_CONTINUE)
2244 		return X86EMUL_UNHANDLEABLE;
2245 
2246 	return rc;
2247 }
2248 
2249 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2250 {
2251 	return assign_eip_near(ctxt, ctxt->src.val);
2252 }
2253 
2254 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2255 {
2256 	int rc;
2257 	long int old_eip;
2258 
2259 	old_eip = ctxt->_eip;
2260 	rc = assign_eip_near(ctxt, ctxt->src.val);
2261 	if (rc != X86EMUL_CONTINUE)
2262 		return rc;
2263 	ctxt->src.val = old_eip;
2264 	rc = em_push(ctxt);
2265 	return rc;
2266 }
2267 
2268 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2269 {
2270 	u64 old = ctxt->dst.orig_val64;
2271 
2272 	if (ctxt->dst.bytes == 16)
2273 		return X86EMUL_UNHANDLEABLE;
2274 
2275 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2276 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2277 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2278 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2279 		ctxt->eflags &= ~X86_EFLAGS_ZF;
2280 	} else {
2281 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2282 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2283 
2284 		ctxt->eflags |= X86_EFLAGS_ZF;
2285 	}
2286 	return X86EMUL_CONTINUE;
2287 }
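
/*
 * CMPXCHG8B semantics: compare EDX:EAX against the 64-bit operand; on
 * a match, set ZF and store ECX:EBX into the operand, otherwise clear
 * ZF and return the operand's old value in EDX:EAX.  CMPXCHG16B
 * (dst.bytes == 16) is not handled here.
 */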
2288 
2289 static int em_ret(struct x86_emulate_ctxt *ctxt)
2290 {
2291 	int rc;
2292 	unsigned long eip;
2293 
2294 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2295 	if (rc != X86EMUL_CONTINUE)
2296 		return rc;
2297 
2298 	return assign_eip_near(ctxt, eip);
2299 }
2300 
2301 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2302 {
2303 	int rc;
2304 	unsigned long eip, cs;
2305 	int cpl = ctxt->ops->cpl(ctxt);
2306 	struct desc_struct new_desc;
2307 
2308 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2309 	if (rc != X86EMUL_CONTINUE)
2310 		return rc;
2311 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2312 	if (rc != X86EMUL_CONTINUE)
2313 		return rc;
2314 	/* Outer-privilege level return is not implemented */
2315 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2316 		return X86EMUL_UNHANDLEABLE;
2317 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2318 				       X86_TRANSFER_RET,
2319 				       &new_desc);
2320 	if (rc != X86EMUL_CONTINUE)
2321 		return rc;
2322 	rc = assign_eip_far(ctxt, eip, &new_desc);
2323 	/* Error handling is not implemented. */
2324 	if (rc != X86EMUL_CONTINUE)
2325 		return X86EMUL_UNHANDLEABLE;
2326 
2327 	return rc;
2328 }
2329 
2330 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2331 {
2332 	int rc;
2333 
2334 	rc = em_ret_far(ctxt);
2335 	if (rc != X86EMUL_CONTINUE)
2336 		return rc;
2337 	rsp_increment(ctxt, ctxt->src.val);
2338 	return X86EMUL_CONTINUE;
2339 }
2340 
2341 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2342 {
2343 	/* Save real source value, then compare EAX against destination. */
2344 	ctxt->dst.orig_val = ctxt->dst.val;
2345 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2346 	ctxt->src.orig_val = ctxt->src.val;
2347 	ctxt->src.val = ctxt->dst.orig_val;
2348 	fastop(ctxt, em_cmp);
2349 
2350 	if (ctxt->eflags & X86_EFLAGS_ZF) {
2351 		/* Success: write back to memory; no update of EAX */
2352 		ctxt->src.type = OP_NONE;
2353 		ctxt->dst.val = ctxt->src.orig_val;
2354 	} else {
2355 		/* Failure: write the value we saw to EAX. */
2356 		ctxt->src.type = OP_REG;
2357 		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2358 		ctxt->src.val = ctxt->dst.orig_val;
2359 		/* Create write-cycle to dest by writing the same value */
2360 		ctxt->dst.val = ctxt->dst.orig_val;
2361 	}
2362 	return X86EMUL_CONTINUE;
2363 }
2364 
2365 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2366 {
2367 	int seg = ctxt->src2.val;
2368 	unsigned short sel;
2369 	int rc;
2370 
2371 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2372 
2373 	rc = load_segment_descriptor(ctxt, sel, seg);
2374 	if (rc != X86EMUL_CONTINUE)
2375 		return rc;
2376 
2377 	ctxt->dst.val = ctxt->src.val;
2378 	return rc;
2379 }
2380 
2381 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2382 {
2383 #ifdef CONFIG_X86_64
2384 	return ctxt->ops->guest_has_long_mode(ctxt);
2385 #else
2386 	return false;
2387 #endif
2388 }
2389 
2390 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2391 {
2392 	desc->g    = (flags >> 23) & 1;
2393 	desc->d    = (flags >> 22) & 1;
2394 	desc->l    = (flags >> 21) & 1;
2395 	desc->avl  = (flags >> 20) & 1;
2396 	desc->p    = (flags >> 15) & 1;
2397 	desc->dpl  = (flags >> 13) & 3;
2398 	desc->s    = (flags >> 12) & 1;
2399 	desc->type = (flags >>  8) & 15;
2400 }
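
/*
 * Worked example: the attribute dword 0x00c09b00 (a flat 32-bit code
 * segment) decodes to g=1, d=1, l=0, avl=0, p=1, dpl=0, s=1, type=0xb.
 */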
2401 
2402 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2403 			   int n)
2404 {
2405 	struct desc_struct desc;
2406 	int offset;
2407 	u16 selector;
2408 
2409 	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2410 
2411 	if (n < 3)
2412 		offset = 0x7f84 + n * 12;
2413 	else
2414 		offset = 0x7f2c + (n - 3) * 12;
2415 
2416 	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
2417 	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
2418 	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2419 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2420 	return X86EMUL_CONTINUE;
2421 }
2422 
2423 #ifdef CONFIG_X86_64
2424 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2425 			   int n)
2426 {
2427 	struct desc_struct desc;
2428 	int offset;
2429 	u16 selector;
2430 	u32 base3;
2431 
2432 	offset = 0x7e00 + n * 16;
2433 
2434 	selector =                GET_SMSTATE(u16, smstate, offset);
2435 	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2436 	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
2437 	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
2438 	base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
2439 
2440 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2441 	return X86EMUL_CONTINUE;
2442 }
2443 #endif
2444 
2445 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2446 				    u64 cr0, u64 cr3, u64 cr4)
2447 {
2448 	int bad;
2449 	u64 pcid;
2450 
2451 	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
2452 	pcid = 0;
2453 	if (cr4 & X86_CR4_PCIDE) {
2454 		pcid = cr3 & 0xfff;
2455 		cr3 &= ~0xfff;
2456 	}
2457 
2458 	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2459 	if (bad)
2460 		return X86EMUL_UNHANDLEABLE;
2461 
2462 	/*
2463 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2464 	 * Then enable protected mode.  However, PCID cannot be enabled
2465 	 * if EFER.LMA=0, so set it separately.
2466 	 */
2467 	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2468 	if (bad)
2469 		return X86EMUL_UNHANDLEABLE;
2470 
2471 	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2472 	if (bad)
2473 		return X86EMUL_UNHANDLEABLE;
2474 
2475 	if (cr4 & X86_CR4_PCIDE) {
2476 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2477 		if (bad)
2478 			return X86EMUL_UNHANDLEABLE;
2479 		if (pcid) {
2480 			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2481 			if (bad)
2482 				return X86EMUL_UNHANDLEABLE;
2483 		}
2484 
2485 	}
2486 
2487 	return X86EMUL_CONTINUE;
2488 }
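
/*
 * E.g. for cr3 = 0x12345005 (illustrative) with CR4.PCIDE = 1: CR3 is
 * first loaded as 0x12345000, CR4.PCIDE is set only after CR0 (and
 * with it EFER.LMA), and the PCID value 0x005 is restored into CR3
 * last.
 */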
2489 
2490 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2491 			     const char *smstate)
2492 {
2493 	struct desc_struct desc;
2494 	struct desc_ptr dt;
2495 	u16 selector;
2496 	u32 val, cr0, cr3, cr4;
2497 	int i;
2498 
2499 	cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
2500 	cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
2501 	ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2502 	ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
2503 
2504 	for (i = 0; i < 8; i++)
2505 		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2506 
2507 	val = GET_SMSTATE(u32, smstate, 0x7fcc);
2508 
2509 	if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
2510 		return X86EMUL_UNHANDLEABLE;
2511 
2512 	val = GET_SMSTATE(u32, smstate, 0x7fc8);
2513 
2514 	if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
2515 		return X86EMUL_UNHANDLEABLE;
2516 
2517 	selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
2518 	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
2519 	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
2520 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
2521 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2522 
2523 	selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
2524 	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
2525 	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
2526 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
2527 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2528 
2529 	dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
2530 	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
2531 	ctxt->ops->set_gdt(ctxt, &dt);
2532 
2533 	dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
2534 	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
2535 	ctxt->ops->set_idt(ctxt, &dt);
2536 
2537 	for (i = 0; i < 6; i++) {
2538 		int r = rsm_load_seg_32(ctxt, smstate, i);
2539 		if (r != X86EMUL_CONTINUE)
2540 			return r;
2541 	}
2542 
2543 	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2544 
2545 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2546 
2547 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2548 }
2549 
2550 #ifdef CONFIG_X86_64
2551 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2552 			     const char *smstate)
2553 {
2554 	struct desc_struct desc;
2555 	struct desc_ptr dt;
2556 	u64 val, cr0, cr3, cr4;
2557 	u32 base3;
2558 	u16 selector;
2559 	int i, r;
2560 
2561 	for (i = 0; i < 16; i++)
2562 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2563 
2564 	ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
2565 	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2566 
2567 	val = GET_SMSTATE(u32, smstate, 0x7f68);
2568 
2569 	if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
2570 		return X86EMUL_UNHANDLEABLE;
2571 
2572 	val = GET_SMSTATE(u32, smstate, 0x7f60);
2573 
2574 	if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
2575 		return X86EMUL_UNHANDLEABLE;
2576 
2577 	cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
2578 	cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
2579 	cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
2580 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2581 	val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
2582 
2583 	if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2584 		return X86EMUL_UNHANDLEABLE;
2585 
2586 	selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
2587 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2588 	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
2589 	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
2590 	base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
2591 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2592 
2593 	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
2594 	dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
2595 	ctxt->ops->set_idt(ctxt, &dt);
2596 
2597 	selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
2598 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2599 	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
2600 	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
2601 	base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
2602 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2603 
2604 	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
2605 	dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
2606 	ctxt->ops->set_gdt(ctxt, &dt);
2607 
2608 	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2609 	if (r != X86EMUL_CONTINUE)
2610 		return r;
2611 
2612 	for (i = 0; i < 6; i++) {
2613 		r = rsm_load_seg_64(ctxt, smstate, i);
2614 		if (r != X86EMUL_CONTINUE)
2615 			return r;
2616 	}
2617 
2618 	return X86EMUL_CONTINUE;
2619 }
2620 #endif
2621 
2622 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2623 {
2624 	unsigned long cr0, cr4, efer;
2625 	char buf[512];
2626 	u64 smbase;
2627 	int ret;
2628 
2629 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2630 		return emulate_ud(ctxt);
2631 
2632 	smbase = ctxt->ops->get_smbase(ctxt);
2633 
2634 	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2635 	if (ret != X86EMUL_CONTINUE)
2636 		return X86EMUL_UNHANDLEABLE;
2637 
2638 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2639 		ctxt->ops->set_nmi_mask(ctxt, false);
2640 
2641 	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2642 		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2643 
2644 	/*
2645 	 * Get back to real mode, to prepare a safe state in which to load
2646 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2647 	 * supports long mode.
2648 	 */
2649 	if (emulator_has_longmode(ctxt)) {
2650 		struct desc_struct cs_desc;
2651 
2652 		/* Zero CR4.PCIDE before CR0.PG.  */
2653 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2654 		if (cr4 & X86_CR4_PCIDE)
2655 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2656 
2657 		/* A 32-bit code segment is required to clear EFER.LMA.  */
2658 		memset(&cs_desc, 0, sizeof(cs_desc));
2659 		cs_desc.type = 0xb;
2660 		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2661 		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2662 	}
2663 
2664 	/* For the 64-bit case, this will clear EFER.LMA.  */
2665 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2666 	if (cr0 & X86_CR0_PE)
2667 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2668 
2669 	if (emulator_has_longmode(ctxt)) {
2670 		/* Clear CR4.PAE before clearing EFER.LME. */
2671 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2672 		if (cr4 & X86_CR4_PAE)
2673 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2674 
2675 		/* And finally go back to 32-bit mode.  */
2676 		efer = 0;
2677 		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2678 	}
2679 
2680 	/*
2681 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2682 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2683 	 * state-save area.
2684 	 */
2685 	if (ctxt->ops->pre_leave_smm(ctxt, buf))
2686 		return X86EMUL_UNHANDLEABLE;
2687 
2688 #ifdef CONFIG_X86_64
2689 	if (emulator_has_longmode(ctxt))
2690 		ret = rsm_load_state_64(ctxt, buf);
2691 	else
2692 #endif
2693 		ret = rsm_load_state_32(ctxt, buf);
2694 
2695 	if (ret != X86EMUL_CONTINUE) {
2696 		/* FIXME: should triple fault */
2697 		return X86EMUL_UNHANDLEABLE;
2698 	}
2699 
2700 	ctxt->ops->post_leave_smm(ctxt);
2701 
2702 	return X86EMUL_CONTINUE;
2703 }
2704 
2705 static void
2706 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2707 			struct desc_struct *cs, struct desc_struct *ss)
2708 {
2709 	cs->l = 0;		/* will be adjusted later */
2710 	set_desc_base(cs, 0);	/* flat segment */
2711 	cs->g = 1;		/* 4kb granularity */
2712 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2713 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2714 	cs->s = 1;
2715 	cs->dpl = 0;		/* will be adjusted later */
2716 	cs->p = 1;
2717 	cs->d = 1;
2718 	cs->avl = 0;
2719 
2720 	set_desc_base(ss, 0);	/* flat segment */
2721 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2722 	ss->g = 1;		/* 4kb granularity */
2723 	ss->s = 1;
2724 	ss->type = 0x03;	/* Read/Write, Accessed */
2725 	ss->d = 1;		/* 32bit stack segment */
2726 	ss->dpl = 0;
2727 	ss->p = 1;
2728 	ss->l = 0;
2729 	ss->avl = 0;
2730 }
2731 
2732 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2733 {
2734 	u32 eax, ebx, ecx, edx;
2735 
2736 	eax = ecx = 0;
2737 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2738 	return is_guest_vendor_intel(ebx, ecx, edx);
2739 }
2740 
2741 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2742 {
2743 	const struct x86_emulate_ops *ops = ctxt->ops;
2744 	u32 eax, ebx, ecx, edx;
2745 
2746 	/*
2747 	 * SYSCALL is always enabled in long mode, so the check only needs
2748 	 * to become vendor specific (via CPUID) for the other modes.
2749 	 */
2750 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2751 		return true;
2752 
2753 	eax = 0x00000000;
2754 	ecx = 0x00000000;
2755 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2756 	/*
2757 	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode; a
2758 	 * 32-bit compat application in a 64-bit guest will #UD.  While that
2759 	 * behaviour could be fixed by emulating the AMD response, AMD CPUs
2760 	 * cannot be made to behave like Intel ones.
2761 	 */
2762 	if (is_guest_vendor_intel(ebx, ecx, edx))
2763 		return false;
2764 
2765 	if (is_guest_vendor_amd(ebx, ecx, edx) ||
2766 	    is_guest_vendor_hygon(ebx, ecx, edx))
2767 		return true;
2768 
2769 	/*
2770 	 * default: (not Intel, not AMD, not Hygon), apply Intel's
2771 	 * stricter rules...
2772 	 */
2773 	return false;
2774 }
2775 
2776 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2777 {
2778 	const struct x86_emulate_ops *ops = ctxt->ops;
2779 	struct desc_struct cs, ss;
2780 	u64 msr_data;
2781 	u16 cs_sel, ss_sel;
2782 	u64 efer = 0;
2783 
2784 	/* syscall is not available in real mode */
2785 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2786 	    ctxt->mode == X86EMUL_MODE_VM86)
2787 		return emulate_ud(ctxt);
2788 
2789 	if (!(em_syscall_is_enabled(ctxt)))
2790 		return emulate_ud(ctxt);
2791 
2792 	ops->get_msr(ctxt, MSR_EFER, &efer);
2793 	if (!(efer & EFER_SCE))
2794 		return emulate_ud(ctxt);
2795 
2796 	setup_syscalls_segments(ctxt, &cs, &ss);
2797 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2798 	msr_data >>= 32;
2799 	cs_sel = (u16)(msr_data & 0xfffc);
2800 	ss_sel = (u16)(msr_data + 8);
2801 
2802 	if (efer & EFER_LMA) {
2803 		cs.d = 0;
2804 		cs.l = 1;
2805 	}
2806 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2807 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2808 
2809 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2810 	if (efer & EFER_LMA) {
2811 #ifdef CONFIG_X86_64
2812 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2813 
2814 		ops->get_msr(ctxt,
2815 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2816 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2817 		ctxt->_eip = msr_data;
2818 
2819 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2820 		ctxt->eflags &= ~msr_data;
2821 		ctxt->eflags |= X86_EFLAGS_FIXED;
2822 #endif
2823 	} else {
2824 		/* legacy mode */
2825 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2826 		ctxt->_eip = (u32)msr_data;
2827 
2828 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2829 	}
2830 
2831 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2832 	return X86EMUL_CONTINUE;
2833 }
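
/*
 * MSR_STAR layout used above: bits 47:32 hold the SYSCALL CS selector,
 * with SS implied at CS + 8.  E.g. STAR = 0x0023001000000000 gives
 * cs_sel = 0x10 and ss_sel = 0x18 (the STAR value is illustrative).
 */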
2834 
2835 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2836 {
2837 	const struct x86_emulate_ops *ops = ctxt->ops;
2838 	struct desc_struct cs, ss;
2839 	u64 msr_data;
2840 	u16 cs_sel, ss_sel;
2841 	u64 efer = 0;
2842 
2843 	ops->get_msr(ctxt, MSR_EFER, &efer);
2844 	/* inject #GP if in real mode */
2845 	if (ctxt->mode == X86EMUL_MODE_REAL)
2846 		return emulate_gp(ctxt, 0);
2847 
2848 	/*
2849 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2850 	 * mode).
2851 	 */
2852 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2853 	    && !vendor_intel(ctxt))
2854 		return emulate_ud(ctxt);
2855 
2856 	/* sysenter/sysexit have not been tested in 64bit mode. */
2857 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2858 		return X86EMUL_UNHANDLEABLE;
2859 
2860 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2861 	if ((msr_data & 0xfffc) == 0x0)
2862 		return emulate_gp(ctxt, 0);
2863 
2864 	setup_syscalls_segments(ctxt, &cs, &ss);
2865 	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2866 	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2867 	ss_sel = cs_sel + 8;
2868 	if (efer & EFER_LMA) {
2869 		cs.d = 0;
2870 		cs.l = 1;
2871 	}
2872 
2873 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2874 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2875 
2876 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2877 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2878 
2879 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2880 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2881 							      (u32)msr_data;
2882 
2883 	return X86EMUL_CONTINUE;
2884 }
2885 
2886 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2887 {
2888 	const struct x86_emulate_ops *ops = ctxt->ops;
2889 	struct desc_struct cs, ss;
2890 	u64 msr_data, rcx, rdx;
2891 	int usermode;
2892 	u16 cs_sel = 0, ss_sel = 0;
2893 
2894 	/* inject #GP if in real mode or Virtual 8086 mode */
2895 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2896 	    ctxt->mode == X86EMUL_MODE_VM86)
2897 		return emulate_gp(ctxt, 0);
2898 
2899 	setup_syscalls_segments(ctxt, &cs, &ss);
2900 
2901 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2902 		usermode = X86EMUL_MODE_PROT64;
2903 	else
2904 		usermode = X86EMUL_MODE_PROT32;
2905 
2906 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2907 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2908 
2909 	cs.dpl = 3;
2910 	ss.dpl = 3;
2911 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2912 	switch (usermode) {
2913 	case X86EMUL_MODE_PROT32:
2914 		cs_sel = (u16)(msr_data + 16);
2915 		if ((msr_data & 0xfffc) == 0x0)
2916 			return emulate_gp(ctxt, 0);
2917 		ss_sel = (u16)(msr_data + 24);
2918 		rcx = (u32)rcx;
2919 		rdx = (u32)rdx;
2920 		break;
2921 	case X86EMUL_MODE_PROT64:
2922 		cs_sel = (u16)(msr_data + 32);
2923 		if (msr_data == 0x0)
2924 			return emulate_gp(ctxt, 0);
2925 		ss_sel = cs_sel + 8;
2926 		cs.d = 0;
2927 		cs.l = 1;
2928 		if (emul_is_noncanonical_address(rcx, ctxt) ||
2929 		    emul_is_noncanonical_address(rdx, ctxt))
2930 			return emulate_gp(ctxt, 0);
2931 		break;
2932 	}
2933 	cs_sel |= SEGMENT_RPL_MASK;
2934 	ss_sel |= SEGMENT_RPL_MASK;
2935 
2936 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2937 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2938 
2939 	ctxt->_eip = rdx;
2940 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2941 
2942 	return X86EMUL_CONTINUE;
2943 }
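
/*
 * E.g. with MSR_IA32_SYSENTER_CS = 0x10 (illustrative): a 32-bit
 * SYSEXIT loads CS = 0x20 | 3 = 0x23 and SS = 0x28 | 3 = 0x2b, while
 * the REX.W form loads CS = 0x30 | 3 = 0x33 and SS = 0x3b.
 */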
2944 
2945 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2946 {
2947 	int iopl;
2948 	if (ctxt->mode == X86EMUL_MODE_REAL)
2949 		return false;
2950 	if (ctxt->mode == X86EMUL_MODE_VM86)
2951 		return true;
2952 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2953 	return ctxt->ops->cpl(ctxt) > iopl;
2954 }
2955 
2956 #define VMWARE_PORT_VMPORT	(0x5658)
2957 #define VMWARE_PORT_VMRPC	(0x5659)
2958 
2959 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2960 					    u16 port, u16 len)
2961 {
2962 	const struct x86_emulate_ops *ops = ctxt->ops;
2963 	struct desc_struct tr_seg;
2964 	u32 base3;
2965 	int r;
2966 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2967 	unsigned mask = (1 << len) - 1;
2968 	unsigned long base;
2969 
2970 	/*
2971 	 * VMware allows access to these ports even if denied
2972 	 * by the TSS I/O permission bitmap.  Mimic that behavior.
2973 	 */
2974 	if (enable_vmware_backdoor &&
2975 	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2976 		return true;
2977 
2978 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2979 	if (!tr_seg.p)
2980 		return false;
2981 	if (desc_limit_scaled(&tr_seg) < 103)
2982 		return false;
2983 	base = get_desc_base(&tr_seg);
2984 #ifdef CONFIG_X86_64
2985 	base |= ((u64)base3) << 32;
2986 #endif
2987 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2988 	if (r != X86EMUL_CONTINUE)
2989 		return false;
2990 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2991 		return false;
2992 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2993 	if (r != X86EMUL_CONTINUE)
2994 		return false;
2995 	if ((perm >> bit_idx) & mask)
2996 		return false;
2997 	return true;
2998 }
2999 
3000 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
3001 				 u16 port, u16 len)
3002 {
3003 	if (ctxt->perm_ok)
3004 		return true;
3005 
3006 	if (emulator_bad_iopl(ctxt))
3007 		if (!emulator_io_port_access_allowed(ctxt, port, len))
3008 			return false;
3009 
3010 	ctxt->perm_ok = true;
3011 
3012 	return true;
3013 }
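
/*
 * Worked example: a 1-byte access to port 0x3f8 checks bit 0 of the
 * bitmap word read at TR.base + io_bitmap_ptr + 0x3f8 / 8; the access
 * is allowed iff every bit covered by mask = (1 << len) - 1 is clear.
 */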
3014 
3015 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
3016 {
3017 	/*
3018 	 * Intel CPUs mask the counter and pointers in a rather strange
3019 	 * manner when ECX is zero, due to REP-string optimizations.
3020 	 */
3021 #ifdef CONFIG_X86_64
3022 	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3023 		return;
3024 
3025 	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
3026 
3027 	switch (ctxt->b) {
3028 	case 0xa4:	/* movsb */
3029 	case 0xa5:	/* movsd/w */
3030 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
3031 		fallthrough;
3032 	case 0xaa:	/* stosb */
3033 	case 0xab:	/* stosd/w */
3034 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3035 	}
3036 #endif
3037 }
3038 
3039 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3040 				struct tss_segment_16 *tss)
3041 {
3042 	tss->ip = ctxt->_eip;
3043 	tss->flag = ctxt->eflags;
3044 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3045 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3046 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3047 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3048 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3049 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3050 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3051 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3052 
3053 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3054 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3055 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3056 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3057 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3058 }
3059 
3060 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3061 				 struct tss_segment_16 *tss)
3062 {
3063 	int ret;
3064 	u8 cpl;
3065 
3066 	ctxt->_eip = tss->ip;
3067 	ctxt->eflags = tss->flag | 2;	/* bit 1 of EFLAGS is fixed to 1 */
3068 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3069 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3070 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3071 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3072 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3073 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3074 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3075 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3076 
3077 	/*
3078 	 * SDM says that segment selectors are loaded before segment
3079 	 * descriptors
3080 	 */
3081 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3082 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3083 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3084 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3085 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3086 
3087 	cpl = tss->cs & 3;
3088 
3089 	/*
3090 	 * Now load segment descriptors. If a fault happens at this stage,
3091 	 * it is handled in the context of the new task.
3092 	 */
3093 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3094 					X86_TRANSFER_TASK_SWITCH, NULL);
3095 	if (ret != X86EMUL_CONTINUE)
3096 		return ret;
3097 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3098 					X86_TRANSFER_TASK_SWITCH, NULL);
3099 	if (ret != X86EMUL_CONTINUE)
3100 		return ret;
3101 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3102 					X86_TRANSFER_TASK_SWITCH, NULL);
3103 	if (ret != X86EMUL_CONTINUE)
3104 		return ret;
3105 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3106 					X86_TRANSFER_TASK_SWITCH, NULL);
3107 	if (ret != X86EMUL_CONTINUE)
3108 		return ret;
3109 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3110 					X86_TRANSFER_TASK_SWITCH, NULL);
3111 	if (ret != X86EMUL_CONTINUE)
3112 		return ret;
3113 
3114 	return X86EMUL_CONTINUE;
3115 }
3116 
3117 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3118 			  u16 tss_selector, u16 old_tss_sel,
3119 			  ulong old_tss_base, struct desc_struct *new_desc)
3120 {
3121 	struct tss_segment_16 tss_seg;
3122 	int ret;
3123 	u32 new_tss_base = get_desc_base(new_desc);
3124 
3125 	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3126 	if (ret != X86EMUL_CONTINUE)
3127 		return ret;
3128 
3129 	save_state_to_tss16(ctxt, &tss_seg);
3130 
3131 	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3132 	if (ret != X86EMUL_CONTINUE)
3133 		return ret;
3134 
3135 	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3136 	if (ret != X86EMUL_CONTINUE)
3137 		return ret;
3138 
3139 	if (old_tss_sel != 0xffff) {
3140 		tss_seg.prev_task_link = old_tss_sel;
3141 
3142 		ret = linear_write_system(ctxt, new_tss_base,
3143 					  &tss_seg.prev_task_link,
3144 					  sizeof(tss_seg.prev_task_link));
3145 		if (ret != X86EMUL_CONTINUE)
3146 			return ret;
3147 	}
3148 
3149 	return load_state_from_tss16(ctxt, &tss_seg);
3150 }
3151 
3152 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3153 				struct tss_segment_32 *tss)
3154 {
3155 	/* CR3 and the LDT selector are intentionally not saved */
3156 	tss->eip = ctxt->_eip;
3157 	tss->eflags = ctxt->eflags;
3158 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3159 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3160 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3161 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3162 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3163 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3164 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3165 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3166 
3167 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3168 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3169 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3170 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3171 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3172 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3173 }
3174 
3175 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3176 				 struct tss_segment_32 *tss)
3177 {
3178 	int ret;
3179 	u8 cpl;
3180 
3181 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3182 		return emulate_gp(ctxt, 0);
3183 	ctxt->_eip = tss->eip;
3184 	ctxt->eflags = tss->eflags | 2;	/* bit 1 of EFLAGS is fixed to 1 */
3185 
3186 	/* General purpose registers */
3187 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3188 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3189 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3190 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3191 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3192 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3193 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3194 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3195 
3196 	/*
3197 	 * SDM says that segment selectors are loaded before segment
3198 	 * descriptors.  This is important because CPL checks will
3199 	 * use CS.RPL.
3200 	 */
3201 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3202 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3203 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3204 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3205 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3206 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3207 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3208 
3209 	/*
3210 	 * If we're switching between Protected Mode and VM86, we need to make
3211 	 * sure to update the mode before loading the segment descriptors so
3212 	 * that the selectors are interpreted correctly.
3213 	 */
3214 	if (ctxt->eflags & X86_EFLAGS_VM) {
3215 		ctxt->mode = X86EMUL_MODE_VM86;
3216 		cpl = 3;
3217 	} else {
3218 		ctxt->mode = X86EMUL_MODE_PROT32;
3219 		cpl = tss->cs & 3;
3220 	}
3221 
3222 	/*
3223 	 * Now load segment descriptors. If a fault happens at this stage,
3224 	 * it is handled in the context of the new task.
3225 	 */
3226 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3227 					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3228 	if (ret != X86EMUL_CONTINUE)
3229 		return ret;
3230 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3231 					X86_TRANSFER_TASK_SWITCH, NULL);
3232 	if (ret != X86EMUL_CONTINUE)
3233 		return ret;
3234 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3235 					X86_TRANSFER_TASK_SWITCH, NULL);
3236 	if (ret != X86EMUL_CONTINUE)
3237 		return ret;
3238 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3239 					X86_TRANSFER_TASK_SWITCH, NULL);
3240 	if (ret != X86EMUL_CONTINUE)
3241 		return ret;
3242 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3243 					X86_TRANSFER_TASK_SWITCH, NULL);
3244 	if (ret != X86EMUL_CONTINUE)
3245 		return ret;
3246 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3247 					X86_TRANSFER_TASK_SWITCH, NULL);
3248 	if (ret != X86EMUL_CONTINUE)
3249 		return ret;
3250 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3251 					X86_TRANSFER_TASK_SWITCH, NULL);
3252 
3253 	return ret;
3254 }
3255 
3256 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3257 			  u16 tss_selector, u16 old_tss_sel,
3258 			  ulong old_tss_base, struct desc_struct *new_desc)
3259 {
3260 	struct tss_segment_32 tss_seg;
3261 	int ret;
3262 	u32 new_tss_base = get_desc_base(new_desc);
3263 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3264 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3265 
3266 	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3267 	if (ret != X86EMUL_CONTINUE)
3268 		return ret;
3269 
3270 	save_state_to_tss32(ctxt, &tss_seg);
3271 
3272 	/* Only GP registers and segment selectors are saved */
3273 	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3274 				  ldt_sel_offset - eip_offset);
3275 	if (ret != X86EMUL_CONTINUE)
3276 		return ret;
3277 
3278 	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3279 	if (ret != X86EMUL_CONTINUE)
3280 		return ret;
3281 
3282 	if (old_tss_sel != 0xffff) {
3283 		tss_seg.prev_task_link = old_tss_sel;
3284 
3285 		ret = linear_write_system(ctxt, new_tss_base,
3286 					  &tss_seg.prev_task_link,
3287 					  sizeof(tss_seg.prev_task_link));
3288 		if (ret != X86EMUL_CONTINUE)
3289 			return ret;
3290 	}
3291 
3292 	return load_state_from_tss32(ctxt, &tss_seg);
3293 }
3294 
3295 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3296 				   u16 tss_selector, int idt_index, int reason,
3297 				   bool has_error_code, u32 error_code)
3298 {
3299 	const struct x86_emulate_ops *ops = ctxt->ops;
3300 	struct desc_struct curr_tss_desc, next_tss_desc;
3301 	int ret;
3302 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3303 	ulong old_tss_base =
3304 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3305 	u32 desc_limit;
3306 	ulong desc_addr, dr7;
3307 
3308 	/* FIXME: old_tss_base == ~0 ? */
3309 
3310 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3311 	if (ret != X86EMUL_CONTINUE)
3312 		return ret;
3313 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3314 	if (ret != X86EMUL_CONTINUE)
3315 		return ret;
3316 
3317 	/* FIXME: check that next_tss_desc is tss */
3318 
3319 	/*
3320 	 * Check privileges. The three cases are task switch caused by...
3321 	 *
3322 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3323 	 * 2. Exception/IRQ/iret: No check is performed
3324 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3325 	 *    hardware checks it before exiting.
3326 	 */
3327 	if (reason == TASK_SWITCH_GATE) {
3328 		if (idt_index != -1) {
3329 			/* Software interrupts */
3330 			struct desc_struct task_gate_desc;
3331 			int dpl;
3332 
3333 			ret = read_interrupt_descriptor(ctxt, idt_index,
3334 							&task_gate_desc);
3335 			if (ret != X86EMUL_CONTINUE)
3336 				return ret;
3337 
3338 			dpl = task_gate_desc.dpl;
3339 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3340 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3341 		}
3342 	}
3343 
3344 	desc_limit = desc_limit_scaled(&next_tss_desc);
3345 	if (!next_tss_desc.p ||
3346 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3347 	     desc_limit < 0x2b)) {
3348 		return emulate_ts(ctxt, tss_selector & 0xfffc);
3349 	}
3350 
3351 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3352 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3353 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3354 	}
3355 
3356 	if (reason == TASK_SWITCH_IRET)
3357 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3358 
3359 	/* Set the back link to the previous task only if the NT bit is set
3360 	   in eflags; note that old_tss_sel is not used after this point. */
3361 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3362 		old_tss_sel = 0xffff;
3363 
3364 	if (next_tss_desc.type & 8)
3365 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3366 				     old_tss_base, &next_tss_desc);
3367 	else
3368 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3369 				     old_tss_base, &next_tss_desc);
3370 	if (ret != X86EMUL_CONTINUE)
3371 		return ret;
3372 
3373 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3374 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3375 
3376 	if (reason != TASK_SWITCH_IRET) {
3377 		next_tss_desc.type |= (1 << 1); /* set busy flag */
3378 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3379 	}
3380 
3381 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3382 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3383 
3384 	if (has_error_code) {
3385 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3386 		ctxt->lock_prefix = 0;
3387 		ctxt->src.val = (unsigned long) error_code;
3388 		ret = em_push(ctxt);
3389 	}
3390 
3391 	ops->get_dr(ctxt, 7, &dr7);
3392 	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3393 
3394 	return ret;
3395 }
3396 
3397 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3398 			 u16 tss_selector, int idt_index, int reason,
3399 			 bool has_error_code, u32 error_code)
3400 {
3401 	int rc;
3402 
3403 	invalidate_registers(ctxt);
3404 	ctxt->_eip = ctxt->eip;
3405 	ctxt->dst.type = OP_NONE;
3406 
3407 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3408 				     has_error_code, error_code);
3409 
3410 	if (rc == X86EMUL_CONTINUE) {
3411 		ctxt->eip = ctxt->_eip;
3412 		writeback_registers(ctxt);
3413 	}
3414 
3415 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3416 }
3417 
3418 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3419 		struct operand *op)
3420 {
3421 	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3422 
3423 	register_address_increment(ctxt, reg, df * op->bytes);
3424 	op->addr.mem.ea = register_address(ctxt, reg);
3425 }
3426 
3427 static int em_das(struct x86_emulate_ctxt *ctxt)
3428 {
3429 	u8 al, old_al;
3430 	bool af, cf, old_cf;
3431 
3432 	cf = ctxt->eflags & X86_EFLAGS_CF;
3433 	al = ctxt->dst.val;
3434 
3435 	old_al = al;
3436 	old_cf = cf;
3437 	cf = false;
3438 	af = ctxt->eflags & X86_EFLAGS_AF;
3439 	if ((al & 0x0f) > 9 || af) {
3440 		al -= 6;
3441 		cf = old_cf | (al >= 250);
3442 		af = true;
3443 	} else {
3444 		af = false;
3445 	}
3446 	if (old_al > 0x99 || old_cf) {
3447 		al -= 0x60;
3448 		cf = true;
3449 	}
3450 
3451 	ctxt->dst.val = al;
3452 	/* Set PF, ZF, SF */
3453 	ctxt->src.type = OP_IMM;
3454 	ctxt->src.val = 0;
3455 	ctxt->src.bytes = 1;
3456 	fastop(ctxt, em_or);
3457 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3458 	if (cf)
3459 		ctxt->eflags |= X86_EFLAGS_CF;
3460 	if (af)
3461 		ctxt->eflags |= X86_EFLAGS_AF;
3462 	return X86EMUL_CONTINUE;
3463 }
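
/*
 * Worked example: after the packed-BCD subtraction 0x23 - 0x08 = 0x1b,
 * DAS sees (AL & 0x0f) > 9, subtracts 6 and leaves AL = 0x15 with AF
 * set: the correct BCD result of 23 - 08.
 */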
3464 
3465 static int em_aam(struct x86_emulate_ctxt *ctxt)
3466 {
3467 	u8 al, ah;
3468 
3469 	if (ctxt->src.val == 0)
3470 		return emulate_de(ctxt);
3471 
3472 	al = ctxt->dst.val & 0xff;
3473 	ah = al / ctxt->src.val;
3474 	al %= ctxt->src.val;
3475 
3476 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3477 
3478 	/* Set PF, ZF, SF */
3479 	ctxt->src.type = OP_IMM;
3480 	ctxt->src.val = 0;
3481 	ctxt->src.bytes = 1;
3482 	fastop(ctxt, em_or);
3483 
3484 	return X86EMUL_CONTINUE;
3485 }
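
/*
 * Example: AAM with the default base 10 and AL = 0x25 (37 decimal)
 * yields AH = 3, AL = 7, i.e. unpacked BCD; "aam $16" would instead
 * split AL into its nibbles.
 */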
3486 
3487 static int em_aad(struct x86_emulate_ctxt *ctxt)
3488 {
3489 	u8 al = ctxt->dst.val & 0xff;
3490 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3491 
3492 	al = (al + (ah * ctxt->src.val)) & 0xff;
3493 
3494 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3495 
3496 	/* Set PF, ZF, SF */
3497 	ctxt->src.type = OP_IMM;
3498 	ctxt->src.val = 0;
3499 	ctxt->src.bytes = 1;
3500 	fastop(ctxt, em_or);
3501 
3502 	return X86EMUL_CONTINUE;
3503 }
3504 
3505 static int em_call(struct x86_emulate_ctxt *ctxt)
3506 {
3507 	int rc;
3508 	long rel = ctxt->src.val;
3509 
3510 	ctxt->src.val = (unsigned long)ctxt->_eip;
3511 	rc = jmp_rel(ctxt, rel);
3512 	if (rc != X86EMUL_CONTINUE)
3513 		return rc;
3514 	return em_push(ctxt);
3515 }
3516 
3517 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3518 {
3519 	u16 sel, old_cs;
3520 	ulong old_eip;
3521 	int rc;
3522 	struct desc_struct old_desc, new_desc;
3523 	const struct x86_emulate_ops *ops = ctxt->ops;
3524 	int cpl = ctxt->ops->cpl(ctxt);
3525 	enum x86emul_mode prev_mode = ctxt->mode;
3526 
3527 	old_eip = ctxt->_eip;
3528 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3529 
3530 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3531 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3532 				       X86_TRANSFER_CALL_JMP, &new_desc);
3533 	if (rc != X86EMUL_CONTINUE)
3534 		return rc;
3535 
3536 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3537 	if (rc != X86EMUL_CONTINUE)
3538 		goto fail;
3539 
3540 	ctxt->src.val = old_cs;
3541 	rc = em_push(ctxt);
3542 	if (rc != X86EMUL_CONTINUE)
3543 		goto fail;
3544 
3545 	ctxt->src.val = old_eip;
3546 	rc = em_push(ctxt);
3547 	/* If we failed, we tainted the memory, but at the very least we
3548 	   should restore cs. */
3549 	if (rc != X86EMUL_CONTINUE) {
3550 		pr_warn_once("faulting far call emulation tainted memory\n");
3551 		goto fail;
3552 	}
3553 	return rc;
3554 fail:
3555 	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3556 	ctxt->mode = prev_mode;
3557 	return rc;
3558 
3559 }
3560 
3561 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3562 {
3563 	int rc;
3564 	unsigned long eip;
3565 
3566 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3567 	if (rc != X86EMUL_CONTINUE)
3568 		return rc;
3569 	rc = assign_eip_near(ctxt, eip);
3570 	if (rc != X86EMUL_CONTINUE)
3571 		return rc;
3572 	rsp_increment(ctxt, ctxt->src.val);
3573 	return X86EMUL_CONTINUE;
3574 }
3575 
3576 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3577 {
3578 	/* Write back the register source. */
3579 	ctxt->src.val = ctxt->dst.val;
3580 	write_register_operand(&ctxt->src);
3581 
3582 	/* Write back the memory destination with implicit LOCK prefix. */
3583 	ctxt->dst.val = ctxt->src.orig_val;
3584 	ctxt->lock_prefix = 1;
3585 	return X86EMUL_CONTINUE;
3586 }
3587 
3588 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3589 {
3590 	ctxt->dst.val = ctxt->src2.val;
3591 	return fastop(ctxt, em_imul);
3592 }
3593 
3594 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3595 {
3596 	ctxt->dst.type = OP_REG;
3597 	ctxt->dst.bytes = ctxt->src.bytes;
3598 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3599 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3600 
3601 	return X86EMUL_CONTINUE;
3602 }
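
/*
 * E.g. CDQ with EAX = 0x80000000: src.val >> 31 is 1, so the
 * expression above stores ~0 (all ones, the sign extension) into EDX;
 * for a non-negative EAX it stores 0.
 */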
3603 
3604 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3605 {
3606 	u64 tsc_aux = 0;
3607 
3608 	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3609 		return emulate_ud(ctxt);
3610 	ctxt->dst.val = tsc_aux;
3611 	return X86EMUL_CONTINUE;
3612 }
3613 
3614 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3615 {
3616 	u64 tsc = 0;
3617 
3618 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3619 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3620 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3621 	return X86EMUL_CONTINUE;
3622 }
3623 
3624 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3625 {
3626 	u64 pmc;
3627 
3628 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3629 		return emulate_gp(ctxt, 0);
3630 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3631 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3632 	return X86EMUL_CONTINUE;
3633 }
3634 
3635 static int em_mov(struct x86_emulate_ctxt *ctxt)
3636 {
3637 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3638 	return X86EMUL_CONTINUE;
3639 }
3640 
3641 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3642 {
3643 	u16 tmp;
3644 
3645 	if (!ctxt->ops->guest_has_movbe(ctxt))
3646 		return emulate_ud(ctxt);
3647 
3648 	switch (ctxt->op_bytes) {
3649 	case 2:
3650 		/*
3651 		 * From MOVBE definition: "...When the operand size is 16 bits,
3652 		 * the upper word of the destination register remains unchanged
3653 		 * ..."
3654 		 *
3655 		 * Casting either ->valptr or ->val to u16 breaks strict aliasing
3656 		 * rules, so we have to do the operation almost by hand.
3657 		 */
3658 		tmp = (u16)ctxt->src.val;
3659 		ctxt->dst.val &= ~0xffffUL;
3660 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3661 		break;
3662 	case 4:
3663 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3664 		break;
3665 	case 8:
3666 		ctxt->dst.val = swab64(ctxt->src.val);
3667 		break;
3668 	default:
3669 		BUG();
3670 	}
3671 	return X86EMUL_CONTINUE;
3672 }
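
/*
 * Worked examples for the cases above (editorial illustration,
 * arbitrary values): with op_bytes == 4 and src.val == 0x11223344 the
 * destination becomes 0x44332211; with op_bytes == 2, src.val ==
 * 0x1122 and dst.val == 0xaaaabbbb, only the low word is swapped and
 * dst.val becomes 0xaaaa2211.
 */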
3673 
3674 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3675 {
3676 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3677 		return emulate_gp(ctxt, 0);
3678 
3679 	/* Disable writeback. */
3680 	ctxt->dst.type = OP_NONE;
3681 	return X86EMUL_CONTINUE;
3682 }
3683 
3684 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3685 {
3686 	unsigned long val;
3687 
3688 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3689 		val = ctxt->src.val & ~0ULL;
3690 	else
3691 		val = ctxt->src.val & ~0U;
3692 
3693 	/* #UD condition is already handled. */
3694 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3695 		return emulate_gp(ctxt, 0);
3696 
3697 	/* Disable writeback. */
3698 	ctxt->dst.type = OP_NONE;
3699 	return X86EMUL_CONTINUE;
3700 }
3701 
3702 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3703 {
3704 	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3705 	u64 msr_data;
3706 	int r;
3707 
3708 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3709 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3710 	r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3711 
3712 	if (r == X86EMUL_IO_NEEDED)
3713 		return r;
3714 
3715 	if (r > 0)
3716 		return emulate_gp(ctxt, 0);
3717 
3718 	return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3719 }
3720 
3721 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3722 {
3723 	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3724 	u64 msr_data;
3725 	int r;
3726 
3727 	r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3728 
3729 	if (r == X86EMUL_IO_NEEDED)
3730 		return r;
3731 
3732 	if (r)
3733 		return emulate_gp(ctxt, 0);
3734 
3735 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3736 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3737 	return X86EMUL_CONTINUE;
3738 }
3739 
3740 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3741 {
3742 	if (segment > VCPU_SREG_GS &&
3743 	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3744 	    ctxt->ops->cpl(ctxt) > 0)
3745 		return emulate_gp(ctxt, 0);
3746 
3747 	ctxt->dst.val = get_segment_selector(ctxt, segment);
3748 	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3749 		ctxt->dst.bytes = 2;
3750 	return X86EMUL_CONTINUE;
3751 }
3752 
3753 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3754 {
3755 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3756 		return emulate_ud(ctxt);
3757 
3758 	return em_store_sreg(ctxt, ctxt->modrm_reg);
3759 }
3760 
3761 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3762 {
3763 	u16 sel = ctxt->src.val;
3764 
3765 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3766 		return emulate_ud(ctxt);
3767 
3768 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3769 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3770 
3771 	/* Disable writeback. */
3772 	ctxt->dst.type = OP_NONE;
3773 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3774 }
3775 
3776 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3777 {
3778 	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3779 }
3780 
3781 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3782 {
3783 	u16 sel = ctxt->src.val;
3784 
3785 	/* Disable writeback. */
3786 	ctxt->dst.type = OP_NONE;
3787 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3788 }
3789 
3790 static int em_str(struct x86_emulate_ctxt *ctxt)
3791 {
3792 	return em_store_sreg(ctxt, VCPU_SREG_TR);
3793 }
3794 
3795 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3796 {
3797 	u16 sel = ctxt->src.val;
3798 
3799 	/* Disable writeback. */
3800 	ctxt->dst.type = OP_NONE;
3801 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3802 }
3803 
3804 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3805 {
3806 	int rc;
3807 	ulong linear;
3808 
3809 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3810 	if (rc == X86EMUL_CONTINUE)
3811 		ctxt->ops->invlpg(ctxt, linear);
3812 	/* Disable writeback. */
3813 	ctxt->dst.type = OP_NONE;
3814 	return X86EMUL_CONTINUE;
3815 }
3816 
3817 static int em_clts(struct x86_emulate_ctxt *ctxt)
3818 {
3819 	ulong cr0;
3820 
3821 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3822 	cr0 &= ~X86_CR0_TS;
3823 	ctxt->ops->set_cr(ctxt, 0, cr0);
3824 	return X86EMUL_CONTINUE;
3825 }
3826 
3827 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3828 {
3829 	int rc = ctxt->ops->fix_hypercall(ctxt);
3830 
3831 	if (rc != X86EMUL_CONTINUE)
3832 		return rc;
3833 
3834 	/* Let the processor re-execute the fixed hypercall */
3835 	ctxt->_eip = ctxt->eip;
3836 	/* Disable writeback. */
3837 	ctxt->dst.type = OP_NONE;
3838 	return X86EMUL_CONTINUE;
3839 }
3840 
3841 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3842 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3843 					      struct desc_ptr *ptr))
3844 {
3845 	struct desc_ptr desc_ptr;
3846 
3847 	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3848 	    ctxt->ops->cpl(ctxt) > 0)
3849 		return emulate_gp(ctxt, 0);
3850 
3851 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3852 		ctxt->op_bytes = 8;
3853 	get(ctxt, &desc_ptr);
3854 	if (ctxt->op_bytes == 2) {
3855 		ctxt->op_bytes = 4;
3856 		desc_ptr.address &= 0x00ffffff;
3857 	}
3858 	/* Disable writeback. */
3859 	ctxt->dst.type = OP_NONE;
3860 	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3861 				   &desc_ptr, 2 + ctxt->op_bytes);
3862 }
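
/*
 * Worked example for the 16-bit case above (editorial illustration,
 * arbitrary values): for SGDT with desc_ptr = { .size = 0x03ff,
 * .address = 0xfffe0000 }, the mask truncates the base to 0x00fe0000
 * and 2 + 4 = 6 bytes are written: the 16-bit limit followed by the
 * 32-bit base whose top byte is forced to zero.
 */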
3863 
3864 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3865 {
3866 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3867 }
3868 
3869 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3870 {
3871 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3872 }
3873 
3874 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3875 {
3876 	struct desc_ptr desc_ptr;
3877 	int rc;
3878 
3879 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3880 		ctxt->op_bytes = 8;
3881 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3882 			     &desc_ptr.size, &desc_ptr.address,
3883 			     ctxt->op_bytes);
3884 	if (rc != X86EMUL_CONTINUE)
3885 		return rc;
3886 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3887 	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3888 		return emulate_gp(ctxt, 0);
3889 	if (lgdt)
3890 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3891 	else
3892 		ctxt->ops->set_idt(ctxt, &desc_ptr);
3893 	/* Disable writeback. */
3894 	ctxt->dst.type = OP_NONE;
3895 	return X86EMUL_CONTINUE;
3896 }
3897 
3898 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3899 {
3900 	return em_lgdt_lidt(ctxt, true);
3901 }
3902 
3903 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3904 {
3905 	return em_lgdt_lidt(ctxt, false);
3906 }
3907 
3908 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3909 {
3910 	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3911 	    ctxt->ops->cpl(ctxt) > 0)
3912 		return emulate_gp(ctxt, 0);
3913 
3914 	if (ctxt->dst.type == OP_MEM)
3915 		ctxt->dst.bytes = 2;
3916 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3917 	return X86EMUL_CONTINUE;
3918 }
3919 
3920 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3921 {
3922 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3923 			  | (ctxt->src.val & 0x0f));
3924 	ctxt->dst.type = OP_NONE;
3925 	return X86EMUL_CONTINUE;
3926 }
3927 
3928 static int em_loop(struct x86_emulate_ctxt *ctxt)
3929 {
3930 	int rc = X86EMUL_CONTINUE;
3931 
3932 	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3933 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3934 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3935 		rc = jmp_rel(ctxt, ctxt->src.val);
3936 
3937 	return rc;
3938 }
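
/*
 * Decoding of the XOR trick above, for reference: the opcodes are
 * 0xe0 (LOOPNE), 0xe1 (LOOPE) and 0xe2 (LOOP).  XOR-ing with 0x5 maps
 * 0xe0 to condition code 5 (ZF == 0) and 0xe1 to condition code 4
 * (ZF == 1), so test_cc() checks exactly the flag LOOPNE/LOOPE
 * require, while plain LOOP (0xe2) bypasses the flag test entirely.
 */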
3939 
3940 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3941 {
3942 	int rc = X86EMUL_CONTINUE;
3943 
3944 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3945 		rc = jmp_rel(ctxt, ctxt->src.val);
3946 
3947 	return rc;
3948 }
3949 
3950 static int em_in(struct x86_emulate_ctxt *ctxt)
3951 {
3952 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3953 			     &ctxt->dst.val))
3954 		return X86EMUL_IO_NEEDED;
3955 
3956 	return X86EMUL_CONTINUE;
3957 }
3958 
3959 static int em_out(struct x86_emulate_ctxt *ctxt)
3960 {
3961 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3962 				    &ctxt->src.val, 1);
3963 	/* Disable writeback. */
3964 	ctxt->dst.type = OP_NONE;
3965 	return X86EMUL_CONTINUE;
3966 }
3967 
3968 static int em_cli(struct x86_emulate_ctxt *ctxt)
3969 {
3970 	if (emulator_bad_iopl(ctxt))
3971 		return emulate_gp(ctxt, 0);
3972 
3973 	ctxt->eflags &= ~X86_EFLAGS_IF;
3974 	return X86EMUL_CONTINUE;
3975 }
3976 
3977 static int em_sti(struct x86_emulate_ctxt *ctxt)
3978 {
3979 	if (emulator_bad_iopl(ctxt))
3980 		return emulate_gp(ctxt, 0);
3981 
3982 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3983 	ctxt->eflags |= X86_EFLAGS_IF;
3984 	return X86EMUL_CONTINUE;
3985 }
3986 
3987 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3988 {
3989 	u32 eax, ebx, ecx, edx;
3990 	u64 msr = 0;
3991 
3992 	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3993 	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3994 	    ctxt->ops->cpl(ctxt)) {
3995 		return emulate_gp(ctxt, 0);
3996 	}
3997 
3998 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3999 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
4000 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4001 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
4002 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
4003 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
4004 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
4005 	return X86EMUL_CONTINUE;
4006 }
4007 
4008 static int em_sahf(struct x86_emulate_ctxt *ctxt)
4009 {
4010 	u32 flags;
4011 
4012 	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
4013 		X86_EFLAGS_SF;
4014 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
4015 
4016 	ctxt->eflags &= ~0xffUL;
4017 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
4018 	return X86EMUL_CONTINUE;
4019 }
4020 
4021 static int em_lahf(struct x86_emulate_ctxt *ctxt)
4022 {
4023 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
4024 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4025 	return X86EMUL_CONTINUE;
4026 }
4027 
4028 static int em_bswap(struct x86_emulate_ctxt *ctxt)
4029 {
4030 	switch (ctxt->op_bytes) {
4031 #ifdef CONFIG_X86_64
4032 	case 8:
4033 		asm("bswap %0" : "+r"(ctxt->dst.val));
4034 		break;
4035 #endif
4036 	default:
4037 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4038 		break;
4039 	}
4040 	return X86EMUL_CONTINUE;
4041 }
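
/*
 * By way of example (editorial illustration, arbitrary value): a
 * 32-bit bswap turns 0x12345678 into 0x78563412.  The default case
 * also covers op_bytes == 2, for which hardware leaves BSWAP
 * undefined; treating it as a 32-bit swap is an emulation choice, not
 * an architectural guarantee.
 */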
4042 
4043 static int em_clflush(struct x86_emulate_ctxt *ctxt)
4044 {
4045 	/* emulate clflush as a nop, regardless of guest cpuid */
4046 	return X86EMUL_CONTINUE;
4047 }
4048 
4049 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4050 {
4051 	ctxt->dst.val = (s32) ctxt->src.val;
4052 	return X86EMUL_CONTINUE;
4053 }
4054 
4055 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4056 {
4057 	if (!ctxt->ops->guest_has_fxsr(ctxt))
4058 		return emulate_ud(ctxt);
4059 
4060 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4061 		return emulate_nm(ctxt);
4062 
4063 	/*
4064 	 * Don't emulate a case that should never be hit, instead of working
4065 	 * around a lack of fxsave64/fxrstor64 on old compilers.
4066 	 */
4067 	if (ctxt->mode >= X86EMUL_MODE_PROT64)
4068 		return X86EMUL_UNHANDLEABLE;
4069 
4070 	return X86EMUL_CONTINUE;
4071 }
4072 
4073 /*
4074  * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4075  * and restore MXCSR.
4076  */
4077 static size_t __fxstate_size(int nregs)
4078 {
4079 	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4080 }
4081 
4082 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4083 {
4084 	bool cr4_osfxsr;
4085 	if (ctxt->mode == X86EMUL_MODE_PROT64)
4086 		return __fxstate_size(16);
4087 
4088 	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4089 	return __fxstate_size(cr4_osfxsr ? 8 : 0);
4090 }
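
/*
 * Size arithmetic for reference, assuming the usual fxregs_state
 * layout with xmm_space starting at byte 160: __fxstate_size(0) is
 * 160 bytes (legacy x87 state including MXCSR), __fxstate_size(8) is
 * 160 + 8 * 16 = 288 bytes and __fxstate_size(16) is 416 bytes; the
 * rest of the 512-byte FXSAVE area is never copied.
 */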
4091 
4092 /*
4093  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4094  *  1) 16 bit mode
4095  *  2) 32 bit mode
4096  *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
4097  *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4098  *       save and restore
4099  *  3) 64-bit mode without REX.W prefix
4100  *     - like (2), but XMM 8-15 are saved and restored as well
4101  *  4) 64-bit mode with REX.W prefix (fxsave64/fxrstor64)
4102  *     - like (3), but FIP and FDP are 64 bit
4103  *
4104  * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4105  * desired result.  (4) is not emulated.
4106  *
4107  * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4108  * and FPU DS) should match.
4109  */
4110 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4111 {
4112 	struct fxregs_state fx_state;
4113 	int rc;
4114 
4115 	rc = check_fxsr(ctxt);
4116 	if (rc != X86EMUL_CONTINUE)
4117 		return rc;
4118 
4119 	emulator_get_fpu();
4120 
4121 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4122 
4123 	emulator_put_fpu();
4124 
4125 	if (rc != X86EMUL_CONTINUE)
4126 		return rc;
4127 
4128 	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4129 		                   fxstate_size(ctxt));
4130 }
4131 
4132 /*
4133  * FXRSTOR might restore XMM registers not provided by the guest. Fill
4134  * in the host registers (via FXSAVE) instead, so they won't be modified.
4135  * (preemption has to stay disabled until FXRSTOR).
4136  *
4137  * Use noinline to keep the stack for other functions called by callers small.
4138  */
4139 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4140 				 const size_t used_size)
4141 {
4142 	struct fxregs_state fx_tmp;
4143 	int rc;
4144 
4145 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4146 	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4147 	       __fxstate_size(16) - used_size);
4148 
4149 	return rc;
4150 }
4151 
4152 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4153 {
4154 	struct fxregs_state fx_state;
4155 	int rc;
4156 	size_t size;
4157 
4158 	rc = check_fxsr(ctxt);
4159 	if (rc != X86EMUL_CONTINUE)
4160 		return rc;
4161 
4162 	size = fxstate_size(ctxt);
4163 	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4164 	if (rc != X86EMUL_CONTINUE)
4165 		return rc;
4166 
4167 	emulator_get_fpu();
4168 
4169 	if (size < __fxstate_size(16)) {
4170 		rc = fxregs_fixup(&fx_state, size);
4171 		if (rc != X86EMUL_CONTINUE)
4172 			goto out;
4173 	}
4174 
4175 	if (fx_state.mxcsr >> 16) {
4176 		rc = emulate_gp(ctxt, 0);
4177 		goto out;
4178 	}
4179 
4180 	if (rc == X86EMUL_CONTINUE)
4181 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4182 
4183 out:
4184 	emulator_put_fpu();
4185 
4186 	return rc;
4187 }
4188 
4189 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4190 {
4191 	u32 eax, ecx, edx;
4192 
4193 	eax = reg_read(ctxt, VCPU_REGS_RAX);
4194 	edx = reg_read(ctxt, VCPU_REGS_RDX);
4195 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
4196 
4197 	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4198 		return emulate_gp(ctxt, 0);
4199 
4200 	return X86EMUL_CONTINUE;
4201 }
4202 
4203 static bool valid_cr(int nr)
4204 {
4205 	switch (nr) {
4206 	case 0:
4207 	case 2 ... 4:
4208 	case 8:
4209 		return true;
4210 	default:
4211 		return false;
4212 	}
4213 }
4214 
4215 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4216 {
4217 	if (!valid_cr(ctxt->modrm_reg))
4218 		return emulate_ud(ctxt);
4219 
4220 	return X86EMUL_CONTINUE;
4221 }
4222 
4223 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4224 {
4225 	u64 new_val = ctxt->src.val64;
4226 	int cr = ctxt->modrm_reg;
4227 	u64 efer = 0;
4228 
4229 	static u64 cr_reserved_bits[] = {
4230 		0xffffffff00000000ULL,
4231 		0, 0, 0, /* CR3 checked later */
4232 		CR4_RESERVED_BITS,
4233 		0, 0, 0,
4234 		CR8_RESERVED_BITS,
4235 	};
4236 
4237 	if (!valid_cr(cr))
4238 		return emulate_ud(ctxt);
4239 
4240 	if (new_val & cr_reserved_bits[cr])
4241 		return emulate_gp(ctxt, 0);
4242 
4243 	switch (cr) {
4244 	case 0: {
4245 		u64 cr4;
4246 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4247 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4248 			return emulate_gp(ctxt, 0);
4249 
4250 		cr4 = ctxt->ops->get_cr(ctxt, 4);
4251 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4252 
4253 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4254 		    !(cr4 & X86_CR4_PAE))
4255 			return emulate_gp(ctxt, 0);
4256 
4257 		break;
4258 		}
4259 	case 3: {
4260 		u64 rsvd = 0;
4261 
4262 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4263 		if (efer & EFER_LMA) {
4264 			u64 maxphyaddr;
4265 			u32 eax, ebx, ecx, edx;
4266 
4267 			eax = 0x80000008;
4268 			ecx = 0;
4269 			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4270 						 &edx, true))
4271 				maxphyaddr = eax & 0xff;
4272 			else
4273 				maxphyaddr = 36;
4274 			rsvd = rsvd_bits(maxphyaddr, 63);
4275 			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4276 				rsvd &= ~X86_CR3_PCID_NOFLUSH;
4277 		}
4278 
4279 		if (new_val & rsvd)
4280 			return emulate_gp(ctxt, 0);
4281 
4282 		break;
4283 		}
4284 	case 4: {
4285 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4286 
4287 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4288 			return emulate_gp(ctxt, 0);
4289 
4290 		break;
4291 		}
4292 	}
4293 
4294 	return X86EMUL_CONTINUE;
4295 }
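
/*
 * Worked example for the CR3 check above (editorial illustration,
 * hypothetical guest): with MAXPHYADDR = 36 and EFER.LMA set,
 * rsvd_bits(36, 63) evaluates to 0xfffffff000000000, so a CR3 value
 * such as 0x0000001000000000 (bit 36 set) takes the emulate_gp() path.
 */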
4296 
4297 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4298 {
4299 	unsigned long dr7;
4300 
4301 	ctxt->ops->get_dr(ctxt, 7, &dr7);
4302 
4303 	/* Check if DR7.Global_Enable is set */
4304 	return dr7 & (1 << 13);
4305 }
4306 
4307 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4308 {
4309 	int dr = ctxt->modrm_reg;
4310 	u64 cr4;
4311 
4312 	if (dr > 7)
4313 		return emulate_ud(ctxt);
4314 
4315 	cr4 = ctxt->ops->get_cr(ctxt, 4);
4316 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4317 		return emulate_ud(ctxt);
4318 
4319 	if (check_dr7_gd(ctxt)) {
4320 		ulong dr6;
4321 
4322 		ctxt->ops->get_dr(ctxt, 6, &dr6);
4323 		dr6 &= ~DR_TRAP_BITS;
4324 		dr6 |= DR6_BD | DR6_RTM;
4325 		ctxt->ops->set_dr(ctxt, 6, dr6);
4326 		return emulate_db(ctxt);
4327 	}
4328 
4329 	return X86EMUL_CONTINUE;
4330 }
4331 
4332 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4333 {
4334 	u64 new_val = ctxt->src.val64;
4335 	int dr = ctxt->modrm_reg;
4336 
4337 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4338 		return emulate_gp(ctxt, 0);
4339 
4340 	return check_dr_read(ctxt);
4341 }
4342 
4343 static int check_svme(struct x86_emulate_ctxt *ctxt)
4344 {
4345 	u64 efer = 0;
4346 
4347 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4348 
4349 	if (!(efer & EFER_SVME))
4350 		return emulate_ud(ctxt);
4351 
4352 	return X86EMUL_CONTINUE;
4353 }
4354 
4355 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4356 {
4357 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4358 
4359 	/* Valid physical address? */
4360 	if (rax & 0xffff000000000000ULL)
4361 		return emulate_gp(ctxt, 0);
4362 
4363 	return check_svme(ctxt);
4364 }
4365 
4366 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4367 {
4368 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4369 
4370 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4371 		return emulate_ud(ctxt);
4372 
4373 	return X86EMUL_CONTINUE;
4374 }
4375 
4376 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4377 {
4378 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4379 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4380 
4381 	/*
4382 	 * VMware allows access to these pseudo-PMCs even when read via RDPMC
4383 	 * in Ring3 when CR4.PCE=0.
4384 	 */
4385 	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4386 		return X86EMUL_CONTINUE;
4387 
4388 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4389 	    ctxt->ops->check_pmc(ctxt, rcx))
4390 		return emulate_gp(ctxt, 0);
4391 
4392 	return X86EMUL_CONTINUE;
4393 }
4394 
4395 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4396 {
4397 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4398 	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4399 		return emulate_gp(ctxt, 0);
4400 
4401 	return X86EMUL_CONTINUE;
4402 }
4403 
4404 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4405 {
4406 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4407 	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4408 		return emulate_gp(ctxt, 0);
4409 
4410 	return X86EMUL_CONTINUE;
4411 }
4412 
4413 #define D(_y) { .flags = (_y) }
4414 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4415 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4416 		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4417 #define N    D(NotImpl)
4418 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4419 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4420 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4421 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4422 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4423 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4424 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4425 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4426 #define II(_f, _e, _i) \
4427 	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4428 #define IIP(_f, _e, _i, _p) \
4429 	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4430 	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4431 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4432 
4433 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
4434 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4435 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4436 #define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4437 #define I2bvIP(_f, _e, _i, _p) \
4438 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4439 
4440 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4441 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4442 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
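
/*
 * For reference, F6ALU(Lock, em_add) expands to the six classic ALU
 * encodings in opcode order, e.g. for ADD: 0x00 r/m8,r8 and 0x01
 * r/m,r (DstMem | SrcReg), 0x02 r8,r/m8 and 0x03 r,r/m (DstReg |
 * SrcMem, never locked), then 0x04 AL,imm8 and 0x05 eAX,imm
 * (DstAcc | SrcImm, also never locked).
 */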
4443 
4444 static const struct opcode group7_rm0[] = {
4445 	N,
4446 	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4447 	N, N, N, N, N, N,
4448 };
4449 
4450 static const struct opcode group7_rm1[] = {
4451 	DI(SrcNone | Priv, monitor),
4452 	DI(SrcNone | Priv, mwait),
4453 	N, N, N, N, N, N,
4454 };
4455 
4456 static const struct opcode group7_rm2[] = {
4457 	N,
4458 	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
4459 	N, N, N, N, N, N,
4460 };
4461 
4462 static const struct opcode group7_rm3[] = {
4463 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4464 	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4465 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4466 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4467 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4468 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4469 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4470 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4471 };
4472 
4473 static const struct opcode group7_rm7[] = {
4474 	N,
4475 	DIP(SrcNone, rdtscp, check_rdtsc),
4476 	N, N, N, N, N, N,
4477 };
4478 
4479 static const struct opcode group1[] = {
4480 	F(Lock, em_add),
4481 	F(Lock | PageTable, em_or),
4482 	F(Lock, em_adc),
4483 	F(Lock, em_sbb),
4484 	F(Lock | PageTable, em_and),
4485 	F(Lock, em_sub),
4486 	F(Lock, em_xor),
4487 	F(NoWrite, em_cmp),
4488 };
4489 
4490 static const struct opcode group1A[] = {
4491 	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4492 };
4493 
4494 static const struct opcode group2[] = {
4495 	F(DstMem | ModRM, em_rol),
4496 	F(DstMem | ModRM, em_ror),
4497 	F(DstMem | ModRM, em_rcl),
4498 	F(DstMem | ModRM, em_rcr),
4499 	F(DstMem | ModRM, em_shl),
4500 	F(DstMem | ModRM, em_shr),
4501 	F(DstMem | ModRM, em_shl), /* /6: undocumented SAL alias of /4 */
4502 	F(DstMem | ModRM, em_sar),
4503 };
4504 
4505 static const struct opcode group3[] = {
4506 	F(DstMem | SrcImm | NoWrite, em_test),
4507 	F(DstMem | SrcImm | NoWrite, em_test),
4508 	F(DstMem | SrcNone | Lock, em_not),
4509 	F(DstMem | SrcNone | Lock, em_neg),
4510 	F(DstXacc | Src2Mem, em_mul_ex),
4511 	F(DstXacc | Src2Mem, em_imul_ex),
4512 	F(DstXacc | Src2Mem, em_div_ex),
4513 	F(DstXacc | Src2Mem, em_idiv_ex),
4514 };
4515 
4516 static const struct opcode group4[] = {
4517 	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4518 	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4519 	N, N, N, N, N, N,
4520 };
4521 
4522 static const struct opcode group5[] = {
4523 	F(DstMem | SrcNone | Lock,		em_inc),
4524 	F(DstMem | SrcNone | Lock,		em_dec),
4525 	I(SrcMem | NearBranch,			em_call_near_abs),
4526 	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4527 	I(SrcMem | NearBranch,			em_jmp_abs),
4528 	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4529 	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4530 };
4531 
4532 static const struct opcode group6[] = {
4533 	II(Prot | DstMem,	   em_sldt, sldt),
4534 	II(Prot | DstMem,	   em_str, str),
4535 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4536 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4537 	N, N, N, N,
4538 };
4539 
4540 static const struct group_dual group7 = { {
4541 	II(Mov | DstMem,			em_sgdt, sgdt),
4542 	II(Mov | DstMem,			em_sidt, sidt),
4543 	II(SrcMem | Priv,			em_lgdt, lgdt),
4544 	II(SrcMem | Priv,			em_lidt, lidt),
4545 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4546 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4547 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4548 }, {
4549 	EXT(0, group7_rm0),
4550 	EXT(0, group7_rm1),
4551 	EXT(0, group7_rm2),
4552 	EXT(0, group7_rm3),
4553 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4554 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4555 	EXT(0, group7_rm7),
4556 } };
4557 
4558 static const struct opcode group8[] = {
4559 	N, N, N, N,
4560 	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4561 	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4562 	F(DstMem | SrcImmByte | Lock,			em_btr),
4563 	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4564 };
4565 
4566 /*
4567  * The "memory" destination is actually always a register, since we come
4568  * from the register case of group9.
4569  */
4570 static const struct gprefix pfx_0f_c7_7 = {
4571 	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4572 };
4573 
4575 static const struct group_dual group9 = { {
4576 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4577 }, {
4578 	N, N, N, N, N, N, N,
4579 	GP(0, &pfx_0f_c7_7),
4580 } };
4581 
4582 static const struct opcode group11[] = {
4583 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4584 	X7(D(Undefined)),
4585 };
4586 
4587 static const struct gprefix pfx_0f_ae_7 = {
4588 	I(SrcMem | ByteOp, em_clflush), N, N, N,
4589 };
4590 
4591 static const struct group_dual group15 = { {
4592 	I(ModRM | Aligned16, em_fxsave),
4593 	I(ModRM | Aligned16, em_fxrstor),
4594 	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4595 }, {
4596 	N, N, N, N, N, N, N, N,
4597 } };
4598 
4599 static const struct gprefix pfx_0f_6f_0f_7f = {
4600 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4601 };
4602 
4603 static const struct instr_dual instr_dual_0f_2b = {
4604 	I(0, em_mov), N
4605 };
4606 
4607 static const struct gprefix pfx_0f_2b = {
4608 	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4609 };
4610 
4611 static const struct gprefix pfx_0f_10_0f_11 = {
4612 	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4613 };
4614 
4615 static const struct gprefix pfx_0f_28_0f_29 = {
4616 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4617 };
4618 
4619 static const struct gprefix pfx_0f_e7 = {
4620 	N, I(Sse, em_mov), N, N,
4621 };
4622 
4623 static const struct escape escape_d9 = { {
4624 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4625 }, {
4626 	/* 0xC0 - 0xC7 */
4627 	N, N, N, N, N, N, N, N,
4628 	/* 0xC8 - 0xCF */
4629 	N, N, N, N, N, N, N, N,
4630 	/* 0xD0 - 0xD7 */
4631 	N, N, N, N, N, N, N, N,
4632 	/* 0xD8 - 0xDF */
4633 	N, N, N, N, N, N, N, N,
4634 	/* 0xE0 - 0xE7 */
4635 	N, N, N, N, N, N, N, N,
4636 	/* 0xE8 - 0xEF */
4637 	N, N, N, N, N, N, N, N,
4638 	/* 0xF0 - 0xF7 */
4639 	N, N, N, N, N, N, N, N,
4640 	/* 0xF8 - 0xFF */
4641 	N, N, N, N, N, N, N, N,
4642 } };
4643 
4644 static const struct escape escape_db = { {
4645 	N, N, N, N, N, N, N, N,
4646 }, {
4647 	/* 0xC0 - 0xC7 */
4648 	N, N, N, N, N, N, N, N,
4649 	/* 0xC8 - 0xCF */
4650 	N, N, N, N, N, N, N, N,
4651 	/* 0xD0 - 0xD7 */
4652 	N, N, N, N, N, N, N, N,
4653 	/* 0xD8 - 0xDF */
4654 	N, N, N, N, N, N, N, N,
4655 	/* 0xE0 - 0xE7 */
4656 	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4657 	/* 0xE8 - 0xEF */
4658 	N, N, N, N, N, N, N, N,
4659 	/* 0xF0 - 0xF7 */
4660 	N, N, N, N, N, N, N, N,
4661 	/* 0xF8 - 0xFF */
4662 	N, N, N, N, N, N, N, N,
4663 } };
4664 
4665 static const struct escape escape_dd = { {
4666 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4667 }, {
4668 	/* 0xC0 - 0xC7 */
4669 	N, N, N, N, N, N, N, N,
4670 	/* 0xC8 - 0xCF */
4671 	N, N, N, N, N, N, N, N,
4672 	/* 0xD0 - 0xD7 */
4673 	N, N, N, N, N, N, N, N,
4674 	/* 0xD8 - 0xDF */
4675 	N, N, N, N, N, N, N, N,
4676 	/* 0xE0 - 0xE7 */
4677 	N, N, N, N, N, N, N, N,
4678 	/* 0xE8 - 0xEF */
4679 	N, N, N, N, N, N, N, N,
4680 	/* 0xF0 - 0xF7 */
4681 	N, N, N, N, N, N, N, N,
4682 	/* 0xF8 - 0xFF */
4683 	N, N, N, N, N, N, N, N,
4684 } };
4685 
4686 static const struct instr_dual instr_dual_0f_c3 = {
4687 	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4688 };
4689 
4690 static const struct mode_dual mode_dual_63 = {
4691 	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4692 };
4693 
4694 static const struct opcode opcode_table[256] = {
4695 	/* 0x00 - 0x07 */
4696 	F6ALU(Lock, em_add),
4697 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4698 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4699 	/* 0x08 - 0x0F */
4700 	F6ALU(Lock | PageTable, em_or),
4701 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4702 	N,
4703 	/* 0x10 - 0x17 */
4704 	F6ALU(Lock, em_adc),
4705 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4706 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4707 	/* 0x18 - 0x1F */
4708 	F6ALU(Lock, em_sbb),
4709 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4710 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4711 	/* 0x20 - 0x27 */
4712 	F6ALU(Lock | PageTable, em_and), N, N,
4713 	/* 0x28 - 0x2F */
4714 	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4715 	/* 0x30 - 0x37 */
4716 	F6ALU(Lock, em_xor), N, N,
4717 	/* 0x38 - 0x3F */
4718 	F6ALU(NoWrite, em_cmp), N, N,
4719 	/* 0x40 - 0x4F */
4720 	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4721 	/* 0x50 - 0x57 */
4722 	X8(I(SrcReg | Stack, em_push)),
4723 	/* 0x58 - 0x5F */
4724 	X8(I(DstReg | Stack, em_pop)),
4725 	/* 0x60 - 0x67 */
4726 	I(ImplicitOps | Stack | No64, em_pusha),
4727 	I(ImplicitOps | Stack | No64, em_popa),
4728 	N, MD(ModRM, &mode_dual_63),
4729 	N, N, N, N,
4730 	/* 0x68 - 0x6F */
4731 	I(SrcImm | Mov | Stack, em_push),
4732 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4733 	I(SrcImmByte | Mov | Stack, em_push),
4734 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4735 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4736 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4737 	/* 0x70 - 0x7F */
4738 	X16(D(SrcImmByte | NearBranch)),
4739 	/* 0x80 - 0x87 */
4740 	G(ByteOp | DstMem | SrcImm, group1),
4741 	G(DstMem | SrcImm, group1),
4742 	G(ByteOp | DstMem | SrcImm | No64, group1),
4743 	G(DstMem | SrcImmByte, group1),
4744 	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4745 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4746 	/* 0x88 - 0x8F */
4747 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4748 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4749 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4750 	D(ModRM | SrcMem | NoAccess | DstReg),
4751 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4752 	G(0, group1A),
4753 	/* 0x90 - 0x97 */
4754 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4755 	/* 0x98 - 0x9F */
4756 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4757 	I(SrcImmFAddr | No64, em_call_far), N,
4758 	II(ImplicitOps | Stack, em_pushf, pushf),
4759 	II(ImplicitOps | Stack, em_popf, popf),
4760 	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4761 	/* 0xA0 - 0xA7 */
4762 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4763 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4764 	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4765 	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4766 	/* 0xA8 - 0xAF */
4767 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4768 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4769 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4770 	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4771 	/* 0xB0 - 0xB7 */
4772 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4773 	/* 0xB8 - 0xBF */
4774 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4775 	/* 0xC0 - 0xC7 */
4776 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4777 	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4778 	I(ImplicitOps | NearBranch, em_ret),
4779 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4780 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4781 	G(ByteOp, group11), G(0, group11),
4782 	/* 0xC8 - 0xCF */
4783 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4784 	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4785 	I(ImplicitOps, em_ret_far),
4786 	D(ImplicitOps), DI(SrcImmByte, intn),
4787 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4788 	/* 0xD0 - 0xD7 */
4789 	G(Src2One | ByteOp, group2), G(Src2One, group2),
4790 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4791 	I(DstAcc | SrcImmUByte | No64, em_aam),
4792 	I(DstAcc | SrcImmUByte | No64, em_aad),
4793 	F(DstAcc | ByteOp | No64, em_salc),
4794 	I(DstAcc | SrcXLat | ByteOp, em_mov),
4795 	/* 0xD8 - 0xDF */
4796 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4797 	/* 0xE0 - 0xE7 */
4798 	X3(I(SrcImmByte | NearBranch, em_loop)),
4799 	I(SrcImmByte | NearBranch, em_jcxz),
4800 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4801 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4802 	/* 0xE8 - 0xEF */
4803 	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4804 	I(SrcImmFAddr | No64, em_jmp_far),
4805 	D(SrcImmByte | ImplicitOps | NearBranch),
4806 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4807 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4808 	/* 0xF0 - 0xF7 */
4809 	N, DI(ImplicitOps, icebp), N, N,
4810 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4811 	G(ByteOp, group3), G(0, group3),
4812 	/* 0xF8 - 0xFF */
4813 	D(ImplicitOps), D(ImplicitOps),
4814 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4815 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4816 };
4817 
4818 static const struct opcode twobyte_table[256] = {
4819 	/* 0x00 - 0x0F */
4820 	G(0, group6), GD(0, &group7), N, N,
4821 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4822 	II(ImplicitOps | Priv, em_clts, clts), N,
4823 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4824 	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4825 	/* 0x10 - 0x1F */
4826 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4827 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4828 	N, N, N, N, N, N,
4829 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4830 	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4831 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4832 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4833 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4834 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4835 	/* 0x20 - 0x2F */
4836 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4837 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4838 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4839 						check_cr_write),
4840 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4841 						check_dr_write),
4842 	N, N, N, N,
4843 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4844 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4845 	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4846 	N, N, N, N,
4847 	/* 0x30 - 0x3F */
4848 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4849 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4850 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4851 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4852 	I(ImplicitOps | EmulateOnUD, em_sysenter),
4853 	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4854 	N, N,
4855 	N, N, N, N, N, N, N, N,
4856 	/* 0x40 - 0x4F */
4857 	X16(D(DstReg | SrcMem | ModRM)),
4858 	/* 0x50 - 0x5F */
4859 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4860 	/* 0x60 - 0x6F */
4861 	N, N, N, N,
4862 	N, N, N, N,
4863 	N, N, N, N,
4864 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4865 	/* 0x70 - 0x7F */
4866 	N, N, N, N,
4867 	N, N, N, N,
4868 	N, N, N, N,
4869 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4870 	/* 0x80 - 0x8F */
4871 	X16(D(SrcImm | NearBranch)),
4872 	/* 0x90 - 0x9F */
4873 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4874 	/* 0xA0 - 0xA7 */
4875 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4876 	II(ImplicitOps, em_cpuid, cpuid),
4877 	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4878 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4879 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4880 	/* 0xA8 - 0xAF */
4881 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4882 	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4883 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4884 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4885 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4886 	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4887 	/* 0xB0 - 0xB7 */
4888 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4889 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4890 	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4891 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4892 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4893 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4894 	/* 0xB8 - 0xBF */
4895 	N, N,
4896 	G(BitOp, group8),
4897 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4898 	I(DstReg | SrcMem | ModRM, em_bsf_c),
4899 	I(DstReg | SrcMem | ModRM, em_bsr_c),
4900 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4901 	/* 0xC0 - 0xC7 */
4902 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4903 	N, ID(0, &instr_dual_0f_c3),
4904 	N, N, N, GD(0, &group9),
4905 	/* 0xC8 - 0xCF */
4906 	X8(I(DstReg, em_bswap)),
4907 	/* 0xD0 - 0xDF */
4908 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4909 	/* 0xE0 - 0xEF */
4910 	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4911 	N, N, N, N, N, N, N, N,
4912 	/* 0xF0 - 0xFF */
4913 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4914 };
4915 
4916 static const struct instr_dual instr_dual_0f_38_f0 = {
4917 	I(DstReg | SrcMem | Mov, em_movbe), N
4918 };
4919 
4920 static const struct instr_dual instr_dual_0f_38_f1 = {
4921 	I(DstMem | SrcReg | Mov, em_movbe), N
4922 };
4923 
4924 static const struct gprefix three_byte_0f_38_f0 = {
4925 	ID(0, &instr_dual_0f_38_f0), N, N, N
4926 };
4927 
4928 static const struct gprefix three_byte_0f_38_f1 = {
4929 	ID(0, &instr_dual_0f_38_f1), N, N, N
4930 };
4931 
4932 /*
4933  * Insns below are selected by the SIMD prefix; the table itself is
4934  * indexed by the third opcode byte.
4935  */
4936 static const struct opcode opcode_map_0f_38[256] = {
4937 	/* 0x00 - 0x7f */
4938 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4939 	/* 0x80 - 0xef */
4940 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4941 	/* 0xf0 - 0xf1 */
4942 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4943 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4944 	/* 0xf2 - 0xff */
4945 	N, N, X4(N), X8(N)
4946 };
4947 
4948 #undef D
4949 #undef N
4950 #undef G
4951 #undef GD
4952 #undef I
4953 #undef GP
4954 #undef EXT
4955 #undef MD
4956 #undef ID
4957 
4958 #undef D2bv
4959 #undef D2bvIP
4960 #undef I2bv
4961 #undef I2bvIP
4962 #undef F6ALU
4963 
4964 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4965 {
4966 	unsigned size;
4967 
4968 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4969 	if (size == 8)
4970 		size = 4;
4971 	return size;
4972 }
4973 
4974 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4975 		      unsigned size, bool sign_extension)
4976 {
4977 	int rc = X86EMUL_CONTINUE;
4978 
4979 	op->type = OP_IMM;
4980 	op->bytes = size;
4981 	op->addr.mem.ea = ctxt->_eip;
4982 	/* NB. Immediates are sign-extended as necessary. */
4983 	switch (op->bytes) {
4984 	case 1:
4985 		op->val = insn_fetch(s8, ctxt);
4986 		break;
4987 	case 2:
4988 		op->val = insn_fetch(s16, ctxt);
4989 		break;
4990 	case 4:
4991 		op->val = insn_fetch(s32, ctxt);
4992 		break;
4993 	case 8:
4994 		op->val = insn_fetch(s64, ctxt);
4995 		break;
4996 	}
4997 	if (!sign_extension) {
4998 		switch (op->bytes) {
4999 		case 1:
5000 			op->val &= 0xff;
5001 			break;
5002 		case 2:
5003 			op->val &= 0xffff;
5004 			break;
5005 		case 4:
5006 			op->val &= 0xffffffff;
5007 			break;
5008 		}
5009 	}
5010 done:
5011 	return rc;
5012 }
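
/*
 * By way of example (editorial illustration, arbitrary byte): fetching
 * the immediate byte 0x80 with sign_extension == true leaves op->val
 * sign-extended to -128 (0x...ffffff80), while with sign_extension ==
 * false the masking above truncates it back to 0x80, as OpImmUByte
 * requires.
 */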
5013 
5014 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
5015 			  unsigned d)
5016 {
5017 	int rc = X86EMUL_CONTINUE;
5018 
5019 	switch (d) {
5020 	case OpReg:
5021 		decode_register_operand(ctxt, op);
5022 		break;
5023 	case OpImmUByte:
5024 		rc = decode_imm(ctxt, op, 1, false);
5025 		break;
5026 	case OpMem:
5027 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5028 	mem_common:
5029 		*op = ctxt->memop;
5030 		ctxt->memopp = op;
5031 		if (ctxt->d & BitOp)
5032 			fetch_bit_operand(ctxt);
5033 		op->orig_val = op->val;
5034 		break;
5035 	case OpMem64:
5036 		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5037 		goto mem_common;
5038 	case OpAcc:
5039 		op->type = OP_REG;
5040 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5041 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5042 		fetch_register_operand(op);
5043 		op->orig_val = op->val;
5044 		break;
5045 	case OpAccLo:
5046 		op->type = OP_REG;
5047 		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5048 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5049 		fetch_register_operand(op);
5050 		op->orig_val = op->val;
5051 		break;
5052 	case OpAccHi:
5053 		if (ctxt->d & ByteOp) {
5054 			op->type = OP_NONE;
5055 			break;
5056 		}
5057 		op->type = OP_REG;
5058 		op->bytes = ctxt->op_bytes;
5059 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5060 		fetch_register_operand(op);
5061 		op->orig_val = op->val;
5062 		break;
5063 	case OpDI:
5064 		op->type = OP_MEM;
5065 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5066 		op->addr.mem.ea =
5067 			register_address(ctxt, VCPU_REGS_RDI);
5068 		op->addr.mem.seg = VCPU_SREG_ES;
5069 		op->val = 0;
5070 		op->count = 1;
5071 		break;
5072 	case OpDX:
5073 		op->type = OP_REG;
5074 		op->bytes = 2;
5075 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5076 		fetch_register_operand(op);
5077 		break;
5078 	case OpCL:
5079 		op->type = OP_IMM;
5080 		op->bytes = 1;
5081 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5082 		break;
5083 	case OpImmByte:
5084 		rc = decode_imm(ctxt, op, 1, true);
5085 		break;
5086 	case OpOne:
5087 		op->type = OP_IMM;
5088 		op->bytes = 1;
5089 		op->val = 1;
5090 		break;
5091 	case OpImm:
5092 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5093 		break;
5094 	case OpImm64:
5095 		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5096 		break;
5097 	case OpMem8:
5098 		ctxt->memop.bytes = 1;
5099 		if (ctxt->memop.type == OP_REG) {
5100 			ctxt->memop.addr.reg = decode_register(ctxt,
5101 					ctxt->modrm_rm, true);
5102 			fetch_register_operand(&ctxt->memop);
5103 		}
5104 		goto mem_common;
5105 	case OpMem16:
5106 		ctxt->memop.bytes = 2;
5107 		goto mem_common;
5108 	case OpMem32:
5109 		ctxt->memop.bytes = 4;
5110 		goto mem_common;
5111 	case OpImmU16:
5112 		rc = decode_imm(ctxt, op, 2, false);
5113 		break;
5114 	case OpImmU:
5115 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5116 		break;
5117 	case OpSI:
5118 		op->type = OP_MEM;
5119 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5120 		op->addr.mem.ea =
5121 			register_address(ctxt, VCPU_REGS_RSI);
5122 		op->addr.mem.seg = ctxt->seg_override;
5123 		op->val = 0;
5124 		op->count = 1;
5125 		break;
5126 	case OpXLat:
5127 		op->type = OP_MEM;
5128 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5129 		op->addr.mem.ea =
5130 			address_mask(ctxt,
5131 				reg_read(ctxt, VCPU_REGS_RBX) +
5132 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5133 		op->addr.mem.seg = ctxt->seg_override;
5134 		op->val = 0;
5135 		break;
5136 	case OpImmFAddr:
5137 		op->type = OP_IMM;
5138 		op->addr.mem.ea = ctxt->_eip;
5139 		op->bytes = ctxt->op_bytes + 2;
5140 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
5141 		break;
5142 	case OpMemFAddr:
5143 		ctxt->memop.bytes = ctxt->op_bytes + 2;
5144 		goto mem_common;
5145 	case OpES:
5146 		op->type = OP_IMM;
5147 		op->val = VCPU_SREG_ES;
5148 		break;
5149 	case OpCS:
5150 		op->type = OP_IMM;
5151 		op->val = VCPU_SREG_CS;
5152 		break;
5153 	case OpSS:
5154 		op->type = OP_IMM;
5155 		op->val = VCPU_SREG_SS;
5156 		break;
5157 	case OpDS:
5158 		op->type = OP_IMM;
5159 		op->val = VCPU_SREG_DS;
5160 		break;
5161 	case OpFS:
5162 		op->type = OP_IMM;
5163 		op->val = VCPU_SREG_FS;
5164 		break;
5165 	case OpGS:
5166 		op->type = OP_IMM;
5167 		op->val = VCPU_SREG_GS;
5168 		break;
5169 	case OpImplicit:
5170 		/* Special instructions do their own operand decoding. */
5171 	default:
5172 		op->type = OP_NONE; /* Disable writeback. */
5173 		break;
5174 	}
5175 
5176 done:
5177 	return rc;
5178 }
5179 
5180 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5181 {
5182 	int rc = X86EMUL_CONTINUE;
5183 	int mode = ctxt->mode;
5184 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5185 	bool op_prefix = false;
5186 	bool has_seg_override = false;
5187 	struct opcode opcode;
5188 	u16 dummy;
5189 	struct desc_struct desc;
5190 
5191 	ctxt->memop.type = OP_NONE;
5192 	ctxt->memopp = NULL;
5193 	ctxt->_eip = ctxt->eip;
5194 	ctxt->fetch.ptr = ctxt->fetch.data;
5195 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
5196 	ctxt->opcode_len = 1;
5197 	ctxt->intercept = x86_intercept_none;
5198 	if (insn_len > 0)
5199 		memcpy(ctxt->fetch.data, insn, insn_len);
5200 	else {
5201 		rc = __do_insn_fetch_bytes(ctxt, 1);
5202 		if (rc != X86EMUL_CONTINUE)
5203 			goto done;
5204 	}
5205 
5206 	switch (mode) {
5207 	case X86EMUL_MODE_REAL:
5208 	case X86EMUL_MODE_VM86:
5209 		def_op_bytes = def_ad_bytes = 2;
5210 		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5211 		if (desc.d)
5212 			def_op_bytes = def_ad_bytes = 4;
5213 		break;
5214 	case X86EMUL_MODE_PROT16:
5215 		def_op_bytes = def_ad_bytes = 2;
5216 		break;
5217 	case X86EMUL_MODE_PROT32:
5218 		def_op_bytes = def_ad_bytes = 4;
5219 		break;
5220 #ifdef CONFIG_X86_64
5221 	case X86EMUL_MODE_PROT64:
5222 		def_op_bytes = 4;
5223 		def_ad_bytes = 8;
5224 		break;
5225 #endif
5226 	default:
5227 		return EMULATION_FAILED;
5228 	}
5229 
5230 	ctxt->op_bytes = def_op_bytes;
5231 	ctxt->ad_bytes = def_ad_bytes;
5232 
5233 	/* Legacy prefixes. */
5234 	for (;;) {
5235 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
5236 		case 0x66:	/* operand-size override */
5237 			op_prefix = true;
5238 			/* switch between 2/4 bytes */
5239 			ctxt->op_bytes = def_op_bytes ^ 6;
5240 			break;
5241 		case 0x67:	/* address-size override */
5242 			if (mode == X86EMUL_MODE_PROT64)
5243 				/* switch between 4/8 bytes */
5244 				ctxt->ad_bytes = def_ad_bytes ^ 12;
5245 			else
5246 				/* switch between 2/4 bytes */
5247 				ctxt->ad_bytes = def_ad_bytes ^ 6;
5248 			break;
5249 		case 0x26:	/* ES override */
5250 			has_seg_override = true;
5251 			ctxt->seg_override = VCPU_SREG_ES;
5252 			break;
5253 		case 0x2e:	/* CS override */
5254 			has_seg_override = true;
5255 			ctxt->seg_override = VCPU_SREG_CS;
5256 			break;
5257 		case 0x36:	/* SS override */
5258 			has_seg_override = true;
5259 			ctxt->seg_override = VCPU_SREG_SS;
5260 			break;
5261 		case 0x3e:	/* DS override */
5262 			has_seg_override = true;
5263 			ctxt->seg_override = VCPU_SREG_DS;
5264 			break;
5265 		case 0x64:	/* FS override */
5266 			has_seg_override = true;
5267 			ctxt->seg_override = VCPU_SREG_FS;
5268 			break;
5269 		case 0x65:	/* GS override */
5270 			has_seg_override = true;
5271 			ctxt->seg_override = VCPU_SREG_GS;
5272 			break;
5273 		case 0x40 ... 0x4f: /* REX */
5274 			if (mode != X86EMUL_MODE_PROT64)
5275 				goto done_prefixes;
5276 			ctxt->rex_prefix = ctxt->b;
5277 			continue;
5278 		case 0xf0:	/* LOCK */
5279 			ctxt->lock_prefix = 1;
5280 			break;
5281 		case 0xf2:	/* REPNE/REPNZ */
5282 		case 0xf3:	/* REP/REPE/REPZ */
5283 			ctxt->rep_prefix = ctxt->b;
5284 			break;
5285 		default:
5286 			goto done_prefixes;
5287 		}
5288 
5289 		/* Any legacy prefix after a REX prefix nullifies its effect. */
5291 		ctxt->rex_prefix = 0;
5292 	}
5293 
5294 done_prefixes:
5295 
5296 	/* REX prefix. */
5297 	if (ctxt->rex_prefix & 8)
5298 		ctxt->op_bytes = 8;	/* REX.W */
5299 
5300 	/* Opcode byte(s). */
5301 	opcode = opcode_table[ctxt->b];
5302 	/* Two-byte opcode? */
5303 	if (ctxt->b == 0x0f) {
5304 		ctxt->opcode_len = 2;
5305 		ctxt->b = insn_fetch(u8, ctxt);
5306 		opcode = twobyte_table[ctxt->b];
5307 
5308 		/* 0F_38 opcode map */
5309 		if (ctxt->b == 0x38) {
5310 			ctxt->opcode_len = 3;
5311 			ctxt->b = insn_fetch(u8, ctxt);
5312 			opcode = opcode_map_0f_38[ctxt->b];
5313 		}
5314 	}
5315 	ctxt->d = opcode.flags;
5316 
5317 	if (ctxt->d & ModRM)
5318 		ctxt->modrm = insn_fetch(u8, ctxt);
5319 
5320 	/* VEX-prefixed instructions are not implemented */
5321 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5322 	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5323 		ctxt->d = NotImpl;
5324 	}
5325 
5326 	while (ctxt->d & GroupMask) {
5327 		switch (ctxt->d & GroupMask) {
5328 		case Group:
5329 			goffset = (ctxt->modrm >> 3) & 7;
5330 			opcode = opcode.u.group[goffset];
5331 			break;
5332 		case GroupDual:
5333 			goffset = (ctxt->modrm >> 3) & 7;
5334 			if ((ctxt->modrm >> 6) == 3)
5335 				opcode = opcode.u.gdual->mod3[goffset];
5336 			else
5337 				opcode = opcode.u.gdual->mod012[goffset];
5338 			break;
5339 		case RMExt:
5340 			goffset = ctxt->modrm & 7;
5341 			opcode = opcode.u.group[goffset];
5342 			break;
5343 		case Prefix:
5344 			if (ctxt->rep_prefix && op_prefix)
5345 				return EMULATION_FAILED;
5346 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5347 			switch (simd_prefix) {
5348 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5349 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5350 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5351 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5352 			}
5353 			break;
5354 		case Escape:
5355 			if (ctxt->modrm > 0xbf) {
5356 				size_t size = ARRAY_SIZE(opcode.u.esc->high);
5357 				u32 index = array_index_nospec(
5358 					ctxt->modrm - 0xc0, size);
5359 
5360 				opcode = opcode.u.esc->high[index];
5361 			} else {
5362 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5363 			}
5364 			break;
5365 		case InstrDual:
5366 			if ((ctxt->modrm >> 6) == 3)
5367 				opcode = opcode.u.idual->mod3;
5368 			else
5369 				opcode = opcode.u.idual->mod012;
5370 			break;
5371 		case ModeDual:
5372 			if (ctxt->mode == X86EMUL_MODE_PROT64)
5373 				opcode = opcode.u.mdual->mode64;
5374 			else
5375 				opcode = opcode.u.mdual->mode32;
5376 			break;
5377 		default:
5378 			return EMULATION_FAILED;
5379 		}
5380 
5381 		ctxt->d &= ~(u64)GroupMask;
5382 		ctxt->d |= opcode.flags;
5383 	}
5384 
5385 	/* Unrecognised? */
5386 	if (ctxt->d == 0)
5387 		return EMULATION_FAILED;
5388 
5389 	ctxt->execute = opcode.u.execute;
5390 
5391 	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5392 		return EMULATION_FAILED;
5393 
5394 	if (unlikely(ctxt->d &
5395 	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5396 	     No16))) {
5397 		/*
5398 		 * These are copied unconditionally here, and checked unconditionally
5399 		 * in x86_emulate_insn.
5400 		 */
5401 		ctxt->check_perm = opcode.check_perm;
5402 		ctxt->intercept = opcode.intercept;
5403 
5404 		if (ctxt->d & NotImpl)
5405 			return EMULATION_FAILED;
5406 
5407 		if (mode == X86EMUL_MODE_PROT64) {
5408 			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5409 				ctxt->op_bytes = 8;
5410 			else if (ctxt->d & NearBranch)
5411 				ctxt->op_bytes = 8;
5412 		}
5413 
5414 		if (ctxt->d & Op3264) {
5415 			if (mode == X86EMUL_MODE_PROT64)
5416 				ctxt->op_bytes = 8;
5417 			else
5418 				ctxt->op_bytes = 4;
5419 		}
5420 
5421 		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5422 			ctxt->op_bytes = 4;
5423 
5424 		if (ctxt->d & Sse)
5425 			ctxt->op_bytes = 16;
5426 		else if (ctxt->d & Mmx)
5427 			ctxt->op_bytes = 8;
5428 	}
5429 
5430 	/* ModRM and SIB bytes. */
5431 	if (ctxt->d & ModRM) {
5432 		rc = decode_modrm(ctxt, &ctxt->memop);
5433 		if (!has_seg_override) {
5434 			has_seg_override = true;
5435 			ctxt->seg_override = ctxt->modrm_seg;
5436 		}
5437 	} else if (ctxt->d & MemAbs)
5438 		rc = decode_abs(ctxt, &ctxt->memop);
5439 	if (rc != X86EMUL_CONTINUE)
5440 		goto done;
5441 
5442 	if (!has_seg_override)
5443 		ctxt->seg_override = VCPU_SREG_DS;
5444 
5445 	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5446 
5447 	/*
5448 	 * Decode and fetch the source operand: register, memory
5449 	 * or immediate.
5450 	 */
5451 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5452 	if (rc != X86EMUL_CONTINUE)
5453 		goto done;
5454 
5455 	/*
5456 	 * Decode and fetch the second source operand: register, memory
5457 	 * or immediate.
5458 	 */
5459 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5460 	if (rc != X86EMUL_CONTINUE)
5461 		goto done;
5462 
5463 	/* Decode and fetch the destination operand: register or memory. */
5464 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5465 
5466 	if (ctxt->rip_relative && likely(ctxt->memopp))
5467 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5468 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5469 
5470 done:
5471 	if (rc == X86EMUL_PROPAGATE_FAULT)
5472 		ctxt->have_exception = true;
5473 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5474 }
5475 
5476 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5477 {
5478 	return ctxt->d & PageTable;
5479 }
5480 
5481 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5482 {
5483 	/*
5484 	 * The second termination condition only applies to REPE and REPNE:
5485 	 * the opcodes tested below (0xa6/0xa7 CMPS, 0xae/0xaf SCAS) set ZF,
5486 	 * so check the repeat prefix and the matching termination condition:
5487 	 * 	- if REPE/REPZ and ZF = 0 then done
5488 	 * 	- if REPNE/REPNZ and ZF = 1 then done
5489 	 */
5490 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5491 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5492 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5493 		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5494 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5495 		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5496 		return true;
5497 
5498 	return false;
5499 }
5500 
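/*
 * fwait forces any pending x87 exception to be raised now, on the
 * host; asm_safe() catches the resulting fault so that it can be
 * injected into the guest as #MF instead of crashing the emulator.
 */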
5501 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5502 {
5503 	int rc;
5504 
5505 	emulator_get_fpu();
5506 	rc = asm_safe("fwait");
5507 	emulator_put_fpu();
5508 
5509 	if (unlikely(rc != X86EMUL_CONTINUE))
5510 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5511 
5512 	return X86EMUL_CONTINUE;
5513 }
5514 
5515 static void fetch_possible_mmx_operand(struct operand *op)
5516 {
5517 	if (op->type == OP_MM)
5518 		read_mmx_reg(&op->mm_val, op->addr.mm);
5519 }
5520 
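/*
 * Dispatch to one of the tiny fastop asm stubs, which share a single
 * calling convention: dst in RAX, src in RDX, src2 in RCX, flags via
 * RDI.  The 1/2/4/8-byte variants of each op are laid out FASTOP_SIZE
 * bytes apart, so the right size is selected by pointer arithmetic,
 * e.g. a 4-byte op uses fop + __ffs(4) * FASTOP_SIZE.
 */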
5521 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5522 {
5523 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5524 
5525 	if (!(ctxt->d & ByteOp))
5526 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5527 
5528 	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5529 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5530 	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5531 	    : "c"(ctxt->src2.val));
5532 
5533 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5534 	if (!fop) /* exception is returned in fop variable */
5535 		return emulate_de(ctxt);
5536 	return X86EMUL_CONTINUE;
5537 }
5538 
5539 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5540 {
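	/*
	 * This relies on the per-instruction decode fields from
	 * rip_relative up to (but not including) modrm being laid out
	 * contiguously in struct x86_emulate_ctxt, so one memset clears
	 * them all.
	 */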
5541 	memset(&ctxt->rip_relative, 0,
5542 	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5543 
5544 	ctxt->io_read.pos = 0;
5545 	ctxt->io_read.end = 0;
5546 	ctxt->mem_read.end = 0;
5547 }
5548 
5549 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5550 {
5551 	const struct x86_emulate_ops *ops = ctxt->ops;
5552 	int rc = X86EMUL_CONTINUE;
5553 	int saved_dst_type = ctxt->dst.type;
5554 	unsigned emul_flags;
5555 
5556 	ctxt->mem_read.pos = 0;
5557 
5558 	/* LOCK prefix is allowed only with some instructions */
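	/* e.g. "lock incl (%rax)" is legal, "lock incl %eax" raises #UD */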
5559 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5560 		rc = emulate_ud(ctxt);
5561 		goto done;
5562 	}
5563 
5564 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5565 		rc = emulate_ud(ctxt);
5566 		goto done;
5567 	}
5568 
5569 	emul_flags = ctxt->ops->get_hflags(ctxt);
5570 	if (unlikely(ctxt->d &
5571 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5572 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5573 				(ctxt->d & Undefined)) {
5574 			rc = emulate_ud(ctxt);
5575 			goto done;
5576 		}
5577 
5578 		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5579 		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5580 			rc = emulate_ud(ctxt);
5581 			goto done;
5582 		}
5583 
5584 		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5585 			rc = emulate_nm(ctxt);
5586 			goto done;
5587 		}
5588 
5589 		if (ctxt->d & Mmx) {
5590 			rc = flush_pending_x87_faults(ctxt);
5591 			if (rc != X86EMUL_CONTINUE)
5592 				goto done;
5593 			/*
5594 			 * Now that we know the FPU is exception-safe, we can
5595 			 * fetch operands from it.
5596 			 */
5597 			fetch_possible_mmx_operand(&ctxt->src);
5598 			fetch_possible_mmx_operand(&ctxt->src2);
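			/* A Mov overwrites dst, so don't bother reading it. */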
5599 			if (!(ctxt->d & Mov))
5600 				fetch_possible_mmx_operand(&ctxt->dst);
5601 		}
5602 
5603 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5604 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5605 						      X86_ICPT_PRE_EXCEPT);
5606 			if (rc != X86EMUL_CONTINUE)
5607 				goto done;
5608 		}
5609 
5610 		/* Instruction can only be executed in protected mode */
5611 		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5612 			rc = emulate_ud(ctxt);
5613 			goto done;
5614 		}
5615 
5616 		/* Privileged instruction can be executed only in CPL=0 */
5617 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5618 			if (ctxt->d & PrivUD)
5619 				rc = emulate_ud(ctxt);
5620 			else
5621 				rc = emulate_gp(ctxt, 0);
5622 			goto done;
5623 		}
5624 
5625 		/* Do instruction specific permission checks */
5626 		if (ctxt->d & CheckPerm) {
5627 			rc = ctxt->check_perm(ctxt);
5628 			if (rc != X86EMUL_CONTINUE)
5629 				goto done;
5630 		}
5631 
5632 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5633 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5634 						      X86_ICPT_POST_EXCEPT);
5635 			if (rc != X86EMUL_CONTINUE)
5636 				goto done;
5637 		}
5638 
5639 		if (ctxt->rep_prefix && (ctxt->d & String)) {
5640 			/* All REP prefixes have the same first termination condition */
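			/*
			 * i.e. rCX == 0 up front turns the instruction into a
			 * nop that just advances RIP; address_mask() honours
			 * the address size, checking CX, ECX or RCX as
			 * appropriate.
			 */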
5641 			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5642 				string_registers_quirk(ctxt);
5643 				ctxt->eip = ctxt->_eip;
5644 				ctxt->eflags &= ~X86_EFLAGS_RF;
5645 				goto done;
5646 			}
5647 		}
5648 	}
5649 
5650 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5651 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5652 				    ctxt->src.valptr, ctxt->src.bytes);
5653 		if (rc != X86EMUL_CONTINUE)
5654 			goto done;
5655 		ctxt->src.orig_val64 = ctxt->src.val64;
5656 	}
5657 
5658 	if (ctxt->src2.type == OP_MEM) {
5659 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5660 				    &ctxt->src2.val, ctxt->src2.bytes);
5661 		if (rc != X86EMUL_CONTINUE)
5662 			goto done;
5663 	}
5664 
5665 	if ((ctxt->d & DstMask) == ImplicitOps)
5666 		goto special_insn;
5667 
5668 
5669 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5670 		/* optimisation - Mov overwrites dst, so skip the slow emulated read */
5671 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5672 				   &ctxt->dst.val, ctxt->dst.bytes);
5673 		if (rc != X86EMUL_CONTINUE) {
5674 			if (!(ctxt->d & NoWrite) &&
5675 			    rc == X86EMUL_PROPAGATE_FAULT &&
5676 			    ctxt->exception.vector == PF_VECTOR)
5677 				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5678 			goto done;
5679 		}
5680 	}
5681 	/* Copy full 64-bit value for CMPXCHG8B.  */
5682 	ctxt->dst.orig_val64 = ctxt->dst.val64;
5683 
5684 special_insn:
5685 
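	/*
	 * Last of the three intercept checkpoints, mirroring the SVM
	 * architecture's intercept stages: before exception checks,
	 * after them, and after the memory access.
	 */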
5686 	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5687 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5688 					      X86_ICPT_POST_MEMACCESS);
5689 		if (rc != X86EMUL_CONTINUE)
5690 			goto done;
5691 	}
5692 
5693 	if (ctxt->rep_prefix && (ctxt->d & String))
5694 		ctxt->eflags |= X86_EFLAGS_RF;
5695 	else
5696 		ctxt->eflags &= ~X86_EFLAGS_RF;
5697 
5698 	if (ctxt->execute) {
5699 		if (ctxt->d & Fastop)
5700 			rc = fastop(ctxt, ctxt->fop);
5701 		else
5702 			rc = ctxt->execute(ctxt);
5703 		if (rc != X86EMUL_CONTINUE)
5704 			goto done;
5705 		goto writeback;
5706 	}
5707 
5708 	if (ctxt->opcode_len == 2)
5709 		goto twobyte_insn;
5710 	else if (ctxt->opcode_len == 3)
5711 		goto threebyte_insn;
5712 
5713 	switch (ctxt->b) {
5714 	case 0x70 ... 0x7f: /* jcc (short) */
5715 		if (test_cc(ctxt->b, ctxt->eflags))
5716 			rc = jmp_rel(ctxt, ctxt->src.val);
5717 		break;
5718 	case 0x8d: /* lea r16/r32, m */
5719 		ctxt->dst.val = ctxt->src.addr.mem.ea;
5720 		break;
5721 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5722 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5723 			ctxt->dst.type = OP_NONE;
5724 		else
5725 			rc = em_xchg(ctxt);
5726 		break;
5727 	case 0x98: /* cbw/cwde/cdqe */
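		/*
		 * Sign-extend the lower half of the accumulator in place,
		 * e.g. for op_bytes == 2, AL = 0x80 becomes AX = 0xff80.
		 */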
5728 		switch (ctxt->op_bytes) {
5729 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5730 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5731 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5732 		}
5733 		break;
5734 	case 0xcc:		/* int3 */
5735 		rc = emulate_int(ctxt, 3);
5736 		break;
5737 	case 0xcd:		/* int n */
5738 		rc = emulate_int(ctxt, ctxt->src.val);
5739 		break;
5740 	case 0xce:		/* into */
5741 		if (ctxt->eflags & X86_EFLAGS_OF)
5742 			rc = emulate_int(ctxt, 4);
5743 		break;
5744 	case 0xe9: /* jmp rel */
5745 	case 0xeb: /* jmp rel short */
5746 		rc = jmp_rel(ctxt, ctxt->src.val);
5747 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5748 		break;
5749 	case 0xf4:              /* hlt */
5750 		ctxt->ops->halt(ctxt);
5751 		break;
5752 	case 0xf5:	/* cmc */
5753 		/* complement carry flag from eflags reg */
5754 		ctxt->eflags ^= X86_EFLAGS_CF;
5755 		break;
5756 	case 0xf8: /* clc */
5757 		ctxt->eflags &= ~X86_EFLAGS_CF;
5758 		break;
5759 	case 0xf9: /* stc */
5760 		ctxt->eflags |= X86_EFLAGS_CF;
5761 		break;
5762 	case 0xfc: /* cld */
5763 		ctxt->eflags &= ~X86_EFLAGS_DF;
5764 		break;
5765 	case 0xfd: /* std */
5766 		ctxt->eflags |= X86_EFLAGS_DF;
5767 		break;
5768 	default:
5769 		goto cannot_emulate;
5770 	}
5771 
5772 	if (rc != X86EMUL_CONTINUE)
5773 		goto done;
5774 
5775 writeback:
5776 	if (ctxt->d & SrcWrite) {
5777 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5778 		rc = writeback(ctxt, &ctxt->src);
5779 		if (rc != X86EMUL_CONTINUE)
5780 			goto done;
5781 	}
5782 	if (!(ctxt->d & NoWrite)) {
5783 		rc = writeback(ctxt, &ctxt->dst);
5784 		if (rc != X86EMUL_CONTINUE)
5785 			goto done;
5786 	}
5787 
5788 	/*
5789 	 * Restore dst type in case the decoding is reused
5790 	 * (happens for string instructions)
5791 	 */
5792 	ctxt->dst.type = saved_dst_type;
5793 
5794 	if ((ctxt->d & SrcMask) == SrcSI)
5795 		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5796 
5797 	if ((ctxt->d & DstMask) == DstDI)
5798 		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5799 
5800 	if (ctxt->rep_prefix && (ctxt->d & String)) {
5801 		unsigned int count;
5802 		struct read_cache *r = &ctxt->io_read;
5803 		if ((ctxt->d & SrcMask) == SrcSI)
5804 			count = ctxt->src.count;
5805 		else
5806 			count = ctxt->dst.count;
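		/*
		 * count is the number of iterations completed in this pass;
		 * it can exceed one when the I/O read-ahead cache batches
		 * string iterations.
		 */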
5807 		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5808 
5809 		if (!string_insn_completed(ctxt)) {
5810 			/*
5811 			 * Re-enter the guest when the pio read-ahead buffer is
5812 			 * empty or, if it is not used, after every 1024 iterations.
5813 			 */
5814 			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5815 			    (r->end == 0 || r->end != r->pos)) {
5816 				/*
5817 				 * Reset the read cache. This usually happens
5818 				 * before decode, but since the instruction is
5819 				 * restarted we have to do it here.
5820 				 */
5821 				ctxt->mem_read.end = 0;
5822 				writeback_registers(ctxt);
5823 				return EMULATION_RESTART;
5824 			}
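			/*
			 * Otherwise leave RIP untouched and report success:
			 * the guest will re-execute the same instruction on
			 * the next entry, re-entering the emulator for the
			 * following iteration.
			 */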
5825 			goto done; /* skip rip writeback */
5826 		}
5827 		ctxt->eflags &= ~X86_EFLAGS_RF;
5828 	}
5829 
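	/* Outside 64-bit mode the instruction pointer is only 32 bits wide. */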
5830 	ctxt->eip = ctxt->_eip;
5831 	if (ctxt->mode != X86EMUL_MODE_PROT64)
5832 		ctxt->eip = (u32)ctxt->_eip;
5833 
5834 done:
5835 	if (rc == X86EMUL_PROPAGATE_FAULT) {
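		/* Hardware exception vectors are 0..31; anything else is a bug. */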
5836 		WARN_ON(ctxt->exception.vector > 0x1f);
5837 		ctxt->have_exception = true;
5838 	}
5839 	if (rc == X86EMUL_INTERCEPTED)
5840 		return EMULATION_INTERCEPTED;
5841 
5842 	if (rc == X86EMUL_CONTINUE)
5843 		writeback_registers(ctxt);
5844 
5845 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5846 
5847 twobyte_insn:
5848 	switch (ctxt->b) {
5849 	case 0x09:		/* wbinvd */
5850 		(ctxt->ops->wbinvd)(ctxt);
5851 		break;
5852 	case 0x08:		/* invd */
5853 	case 0x0d:		/* GrpP (prefetch) */
5854 	case 0x18:		/* Grp16 (prefetch/nop) */
5855 	case 0x1f:		/* nop */
5856 		break;
5857 	case 0x20: /* mov cr, reg */
5858 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5859 		break;
5860 	case 0x21: /* mov from dr to reg */
5861 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5862 		break;
5863 	case 0x40 ... 0x4f:	/* cmov */
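		/*
		 * When the condition is false, writeback is suppressed
		 * except for 4-byte operands: in 64-bit mode a 32-bit cmov
		 * zero-extends into the upper half of the destination
		 * register even when nothing is moved.
		 */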
5864 		if (test_cc(ctxt->b, ctxt->eflags))
5865 			ctxt->dst.val = ctxt->src.val;
5866 		else if (ctxt->op_bytes != 4)
5867 			ctxt->dst.type = OP_NONE; /* no writeback */
5868 		break;
5869 	case 0x80 ... 0x8f: /* jcc rel (near) */
5870 		if (test_cc(ctxt->b, ctxt->eflags))
5871 			rc = jmp_rel(ctxt, ctxt->src.val);
5872 		break;
5873 	case 0x90 ... 0x9f:     /* setcc r/m8 */
5874 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5875 		break;
5876 	case 0xb6 ... 0xb7:	/* movzx */
5877 		ctxt->dst.bytes = ctxt->op_bytes;
5878 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5879 						       : (u16) ctxt->src.val;
5880 		break;
5881 	case 0xbe ... 0xbf:	/* movsx */
5882 		ctxt->dst.bytes = ctxt->op_bytes;
5883 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5884 							(s16) ctxt->src.val;
5885 		break;
5886 	default:
5887 		goto cannot_emulate;
5888 	}
5889 
5890 threebyte_insn:
5891 
5892 	if (rc != X86EMUL_CONTINUE)
5893 		goto done;
5894 
5895 	goto writeback;
5896 
5897 cannot_emulate:
5898 	return EMULATION_FAILED;
5899 }
5900 
5901 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5902 {
5903 	invalidate_registers(ctxt);
5904 }
5905 
5906 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5907 {
5908 	writeback_registers(ctxt);
5909 }
5910 
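/*
 * A cached guest physical address may only be reused if the instruction
 * touches one memory location exactly once: REP string instructions
 * iterate over a range, and TwoMemOp instructions (e.g. movs) access
 * two different addresses.
 */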
5911 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5912 {
5913 	if (ctxt->rep_prefix && (ctxt->d & String))
5914 		return false;
5915 
5916 	if (ctxt->d & TwoMemOp)
5917 		return false;
5918 
5919 	return true;
5920 }
5921