#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the regs the platform has.  For i386,
   that's all of the above. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
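
/*
 * Worked example: on i386 this reduces to
 * ((CLBR_EAX|CLBR_EDX|CLBR_ECX) | 0) & ~(CLBR_EAX|CLBR_EDX) == CLBR_ECX,
 * i.e. only %ecx must be treated as clobbered across a callee-save
 * call.  On x86-64 it is all six argument registers plus %r10/%r11,
 * since %rax is not among them.
 */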

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK().
 */
struct paravirt_callee_save {
	void *func;
};
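
/*
 * Illustrative sketch of how such a pointer is produced; the
 * PV_CALLEE_SAVE()/PV_CALLEE_SAVE_REGS_THUNK() helpers live with the
 * paravirt wrappers, and "my_save_fl" here is hypothetical:
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * The generated thunk saves and restores the registers that the
 * non-standard convention promises to preserve, then calls the plain
 * C function.
 */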

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	int paravirt_enabled;
	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop-pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};
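
/*
 * A minimal sketch of a backend's patch hook, assuming it wants no
 * custom inline sequences: defer everything to paravirt_patch_default()
 * (declared below), which turns a site into a direct call/jmp where it
 * can and leaves the indirect call in place otherwise.
 * ("example_patch" is hypothetical; native_patch() below plays this
 * role for bare metal.)
 *
 *	static unsigned example_patch(u8 type, u16 clobbers, void *insnbuf,
 *				      unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobbers, insnbuf,
 *					      addr, len);
 *	}
 */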


struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};
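
/*
 * Typical use, as a sketch: bracket a batch of pagetable updates so a
 * hypervisor backend can queue them and flush once on leave (see the
 * lazy-mode helpers declared at the end of this header):
 *
 *	paravirt_enter_lazy_mmu();
 *	for (...)
 *		set_pte_at(mm, addr, ptep, pte);	(may be batched)
 *	paravirt_leave_lazy_mmu();
 */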

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
	unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);

	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};
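
/*
 * For instance, the cpuid hook above lets a backend hide capability
 * bits from the kernel.  A sketch, assuming a native_cpuid() fallback;
 * "example_cpuid" itself is hypothetical:
 *
 *	static void example_cpuid(unsigned int *eax, unsigned int *ebx,
 *				  unsigned int *ecx, unsigned int *edx)
 *	{
 *		unsigned int leaf = *eax;
 *
 *		native_cpuid(eax, ebx, ecx, edx);
 *		if (leaf == 1)
 *			*edx &= ~(1 << 4);	(mask X86_FEATURE_TSC)
 *	}
 */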

struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);


	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);
	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp);
	void (*pmd_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pmd_t *pmdp);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
};

struct arch_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct arch_spinlock *lock);
	int (*spin_is_contended)(struct arch_spinlock *lock);
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using its offset within the template,
 * which we use to indicate what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
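
/*
 * Worked example: PARAVIRT_PATCH(pv_irq_ops.irq_disable) is the index
 * of that function pointer, in pointer-sized slots, from the start of
 * struct paravirt_patch_template.  Each patch site records this
 * number, so the patcher can map a site back to the op it calls.
 */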

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.  The record emitted
 * into .parainstructions must match struct paravirt_patch_site below.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code) 					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
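
/*
 * Example usage, as in the native patching code:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * defines start_pv_irq_ops_irq_disable[]/end_pv_irq_ops_irq_disable[]
 * delimiting the bytes of a lone "cli", ready to be copied over a
 * call site by paravirt_patch_insns() below.
 */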

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters passed in
 * %rdi, %rsi, %rdx, and %rcx.  For this reason, x86_64 needs no
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers,
 * which unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64, the return is always in %rax regardless of the
 * return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
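
/*
 * For instance, the wrappers in <asm/paravirt.h> look like this
 * (sketch of the real read_cr2() wrapper):
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 */
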
#ifdef CONFIG_X86_32
/* The self-assignments below are a trick to suppress "may be used
 * uninitialized" warnings: these variables are outputs of the asm,
 * not real inputs. */
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
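
/*
 * The 64-bit-return path above is what, e.g., the rdmsr wrapper relies
 * on; a sketch of such a use, modelled on paravirt_read_msr() in
 * <asm/paravirt.h> (err is an int *):
 *
 *	u64 val = PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 *
 * On 32-bit, sizeof(u64) > sizeof(unsigned long), so the result is
 * assembled from %edx:%eax.
 */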

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		      PVOP_VCALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* The 4-argument case is the only place i386 and x86_64 differ:
 * x86_64 passes arg4 in a register, which makes it much simpler. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
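
/*
 * Example of a 4-argument call, modelled on the set_pte_at() wrapper
 * in <asm/paravirt.h> (non-PAE case):
 *
 *	PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 *
 * On i386 this pushes pte.pte on the stack; on x86_64 it lands in
 * %rcx.
 */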

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)
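
/*
 * The identity functions above serve as default ops for hooks whose
 * native implementation is a no-op translation (e.g. pte_val/make_pte
 * on bare metal), and the patcher can then erase such call sites
 * entirely via paravirt_patch_ident_32/64().
 */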

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr; 		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
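
/*
 * A sketch of how the patcher walks these records, modelled loosely on
 * apply_paravirt() in arch/x86/kernel/alternative.c (details such as
 * the nop padding are omitted):
 *
 *	struct paravirt_patch_site *p;
 *	char insnbuf[16];
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used;
 *
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 insnbuf, (unsigned long)p->instr,
 *					 p->len);
 *		(nop-pad insnbuf out to p->len, then copy it over
 *		 p->instr)
 *	}
 */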

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */