/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifndef __ASSEMBLY__
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 type;		/* type of this instruction */
	u8 len;			/* length of original instruction */
};

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};
#endif

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in <asm/paravirt.h>.
 */
struct paravirt_callee_save {
	void *func;
};
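
/*
 * Illustrative sketch (the function name is hypothetical): a backend
 * normally generates the register-preserving thunk with
 * PV_CALLEE_SAVE_REGS_THUNK() and wraps the result with
 * PV_CALLEE_SAVE(), both from <asm/paravirt.h>:
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */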

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
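
/*
 * Usage sketch (illustrative): generic code batches page-table updates
 * between enter and leave through the arch_{enter,leave}_lazy_mmu_mode()
 * helpers, which end up in the hooks above:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */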
#endif

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;
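
/*
 * Minimal sketch of a backend overriding a single CPU hook (the names
 * are hypothetical; real backends such as Xen assign whole groups of
 * ops during early boot):
 *
 *	static void my_io_delay(void)
 *	{
 *		// no port 0x80 delay needed under this hypervisor
 *	}
 *
 *	pv_ops.cpu.io_delay = my_io_delay;
 */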

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hooks for intercepting the creation/use of an mm_struct. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;
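
/*
 * Usage sketch for the modify_prot pair above (illustrative, using the
 * hook signatures in this struct): callers bracket a read-modify-write
 * of a PTE so a batching hypervisor can keep it atomic with respect to
 * concurrent hardware A/D bit updates:
 *
 *	pte = ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_mkdirty(pte);		// example modification
 *	ptep_modify_prot_commit(vma, addr, ptep, pte);
 */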

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
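
/*
 * Sketch of how a hypervisor backend wires these up (the my_* names
 * are hypothetical; kvm_spinlock_init() in arch/x86/kernel/kvm.c is a
 * real-world example):
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = my_spin_lock_slowpath;
 *	pv_ops.lock.wait = my_wait;
 *	pv_ops.lock.kick = my_kick;
 */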

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function by taking its offset into the struct,
 * which tells the patcher what to patch.
 */
struct paravirt_patch_template {
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
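
/*
 * Worked example (illustrative; the value depends on the config):
 * viewing struct paravirt_patch_template as an array of pointers,
 * PARAVIRT_PATCH(cpu.io_delay) is the array index of the io_delay
 * slot.  That index is what gets stored as the "type" byte in
 * struct paravirt_patch_site, and the patcher multiplies it back up
 * to find the corresponding pv_ops member.
 */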

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (pv_ops.op)
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type)		\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	_ASM_ALIGN "\n"					\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[paravirt_opptr];"
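
/*
 * Rough sketch of what paravirt_alt(PARAVIRT_CALL) emits for one op on
 * a 64-bit build (OP_OFFSET stands in for the member's offset;
 * _ASM_PTR/_ASM_ALIGN select the pointer width):
 *
 *	771:	call	*pv_ops+OP_OFFSET(%rip)
 *	772:
 *		.pushsection .parainstructions, "a"
 *		.balign	8
 *		.quad	771b			// site address
 *		.byte	type			// PARAVIRT_PATCH() number
 *		.byte	772b-771b		// site length
 *		.balign	8
 *		.popsection
 */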

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation on modern CPUs,
 * because the CPU cannot necessarily predict the destination
 * address.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two variants, CALL and VCALL, for non-void and void functions
 * respectively.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64, the return is in %rax regardless of the
 * return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
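
/*
 * For reference, the wrappers in <asm/paravirt.h> are built on these
 * macros; a minimal sketch (hypothetical name, and real wrappers often
 * use the PVOP_ALT_* variants so native instructions can be patched in):
 *
 *	static inline unsigned long my_read_cr3(void)
 *	{
 *		return PVOP_CALL0(unsigned long, mmu.read_cr3);
 *	}
 */
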
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers. Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), PVOP_VCALL_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#endif
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})
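
/*
 * Worked example (illustrative): PVOP_RETVAL(u16) selects the
 * sizeof() == 2 case, so it evaluates to (0xffffUL & __eax), i.e.
 * only the low 16 bits of the register carrying the return value.
 */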


#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...)	\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL),	\
					 alt, cond)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, op, ...)					\
	____PVOP_CALL(PVOP_RETVAL(rettype), op,				\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)			\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond,		\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), op.func,			\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,	\
			  PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(op, ...)						\
	(void)____PVOP_CALL(, op, PVOP_VCALL_CLOBBERS,			\
		       VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)				\
	(void)____PVOP_ALT_CALL(, op, alt, cond,			\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, ...)					\
	(void)____PVOP_CALL(, op.func,					\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, op.func, alt, cond,			\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)				\
	__PVOP_ALT_CALL(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)					\
	__PVOP_ALT_VCALL(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond)


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
void paravirt_BUG(void);
u64 _paravirt_ident_64(u64);
unsigned long paravirt_ret0(void);

#define paravirt_nop	((void *)_paravirt_nop)

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
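
/*
 * Simplified sketch of consuming the table (the real apply_paravirt()
 * in arch/x86/kernel/alternative.c patches through a local buffer):
 *
 *	struct paravirt_patch_site *p;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++)
 *		paravirt_patch(p->type, p->instr,
 *			       (unsigned long)p->instr, p->len);
 */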

#endif	/* __ASSEMBLY__ */
#endif  /* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */