1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _ASM_X86_PARAVIRT_TYPES_H 3 #define _ASM_X86_PARAVIRT_TYPES_H 4 5 #ifndef __ASSEMBLY__ 6 /* These all sit in the .parainstructions section to tell us what to patch. */ 7 struct paravirt_patch_site { 8 u8 *instr; /* original instructions */ 9 u8 type; /* type of this instruction */ 10 u8 len; /* length of original instruction */ 11 }; 12 13 /* Lazy mode for batching updates / context switch */ 14 enum paravirt_lazy_mode { 15 PARAVIRT_LAZY_NONE, 16 PARAVIRT_LAZY_MMU, 17 PARAVIRT_LAZY_CPU, 18 }; 19 #endif 20 21 #ifdef CONFIG_PARAVIRT 22 23 #ifndef __ASSEMBLY__ 24 25 #include <asm/desc_defs.h> 26 #include <asm/pgtable_types.h> 27 #include <asm/nospec-branch.h> 28 29 struct page; 30 struct thread_struct; 31 struct desc_ptr; 32 struct tss_struct; 33 struct mm_struct; 34 struct desc_struct; 35 struct task_struct; 36 struct cpumask; 37 struct flush_tlb_info; 38 struct mmu_gather; 39 struct vm_area_struct; 40 41 /* 42 * Wrapper type for pointers to code which uses the non-standard 43 * calling convention. See PV_CALL_SAVE_REGS_THUNK below. 44 */ 45 struct paravirt_callee_save { 46 void *func; 47 }; 48 49 /* general info */ 50 struct pv_info { 51 #ifdef CONFIG_PARAVIRT_XXL 52 u16 extra_user_64bit_cs; /* __USER_CS if none */ 53 #endif 54 55 const char *name; 56 }; 57 58 #ifdef CONFIG_PARAVIRT_XXL 59 struct pv_lazy_ops { 60 /* Set deferred update mode, used for batching operations. */ 61 void (*enter)(void); 62 void (*leave)(void); 63 void (*flush)(void); 64 } __no_randomize_layout; 65 #endif 66 67 struct pv_cpu_ops { 68 /* hooks for various privileged instructions */ 69 void (*io_delay)(void); 70 71 #ifdef CONFIG_PARAVIRT_XXL 72 unsigned long (*get_debugreg)(int regno); 73 void (*set_debugreg)(int regno, unsigned long value); 74 75 unsigned long (*read_cr0)(void); 76 void (*write_cr0)(unsigned long); 77 78 void (*write_cr4)(unsigned long); 79 80 /* Segment descriptor handling */ 81 void (*load_tr_desc)(void); 82 void (*load_gdt)(const struct desc_ptr *); 83 void (*load_idt)(const struct desc_ptr *); 84 void (*set_ldt)(const void *desc, unsigned entries); 85 unsigned long (*store_tr)(void); 86 void (*load_tls)(struct thread_struct *t, unsigned int cpu); 87 void (*load_gs_index)(unsigned int idx); 88 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, 89 const void *desc); 90 void (*write_gdt_entry)(struct desc_struct *, 91 int entrynum, const void *desc, int size); 92 void (*write_idt_entry)(gate_desc *, 93 int entrynum, const gate_desc *gate); 94 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); 95 void (*free_ldt)(struct desc_struct *ldt, unsigned entries); 96 97 void (*load_sp0)(unsigned long sp0); 98 99 #ifdef CONFIG_X86_IOPL_IOPERM 100 void (*invalidate_io_bitmap)(void); 101 void (*update_io_bitmap)(void); 102 #endif 103 104 void (*wbinvd)(void); 105 106 /* cpuid emulation, mostly so that caps bits can be disabled */ 107 void (*cpuid)(unsigned int *eax, unsigned int *ebx, 108 unsigned int *ecx, unsigned int *edx); 109 110 /* Unsafe MSR operations. These will warn or panic on failure. */ 111 u64 (*read_msr)(unsigned int msr); 112 void (*write_msr)(unsigned int msr, unsigned low, unsigned high); 113 114 /* 115 * Safe MSR operations. 116 * read sets err to 0 or -EIO. write returns 0 or -EIO. 117 */ 118 u64 (*read_msr_safe)(unsigned int msr, int *err); 119 int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high); 120 121 u64 (*read_pmc)(int counter); 122 123 void (*start_context_switch)(struct task_struct *prev); 124 void (*end_context_switch)(struct task_struct *next); 125 #endif 126 } __no_randomize_layout; 127 128 struct pv_irq_ops { 129 #ifdef CONFIG_PARAVIRT_XXL 130 /* 131 * Get/set interrupt state. save_fl is expected to use X86_EFLAGS_IF; 132 * all other bits returned from save_fl are undefined. 133 * 134 * NOTE: These functions callers expect the callee to preserve 135 * more registers than the standard C calling convention. 136 */ 137 struct paravirt_callee_save save_fl; 138 struct paravirt_callee_save irq_disable; 139 struct paravirt_callee_save irq_enable; 140 141 void (*safe_halt)(void); 142 void (*halt)(void); 143 #endif 144 } __no_randomize_layout; 145 146 struct pv_mmu_ops { 147 /* TLB operations */ 148 void (*flush_tlb_user)(void); 149 void (*flush_tlb_kernel)(void); 150 void (*flush_tlb_one_user)(unsigned long addr); 151 void (*flush_tlb_multi)(const struct cpumask *cpus, 152 const struct flush_tlb_info *info); 153 154 void (*tlb_remove_table)(struct mmu_gather *tlb, void *table); 155 156 /* Hook for intercepting the destruction of an mm_struct. */ 157 void (*exit_mmap)(struct mm_struct *mm); 158 void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc); 159 160 #ifdef CONFIG_PARAVIRT_XXL 161 struct paravirt_callee_save read_cr2; 162 void (*write_cr2)(unsigned long); 163 164 unsigned long (*read_cr3)(void); 165 void (*write_cr3)(unsigned long); 166 167 /* Hook for intercepting the creation/use of an mm_struct. */ 168 void (*enter_mmap)(struct mm_struct *mm); 169 170 /* Hooks for allocating and freeing a pagetable top-level */ 171 int (*pgd_alloc)(struct mm_struct *mm); 172 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); 173 174 /* 175 * Hooks for allocating/releasing pagetable pages when they're 176 * attached to a pagetable 177 */ 178 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); 179 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); 180 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); 181 void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn); 182 void (*release_pte)(unsigned long pfn); 183 void (*release_pmd)(unsigned long pfn); 184 void (*release_pud)(unsigned long pfn); 185 void (*release_p4d)(unsigned long pfn); 186 187 /* Pagetable manipulation functions */ 188 void (*set_pte)(pte_t *ptep, pte_t pteval); 189 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); 190 191 pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr, 192 pte_t *ptep); 193 void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr, 194 pte_t *ptep, pte_t pte); 195 196 struct paravirt_callee_save pte_val; 197 struct paravirt_callee_save make_pte; 198 199 struct paravirt_callee_save pgd_val; 200 struct paravirt_callee_save make_pgd; 201 202 void (*set_pud)(pud_t *pudp, pud_t pudval); 203 204 struct paravirt_callee_save pmd_val; 205 struct paravirt_callee_save make_pmd; 206 207 struct paravirt_callee_save pud_val; 208 struct paravirt_callee_save make_pud; 209 210 void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval); 211 212 #if CONFIG_PGTABLE_LEVELS >= 5 213 struct paravirt_callee_save p4d_val; 214 struct paravirt_callee_save make_p4d; 215 216 void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); 217 #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ 218 219 struct pv_lazy_ops lazy_mode; 220 221 /* dom0 ops */ 222 223 /* Sometimes the physical address is a pfn, and sometimes its 224 an mfn. We can tell which is which from the index. */ 225 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, 226 phys_addr_t phys, pgprot_t flags); 227 #endif 228 } __no_randomize_layout; 229 230 struct arch_spinlock; 231 #ifdef CONFIG_SMP 232 #include <asm/spinlock_types.h> 233 #endif 234 235 struct qspinlock; 236 237 struct pv_lock_ops { 238 void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val); 239 struct paravirt_callee_save queued_spin_unlock; 240 241 void (*wait)(u8 *ptr, u8 val); 242 void (*kick)(int cpu); 243 244 struct paravirt_callee_save vcpu_is_preempted; 245 } __no_randomize_layout; 246 247 /* This contains all the paravirt structures: we get a convenient 248 * number for each function using the offset which we use to indicate 249 * what to patch. */ 250 struct paravirt_patch_template { 251 struct pv_cpu_ops cpu; 252 struct pv_irq_ops irq; 253 struct pv_mmu_ops mmu; 254 struct pv_lock_ops lock; 255 } __no_randomize_layout; 256 257 extern struct pv_info pv_info; 258 extern struct paravirt_patch_template pv_ops; 259 260 #define PARAVIRT_PATCH(x) \ 261 (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) 262 263 #define paravirt_type(op) \ 264 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ 265 [paravirt_opptr] "m" (pv_ops.op) 266 /* 267 * Generate some code, and mark it as patchable by the 268 * apply_paravirt() alternate instruction patcher. 269 */ 270 #define _paravirt_alt(insn_string, type) \ 271 "771:\n\t" insn_string "\n" "772:\n" \ 272 ".pushsection .parainstructions,\"a\"\n" \ 273 _ASM_ALIGN "\n" \ 274 _ASM_PTR " 771b\n" \ 275 " .byte " type "\n" \ 276 " .byte 772b-771b\n" \ 277 _ASM_ALIGN "\n" \ 278 ".popsection\n" 279 280 /* Generate patchable code, with the default asm parameters. */ 281 #define paravirt_alt(insn_string) \ 282 _paravirt_alt(insn_string, "%c[paravirt_typenum]") 283 284 /* Simple instruction patching code. */ 285 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" 286 287 unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len); 288 289 int paravirt_disable_iospace(void); 290 291 /* 292 * This generates an indirect call based on the operation type number. 293 * The type number, computed in PARAVIRT_PATCH, is derived from the 294 * offset into the paravirt_patch_template structure, and can therefore be 295 * freely converted back into a structure offset. 296 */ 297 #define PARAVIRT_CALL \ 298 ANNOTATE_RETPOLINE_SAFE \ 299 "call *%[paravirt_opptr];" 300 301 /* 302 * These macros are intended to wrap calls through one of the paravirt 303 * ops structs, so that they can be later identified and patched at 304 * runtime. 305 * 306 * Normally, a call to a pv_op function is a simple indirect call: 307 * (pv_op_struct.operations)(args...). 308 * 309 * Unfortunately, this is a relatively slow operation for modern CPUs, 310 * because it cannot necessarily determine what the destination 311 * address is. In this case, the address is a runtime constant, so at 312 * the very least we can patch the call to a simple direct call, or, 313 * ideally, patch an inline implementation into the callsite. (Direct 314 * calls are essentially free, because the call and return addresses 315 * are completely predictable.) 316 * 317 * For i386, these macros rely on the standard gcc "regparm(3)" calling 318 * convention, in which the first three arguments are placed in %eax, 319 * %edx, %ecx (in that order), and the remaining arguments are placed 320 * on the stack. All caller-save registers (eax,edx,ecx) are expected 321 * to be modified (either clobbered or used for return values). 322 * X86_64, on the other hand, already specifies a register-based calling 323 * conventions, returning at %rax, with parameters going in %rdi, %rsi, 324 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any 325 * special handling for dealing with 4 arguments, unlike i386. 326 * However, x86_64 also has to clobber all caller saved registers, which 327 * unfortunately, are quite a bit (r8 - r11) 328 * 329 * The call instruction itself is marked by placing its start address 330 * and size into the .parainstructions section, so that 331 * apply_paravirt() in arch/i386/kernel/alternative.c can do the 332 * appropriate patching under the control of the backend pv_init_ops 333 * implementation. 334 * 335 * Unfortunately there's no way to get gcc to generate the args setup 336 * for the call, and then allow the call itself to be generated by an 337 * inline asm. Because of this, we must do the complete arg setup and 338 * return value handling from within these macros. This is fairly 339 * cumbersome. 340 * 341 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. 342 * It could be extended to more arguments, but there would be little 343 * to be gained from that. For each number of arguments, there are 344 * two VCALL and CALL variants for void and non-void functions. 345 * 346 * When there is a return value, the invoker of the macro must specify 347 * the return type. The macro then uses sizeof() on that type to 348 * determine whether it's a 32 or 64 bit value and places the return 349 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for 350 * 64-bit). For x86_64 machines, it just returns in %rax regardless of 351 * the return value size. 352 * 353 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments; 354 * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments 355 * in low,high order 356 * 357 * Small structures are passed and returned in registers. The macro 358 * calling convention can't directly deal with this, so the wrapper 359 * functions must do it. 360 * 361 * These PVOP_* macros are only defined within this header. This 362 * means that all uses must be wrapped in inline functions. This also 363 * makes sure the incoming and outgoing types are always correct. 364 */ 365 #ifdef CONFIG_X86_32 366 #define PVOP_CALL_ARGS \ 367 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; 368 369 #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) 370 #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) 371 #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) 372 373 #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ 374 "=c" (__ecx) 375 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS 376 377 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx) 378 #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS 379 380 #define EXTRA_CLOBBERS 381 #define VEXTRA_CLOBBERS 382 #else /* CONFIG_X86_64 */ 383 /* [re]ax isn't an arg, but the return val */ 384 #define PVOP_CALL_ARGS \ 385 unsigned long __edi = __edi, __esi = __esi, \ 386 __edx = __edx, __ecx = __ecx, __eax = __eax; 387 388 #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) 389 #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) 390 #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) 391 #define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x)) 392 393 #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ 394 "=S" (__esi), "=d" (__edx), \ 395 "=c" (__ecx) 396 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) 397 398 /* 399 * void functions are still allowed [re]ax for scratch. 400 * 401 * The ZERO_CALL_USED REGS feature may end up zeroing out callee-saved 402 * registers. Make sure we model this with the appropriate clobbers. 403 */ 404 #ifdef CONFIG_ZERO_CALL_USED_REGS 405 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), PVOP_VCALL_CLOBBERS 406 #else 407 #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) 408 #endif 409 #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS 410 411 #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" 412 #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" 413 #endif /* CONFIG_X86_32 */ 414 415 #ifdef CONFIG_PARAVIRT_DEBUG 416 #define PVOP_TEST_NULL(op) BUG_ON(pv_ops.op == NULL) 417 #else 418 #define PVOP_TEST_NULL(op) ((void)pv_ops.op) 419 #endif 420 421 #define PVOP_RETVAL(rettype) \ 422 ({ unsigned long __mask = ~0UL; \ 423 BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long)); \ 424 switch (sizeof(rettype)) { \ 425 case 1: __mask = 0xffUL; break; \ 426 case 2: __mask = 0xffffUL; break; \ 427 case 4: __mask = 0xffffffffUL; break; \ 428 default: break; \ 429 } \ 430 __mask & __eax; \ 431 }) 432 433 434 #define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...) \ 435 ({ \ 436 PVOP_CALL_ARGS; \ 437 PVOP_TEST_NULL(op); \ 438 asm volatile(paravirt_alt(PARAVIRT_CALL) \ 439 : call_clbr, ASM_CALL_CONSTRAINT \ 440 : paravirt_type(op), \ 441 ##__VA_ARGS__ \ 442 : "memory", "cc" extra_clbr); \ 443 ret; \ 444 }) 445 446 #define ____PVOP_ALT_CALL(ret, op, alt, cond, call_clbr, \ 447 extra_clbr, ...) \ 448 ({ \ 449 PVOP_CALL_ARGS; \ 450 PVOP_TEST_NULL(op); \ 451 asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL), \ 452 alt, cond) \ 453 : call_clbr, ASM_CALL_CONSTRAINT \ 454 : paravirt_type(op), \ 455 ##__VA_ARGS__ \ 456 : "memory", "cc" extra_clbr); \ 457 ret; \ 458 }) 459 460 #define __PVOP_CALL(rettype, op, ...) \ 461 ____PVOP_CALL(PVOP_RETVAL(rettype), op, \ 462 PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__) 463 464 #define __PVOP_ALT_CALL(rettype, op, alt, cond, ...) \ 465 ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond, \ 466 PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, \ 467 ##__VA_ARGS__) 468 469 #define __PVOP_CALLEESAVE(rettype, op, ...) \ 470 ____PVOP_CALL(PVOP_RETVAL(rettype), op.func, \ 471 PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__) 472 473 #define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...) \ 474 ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond, \ 475 PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__) 476 477 478 #define __PVOP_VCALL(op, ...) \ 479 (void)____PVOP_CALL(, op, PVOP_VCALL_CLOBBERS, \ 480 VEXTRA_CLOBBERS, ##__VA_ARGS__) 481 482 #define __PVOP_ALT_VCALL(op, alt, cond, ...) \ 483 (void)____PVOP_ALT_CALL(, op, alt, cond, \ 484 PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS, \ 485 ##__VA_ARGS__) 486 487 #define __PVOP_VCALLEESAVE(op, ...) \ 488 (void)____PVOP_CALL(, op.func, \ 489 PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__) 490 491 #define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...) \ 492 (void)____PVOP_ALT_CALL(, op.func, alt, cond, \ 493 PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__) 494 495 496 #define PVOP_CALL0(rettype, op) \ 497 __PVOP_CALL(rettype, op) 498 #define PVOP_VCALL0(op) \ 499 __PVOP_VCALL(op) 500 #define PVOP_ALT_CALL0(rettype, op, alt, cond) \ 501 __PVOP_ALT_CALL(rettype, op, alt, cond) 502 #define PVOP_ALT_VCALL0(op, alt, cond) \ 503 __PVOP_ALT_VCALL(op, alt, cond) 504 505 #define PVOP_CALLEE0(rettype, op) \ 506 __PVOP_CALLEESAVE(rettype, op) 507 #define PVOP_VCALLEE0(op) \ 508 __PVOP_VCALLEESAVE(op) 509 #define PVOP_ALT_CALLEE0(rettype, op, alt, cond) \ 510 __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond) 511 #define PVOP_ALT_VCALLEE0(op, alt, cond) \ 512 __PVOP_ALT_VCALLEESAVE(op, alt, cond) 513 514 515 #define PVOP_CALL1(rettype, op, arg1) \ 516 __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1)) 517 #define PVOP_VCALL1(op, arg1) \ 518 __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1)) 519 #define PVOP_ALT_VCALL1(op, arg1, alt, cond) \ 520 __PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1)) 521 522 #define PVOP_CALLEE1(rettype, op, arg1) \ 523 __PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1)) 524 #define PVOP_VCALLEE1(op, arg1) \ 525 __PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1)) 526 #define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond) \ 527 __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1)) 528 #define PVOP_ALT_VCALLEE1(op, arg1, alt, cond) \ 529 __PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1)) 530 531 532 #define PVOP_CALL2(rettype, op, arg1, arg2) \ 533 __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2)) 534 #define PVOP_VCALL2(op, arg1, arg2) \ 535 __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2)) 536 537 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ 538 __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), \ 539 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) 540 #define PVOP_VCALL3(op, arg1, arg2, arg3) \ 541 __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), \ 542 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) 543 544 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 545 __PVOP_CALL(rettype, op, \ 546 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ 547 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) 548 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 549 __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ 550 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) 551 552 enum paravirt_lazy_mode paravirt_get_lazy_mode(void); 553 void paravirt_start_context_switch(struct task_struct *prev); 554 void paravirt_end_context_switch(struct task_struct *next); 555 556 void paravirt_enter_lazy_mmu(void); 557 void paravirt_leave_lazy_mmu(void); 558 void paravirt_flush_lazy_mmu(void); 559 560 void _paravirt_nop(void); 561 void paravirt_BUG(void); 562 unsigned long paravirt_ret0(void); 563 #ifdef CONFIG_PARAVIRT_XXL 564 u64 _paravirt_ident_64(u64); 565 unsigned long pv_native_save_fl(void); 566 void pv_native_irq_disable(void); 567 void pv_native_irq_enable(void); 568 unsigned long pv_native_read_cr2(void); 569 #endif 570 571 #define paravirt_nop ((void *)_paravirt_nop) 572 573 extern struct paravirt_patch_site __parainstructions[], 574 __parainstructions_end[]; 575 576 #endif /* __ASSEMBLY__ */ 577 #endif /* CONFIG_PARAVIRT */ 578 #endif /* _ASM_X86_PARAVIRT_TYPES_H */ 579