/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has; for i386
   that is exactly the four above. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in
 * <asm/paravirt.h>.
 */
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif
#endif

	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as the rest is padded
	 * with NOPs by generic code.
	 */
	unsigned (*patch)(u8 type, void *insn_buff,
			  unsigned long addr, unsigned len);
} __no_randomize_layout;
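/*
 * Illustrative sketch (not part of this header): a minimal backend
 * ->patch implementation could defer everything to the generic
 * patcher, which either emits a call to the current pv_ops target or
 * pads with NOPs:
 *
 *	static unsigned example_patch(u8 type, void *insn_buff,
 *				      unsigned long addr, unsigned len)
 *	{
 *		// Hypothetical: special-case nothing, let the generic
 *		// code pick direct call, jmp, or nop padding.
 *		return paravirt_patch_default(type, insn_buff, addr, len);
 *	}
 *
 * paravirt_patch_default() is declared further down in this header;
 * "example_patch" is a made-up name for illustration only.
 */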
#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
} __no_randomize_layout;

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;
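/*
 * Illustrative sketch (not part of this header): the safe MSR hooks
 * exist so callers can probe MSRs that may not be present.  A
 * hypothetical wrapper following the error convention above could be:
 *
 *	static inline int example_rdmsrl_safe(unsigned int msr, u64 *val)
 *	{
 *		int err;
 *
 *		*val = pv_ops.cpu.read_msr_safe(msr, &err);
 *		return err;	// 0 on success, -EIO if the MSR faulted
 *	}
 *
 * The kernel itself reaches these hooks through the PVOP_* wrappers
 * defined later in this header rather than calling through pv_ops
 * directly; "example_rdmsrl_safe" is a made-up name.
 */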
struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention requires.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;
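/*
 * Illustrative sketch (not part of this header): a backend supplies a
 * callee-save op by wrapping a C function in a register-preserving
 * thunk and storing the thunk in a struct paravirt_callee_save.  With
 * the PV_CALLEE_SAVE_REGS_THUNK() and PV_CALLEE_SAVE() helpers from
 * <asm/paravirt.h>, a hypothetical save_fl might be wired up as:
 *
 *	static unsigned long example_save_fl(void)
 *	{
 *		return some_hypervisor_flags();	// hypothetical backend call
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(example_save_fl);
 *
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(example_save_fl);
 *
 * "example_save_fl" and "some_hypervisor_flags" are made-up names; the
 * thunk preserves all registers except the return register, matching
 * CLBR_RET_REG above.
 */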
struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hooks for intercepting the creation/use of an mm_struct. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS >= 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;

/* This contains all the paravirt structures: each function gets a
 * convenient number, derived from its offset within the template,
 * which we use to indicate what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops	init;
	struct pv_time_ops	time;
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);

unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%c[paravirt_opptr];"
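/*
 * Worked example (illustrative, assuming the template layout above):
 *
 *	// "type" number for the mmu.read_cr3 slot:
 *	int type = PARAVIRT_PATCH(mmu.read_cr3);
 *	// ...and back from the type number to the live function pointer:
 *	void *func = *((void **)&pv_ops + type);
 *
 * This round trip, from structure offset to small integer and back, is
 * what lets a patch site record only its type byte and still let the
 * patcher find the current target function at boot.
 */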
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily predict the destination
 * address.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters going in
 * %rdi, %rsi, %rdx, and %rcx.  Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386.  However, x86_64 also has to clobber all caller-saved
 * registers, of which there are unfortunately quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * i386 passes 64-bit arguments as a pair of adjacent 32-bit arguments,
 * in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
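/*
 * Illustrative sketch: a typical inline wrapper, of the kind
 * <asm/paravirt.h> builds for every op, supplies only the argument and
 * return types and lets the macro do the rest (hypothetical name):
 *
 *	static inline unsigned long example_read_cr3(void)
 *	{
 *		return PVOP_CALL0(unsigned long, mmu.read_cr3);
 *	}
 *
 * The macro emits the indirect call, records the call site in
 * .parainstructions, and masks/extends the return value as described
 * above.
 */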
#ifdef CONFIG_X86_32
/*
 * The self-assignments below only silence "may be used uninitialized"
 * warnings; the asm output constraints are what really set these.
 */
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed to use [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETMASK(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask;							\
	})


#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)
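/*
 * Worked example (illustrative): for a pv op declared to return u8,
 * PVOP_RETMASK(u8) evaluates to 0xffUL, so ____PVOP_CALL() above keeps
 * only the low byte of the return register:
 *
 *	__ret = (u8)(__eax & 0xffUL);
 *
 * The masking matters because the callee is only obliged to set the
 * bits of the return register that the return type actually covers;
 * the high bits may contain leftover values.
 */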
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
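/*
 * Illustrative sketch: on i386, a 64-bit argument is split into two
 * adjacent 32-bit arguments in low,high order, as described in the big
 * comment above.  A wrapper in the style of <asm/paravirt.h> therefore
 * does, roughly (hypothetical name):
 *
 *	static inline void example_set_pte(pte_t *ptep, pte_t pte)
 *	{
 *		if (sizeof(pteval_t) > sizeof(long))	// 32-bit PAE
 *			PVOP_VCALL3(mmu.set_pte, ptep,
 *				    pte.pte, (u64)pte.pte >> 32);
 *		else
 *			PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 *	}
 *
 * On 64-bit configurations the first branch is dead code and the
 * pteval travels as a single register argument.
 */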
/*
 * The 4-argument case is the only place where i386 and x86_64 differ:
 * i386 has to push the fourth argument on the stack, while x86_64 can
 * simply pass it in a register.
 */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;	/* original instructions */
	u8 type;	/* type of this instruction */
	u8 len;		/* length of original instruction */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */