/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE	0
#define CLBR_EAX	(1 << 0)
#define CLBR_ECX	(1 << 1)
#define CLBR_EDX	(1 << 2)
#define CLBR_EDI	(1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all registers the platform has; for i386 that
 * is exactly the four above. */
#define CLBR_ANY	((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX	CLBR_EAX
#define CLBR_RCX	CLBR_ECX
#define CLBR_RDX	CLBR_EDX
#define CLBR_RDI	CLBR_EDI
#define CLBR_RSI	(1 << 4)
#define CLBR_R8		(1 << 5)
#define CLBR_R9		(1 << 6)
#define CLBR_R10	(1 << 7)
#define CLBR_R11	(1 << 8)

#define CLBR_ANY	((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in
 * <asm/paravirt.h>.
 */
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop-pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, void *insn_buff,
			  unsigned long addr, unsigned len);
} __no_randomize_layout;

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif
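/*
 * Illustrative sketch (not part of this interface): a backend that
 * batches page-table updates points pv_ops.mmu.lazy_mode (see struct
 * pv_mmu_ops below) at its own enter/leave hooks.  The my_hv_* names
 * are hypothetical; the paravirt_{enter,leave}_lazy_mmu() helpers
 * declared at the end of this header do the generic mode bookkeeping:
 *
 *	static void my_hv_enter_lazy_mmu(void)
 *	{
 *		paravirt_enter_lazy_mmu();	// mode = PARAVIRT_LAZY_MMU
 *	}
 *
 *	static void my_hv_leave_lazy_mmu(void)
 *	{
 *		my_hv_issue_queued_updates();	// hypothetical batch flush
 *		paravirt_leave_lazy_mmu();
 *	}
 *
 *	pv_ops.mmu.lazy_mode.enter = my_hv_enter_lazy_mmu;
 *	pv_ops.mmu.lazy_mode.leave = my_hv_leave_lazy_mmu;
 */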
struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
} __no_randomize_layout;

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Normal iret.  Jump to this with the standard iret stack
	 * frame set up.
	 */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits returned from
	 * save_fl are undefined, and may be ignored by restore_fl.
	 *
	 * NOTE: callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention requires.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;
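/*
 * Illustrative sketch: the callee-save slots above are called through
 * the PVOP_CALLEE* macros defined later in this header, roughly the
 * way <asm/paravirt.h> wraps them:
 *
 *	static inline unsigned long arch_local_save_flags(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, irq.save_fl);
 *	}
 *
 * A backend fills the slot with a function wrapped by
 * PV_CALLEE_SAVE_REGS_THUNK() (in <asm/paravirt.h>), which spills and
 * restores the extra registers this convention promises to preserve.
 */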
struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hooks for intercepting the creation/use of an mm_struct. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;
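/*
 * Illustrative sketch: ptep_modify_prot_start/_commit above bracket a
 * read-modify-write of a live pte so a hypervisor can fold it into one
 * batched update.  With the signatures declared here, a caller does
 * roughly:
 *
 *	pte_t old = pv_ops.mmu.ptep_modify_prot_start(vma, addr, ptep);
 *	pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep,
 *					   pte_mkdirty(old));	// any change
 *
 * (Real callers go through the arch wrappers rather than pv_ops
 * directly.)
 */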
struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function using the offset, which we use to indicate
 * what to patch.
 */
struct paravirt_patch_template {
	struct pv_init_ops	init;
	struct pv_time_ops	time;
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);

unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%c[paravirt_opptr];"
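/*
 * Illustrative sketch: a backend with no custom code sequences can
 * implement pv_init_ops.patch by deferring to the generic patcher,
 * which emits a direct call to the op (or one of the tiny sequences it
 * special-cases) and returns the patched length.  my_hv_patch is a
 * hypothetical name:
 *
 *	static unsigned my_hv_patch(u8 type, void *insn_buff,
 *				    unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, insn_buff, addr, len);
 *	}
 */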
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax, edx, ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately is quite a few of them (%r8-%r11 on top of the argument
 * registers).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two VCALL and CALL variants, for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * i386 passes 64-bit arguments as a pair of adjacent 32-bit arguments,
 * in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
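/*
 * For example, a void two-argument op is wrapped along the lines of
 * the inline functions in <asm/paravirt.h>:
 *
 *	static inline void set_pte(pte_t *ptep, pte_t pte)
 *	{
 *		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 *	}
 *
 * The wrapper pins down the argument and return types; the PVOP_*
 * macros themselves only deal in register-sized values.
 */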
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETMASK(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask;							\
	})


#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)
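/*
 * Worked example: for an op returning u8, sizeof(rettype) == 1, so
 * PVOP_RETMASK() above evaluates to 0xffUL and ____PVOP_CALL() reduces
 * the raw %[e/r]ax value with
 *
 *	__ret = (u8)(__eax & 0xffUL);
 *
 * discarding whatever the callee left in the upper bits.
 */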
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
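/*
 * The CALLEE variants are used for ops declared as struct
 * paravirt_callee_save, e.g. (mirroring <asm/paravirt.h>):
 *
 *	static inline pteval_t pte_val(pte_t pte)
 *	{
 *		return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
 *	}
 *
 * Because only the return register(s) appear in the clobber list, the
 * compiler need not spill every caller-save register around the call.
 */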
/*
 * The 4-argument case is the only place where i386 and x86_64 differ:
 * i386 has to pass the fourth argument on the stack, while x86_64 can
 * keep it in %rcx, which makes it much simpler.
 */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;	/* original instructions */
	u8 type;	/* type of this instruction */
	u8 len;		/* length of original instruction */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */