#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS   (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG    (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH    (0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS   (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
                         CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG    (CLBR_RAX)
#define CLBR_SCRATCH    (CLBR_R10 | CLBR_R11)

#include <asm/desc_defs.h>
#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
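/*
 * Worked example of the bitmask above (purely descriptive): on 32-bit,
 * CLBR_CALLEE_SAVE = (CLBR_EAX|CLBR_EDX|CLBR_ECX | 0) & ~(CLBR_EAX|CLBR_EDX)
 * which is just CLBR_ECX; on 64-bit it is the argument registers plus
 * %r10/%r11 minus %rax, i.e.
 * CLBR_RDI|CLBR_RSI|CLBR_RDX|CLBR_RCX|CLBR_R8|CLBR_R9|CLBR_R10|CLBR_R11.
 */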
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
        void *func;
};

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};


struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr_amd)(unsigned int msr, int *err);
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /*
         * Atomically enable interrupts and return to userspace.  This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes.  (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret.  Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);
        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        void (*start_context_switch)(struct task_struct *prev);
        void (*end_context_switch)(struct task_struct *next);
};

struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         *
         * NOTE: callers of these functions expect the callee to
         * preserve more registers than the standard C calling
         * convention requires.
         */
        struct paravirt_callee_save save_fl;
        struct paravirt_callee_save restore_fl;
        struct paravirt_callee_save irq_disable;
        struct paravirt_callee_save irq_enable;

        void (*safe_halt)(void);
        void (*halt)(void);

#ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);


        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
                                 struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn,
                                unsigned long start, unsigned long count);
        void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
        void (*release_pte)(unsigned long pfn);
        void (*release_pmd)(unsigned long pfn);
        void (*release_pud)(unsigned long pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);
        struct paravirt_callee_save pte_val;
        struct paravirt_callee_save make_pte;

        struct paravirt_callee_save pgd_val;
        struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        struct paravirt_callee_save pmd_val;
        struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
        struct paravirt_callee_save pud_val;
        struct paravirt_callee_save make_pud;

        void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           phys_addr_t phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
        int (*spin_is_locked)(struct raw_spinlock *lock);
        int (*spin_is_contended)(struct raw_spinlock *lock);
        void (*spin_lock)(struct raw_spinlock *lock);
        void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
        int (*spin_trylock)(struct raw_spinlock *lock);
        void (*spin_unlock)(struct raw_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using its offset, which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x) \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)                             \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),  \
        [paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber) \
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)     \
        "771:\n\t" insn_string "\n" "772:\n"          \
        ".pushsection .parainstructions,\"a\"\n"      \
        _ASM_ALIGN "\n"                               \
        _ASM_PTR " 771b\n"                            \
        " .byte " type "\n"                           \
        " .byte 772b-771b\n"                          \
        " .short " clobber "\n"                       \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string) \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
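/*
 * Descriptive note: each record that _paravirt_alt() emits into
 * .parainstructions (pointer to the site, type byte, length byte,
 * clobber word) has the same layout as struct paravirt_patch_site
 * defined later in this header, which is what apply_paravirt() walks
 * when it patches the kernel.
 */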
/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately is quite a few of them (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes the low word first, then the high word.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
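/*
 * Illustrative sketch (not used by the build): a typical wrapper lower
 * down in this header, e.g.
 *
 *      static inline void write_cr3(unsigned long x)
 *      {
 *              PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 *      }
 *
 * initially compiles to an indirect "call *pv_mmu_ops.write_cr3" with
 * 'x' in %eax (i386) or %rdi (x86_64), plus a .parainstructions record.
 * apply_paravirt() can later turn that site into a direct call, or
 * replace it entirely with whatever short native sequence the backend's
 * patch hook supplies.
 */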
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS \
        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)               "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx), \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS           "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS            PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS                             \
        unsigned long __edi = __edi, __esi = __esi, \
                __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax

#define PVOP_CALL_ARG1(x)               "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS     "=D" (__edi),               \
                                "=S" (__esi), "=d" (__edx), \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define PVOP_VCALLEE_CLOBBERS   "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS    PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif  /* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)      ((void)op)
#endif
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,         \
                      pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr                        \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr                        \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })

#define __PVOP_CALL(rettype, op, pre, post, ...)                  \
        ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,  \
                      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)  \
        ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,   \
                      PVOP_CALLEE_CLOBBERS, ,           \
                      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : call_clbr                                \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" extra_clbr);              \
        })

#define __PVOP_VCALL(op, pre, post, ...)                \
        ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
                       VEXTRA_CLOBBERS,                 \
                       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)          \
        ____PVOP_VCALL(op.func, CLBR_RET_REG,           \
                       PVOP_VCALLEE_CLOBBERS, ,         \
                       pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op) \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op) \
        __PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op) \
        __PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1) \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1) \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1) \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1) \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)                     \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),  \
                    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)                     \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),  \
                     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)                        \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
                          PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)                        \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
                           PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)               \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),  \
                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)               \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),  \
                     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)           \
        __PVOP_CALL(rettype, op,                                  \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",      \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),   \
                    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                   \
        __PVOP_VCALL(op,                                          \
                     "push %[_arg4];", "lea 4(%%esp),%%esp;",     \
                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),        \
                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)           \
        __PVOP_CALL(rettype, op, "", "",                          \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),   \
                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                   \
        __PVOP_VCALL(op, "", "",                                  \
                     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),  \
                     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
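/*
 * Descriptive note: the i386 variants above need the explicit
 * "push %[_arg4]" / "lea 4(%%esp),%%esp" pre/post fragments because
 * regparm(3) only covers %eax/%edx/%ecx; the fourth argument has to be
 * passed on the stack and then popped again after the call.  On x86_64
 * the fourth argument simply goes in %rcx (PVOP_CALL_ARG4), so no
 * pre/post code is needed.
 */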
static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP                      pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}
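/*
 * Descriptive note: safe_halt is the "enable interrupts, then halt"
 * operation (the native version is effectively sti; hlt), while halt
 * is a plain hlt with the current interrupt state, which is why the
 * two wrappers above go through different pv_irq_ops hooks.
 */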
static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_amd(msr, &err);
        return err;
}
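/*
 * Usage sketch (illustrative only, assuming the usual MSR constants
 * from <asm/msr-index.h>): callers use these wrappers exactly like the
 * non-paravirt msr.h versions, e.g.
 *
 *      u64 efer;
 *      rdmsrl(MSR_EFER, efer);
 *      wrmsrl(MSR_EFER, efer | EFER_NX);
 *
 * and the accesses end up in pv_cpu_ops.read_msr/write_msr.
 */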
static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        int __aux;                                      \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned long __aux;                            \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
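/*
 * Descriptive note: the paravirt_alloc_pte/pmd/pud and
 * paravirt_release_pte/pmd/pud wrappers above are notification hooks:
 * they fire when a page starts or stops being used as a pagetable
 * page, so a hypervisor backend can, for instance, validate or pin
 * such pages.  On native hardware they have nothing to do.
 */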
#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}
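/*
 * Descriptive note: the sizeof(pteval_t) > sizeof(long) branches above
 * (and in the helpers below) are only true on 32-bit PAE, where a pte
 * is 64 bits wide; the value is then passed as a low/high pair of
 * 32-bit arguments and the result comes back in %edx:%eax, matching
 * the calling convention described near the PVOP_* macros.
 */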
static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
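/*
 * Descriptive note: the "5 arg words" cases above fall back to a plain
 * indirect call through the ops structure because, on 32-bit PAE, the
 * 64-bit pte plus mm/addr/ptep adds up to five argument words and the
 * PVOP_* wrappers only go up to four arguments; those two call sites
 * therefore stay unpatched.
 */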
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
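/*
 * Usage sketch (illustrative only): callers that update many ptes in a
 * row can let a backend batch whatever work it does by bracketing the
 * updates with the hooks above, e.g.
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (...)
 *              set_pte_at(mm, addr, ptep, pte);
 *      arch_leave_lazy_mmu_mode();
 *
 * which is the deferred-update batching that the comment on
 * struct pv_lazy_ops refers to.
 */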
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop    ((void *)_paravirt_nop)

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         \
        "push %rcx;"                    \
        "push %rdx;"                    \
        "push %rsi;"                    \
        "push %rdi;"                    \
        "push %r8;"                     \
        "push %r9;"                     \
        "push %r10;"                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS      \
        "pop %r11;"                     \
        "pop %r10;"                     \
        "pop %r9;"                      \
        "pop %r8;"                      \
        "pop %rdi;"                     \
        "pop %rsi;"                     \
        "pop %rdx;"                     \
        "pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument parameter. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                         \
        extern typeof(func) __raw_callee_save_##func;           \
        static void *__##func##__ __used = func;                \
                                                                \
        asm(".pushsection .text;"                               \
            "__raw_callee_save_" #func ": "                     \
            PV_SAVE_ALL_CALLER_REGS                             \
            "call " #func ";"                                   \
            PV_RESTORE_ALL_CALLER_REGS                          \
            "ret;"                                              \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                    \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
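/*
 * Usage sketch (illustrative; the function name is made up): a backend
 * whose flag-reading helper is an ordinary C function can make it
 * callable through the paravirt_callee_save slots with
 *
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *      ...
 *      pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * whereas a routine that already preserves the extra registers can be
 * installed directly via __PV_IS_CALLEE_SAVE().
 */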
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
         .align algn;                                   \
         word 771b;                                     \
         .byte ptype;                                   \
         .byte 772b-771b;                               \
         .short clobbers;                               \
        .popsection


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif
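/*
 * Descriptive note: the test above is deliberately inverted: a register
 * is pushed/popped only when its CLBR_* bit is clear in 'set', i.e.
 * when the surrounding PARA_SITE does not allow it to be clobbered and
 * it therefore has to be preserved around the call.
 */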
#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                              \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */