#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long __read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
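
/*
 * Usage sketch (illustrative only, not part of this header): the wrappers
 * above keep the calling conventions of the native <asm/msr.h> helpers, so
 * common code compiles unchanged whether or not CONFIG_PARAVIRT is set:
 *
 *	u32 lo, hi;
 *	u64 val;
 *	int err;
 *
 *	rdmsr(MSR_EFER, lo, hi);
 *	rdmsrl(MSR_EFER, val);
 *	err = rdmsr_safe(MSR_EFER, &lo, &hi);
 *	if (err)
 *		return;
 *	wrmsrl(MSR_EFER, val);
 */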

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
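
/*
 * Note on the sizeof(pteval_t) > sizeof(long) tests below: on a 32-bit PAE
 * kernel a page-table entry is 64 bits wide but a long is only 32, so the
 * value is passed to the hook as two 32-bit halves; on 64-bit (and non-PAE
 * 32-bit) kernels it fits in a single register argument.  The condition is
 * a compile-time constant, so only one branch survives in the object code.
 */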

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
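
/*
 * A minimal sketch of the start/commit protocol (illustrative; the real
 * callers are generic mm paths such as change_pte_range()):
 *
 *	pte_t old;
 *
 *	old = ptep_modify_prot_start(mm, addr, ptep);
 *	old = pte_modify(old, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, old);
 *
 * This lets a hypervisor see the read-modify-write of the pte as a single
 * transaction instead of two independent accesses.
 */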

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}
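
/*
 * Lazy MMU mode usage sketch (illustrative; the real callers are the
 * generic page-table walkers):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * Between enter and leave a hypervisor may queue the updates and apply
 * them in one batch when the mode is left or explicitly flushed.
 */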

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */
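
/*
 * Rough semantics of the lock hooks above (see the pv slowpath code in
 * kernel/locking/ for the real users): pv_wait(ptr, val) asks the
 * hypervisor to halt this vCPU for as long as *ptr still reads as val,
 * and pv_kick(cpu) wakes a vCPU halted in pv_wait.  On bare metal both
 * default to no-ops.
 */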

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but saving all of them would be too much.  We
 * clobber all caller-saved registers but the argument parameter. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
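
/*
 * Typical (illustrative) use of the thunk machinery when filling in a
 * paravirt_callee_save slot; "my_save_fl" is a made-up example function
 * defined elsewhere in the same hypervisor backend:
 *
 *	unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * PV_CALLEE_SAVE() then points the op at the generated
 * __raw_callee_save_my_save_fl thunk rather than at my_save_fl itself.
 */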

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short	clobbers;				\
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif
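
/*
 * The macros below are used from entry assembly.  Each one expands, via
 * PARA_SITE/_PVSITE, to a default indirect call or jump through the
 * corresponding pv_*_ops slot plus a record in the .parainstructions
 * section, so the site can later be patched with the native instruction
 * (e.g. a plain "iretq", "cli" or "sti") when no hypervisor is present.
 */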

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */