/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
#endif
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    const struct flush_tlb_info *info)
{
        PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(mmu.exit_mmap, mm);
}
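/*
 * Illustrative sketch, not the literal macro expansion (see
 * <asm/paravirt_types.h> for the real PVOP_* plumbing): a wrapper such as
 * __flush_tlb() above boils down to a patchable indirect call through the
 * global pv_ops table, conceptually
 *
 *      pv_ops.mmu.flush_tlb_user();
 *
 * which lets a hypervisor install its own handler in place of the native one.
 */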
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
        PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(cpu.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
        return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
                                      unsigned low, unsigned high)
{
        PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        u64 _l = paravirt_read_msr(msr);                \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr(msr, val1, val2);            \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        val = paravirt_read_msr(msr);                   \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)   paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = paravirt_read_pmc(counter);            \
        low = (u32)_l;                                  \
        high = _l >> 32;                                \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
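/*
 * Usage sketch for the MSR accessor wrappers above (illustrative only;
 * 'msr' and 'val' are hypothetical locals, not defined here):
 *
 *      u64 val;
 *
 *      rdmsrl(msr, val);               // no fault handling
 *      if (rdmsrl_safe(msr, &val))     // non-zero return: the read faulted
 *              val = 0;
 *      wrmsrl(msr, val);
 */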
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

        return (pte_t) { .pte = ret };
}
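/*
 * Note on the sizeof(pteval_t) > sizeof(long) pattern used in __pte() above
 * and in the pte/pgd/pmd helpers below: on 32-bit PAE kernels the page table
 * entry type is 64 bits wide while 'long' is 32 bits, so the value is passed
 * to the pv op as two 32-bit arguments (low word, then "(u64)val >> 32").
 * On 64-bit kernels (and non-PAE 32-bit) the comparison is false and the
 * single-argument form is used; the compiler can discard the dead branch.
 */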
static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep, pte_t old_pte, pte_t pte)
{

        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.ptep_modify_prot_commit,
                            vma, addr, ptep, pte.pte);
}
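/*
 * Illustrative use of the modify_prot transaction above (sketch only; the
 * pte_mkwrite() step is a hypothetical modification, and callers normally
 * hold the page table lock):
 *
 *      pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);
 *      pte_t new_pte = pte_mkwrite(old_pte);
 *      ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */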
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        p4dval_t val = native_p4d_val(p4d);

        PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
        p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

        return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
        return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {                                      \
        if (pgtable_l5_enabled())                                       \
                __set_pgd(pgdp, pgdval);                                \
        else                                                            \
                set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });     \
} while (0)

#define pgd_clear(pgdp) do {                                            \
        if (pgtable_l5_enabled())                                       \
                set_pgd(pgdp, __pgd(0));                                \
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS == 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
        set_p4d(p4dp, __p4d(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS == 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.flush);
}
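/*
 * Batching sketch (illustrative only): generic mm code brackets runs of
 * page table updates with the lazy MMU hooks above so a hypervisor can
 * queue them and flush the whole batch at once, roughly:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (each pte in the range)
 *              set_pte_at(mm, addr, ptep, pte);
 *      arch_leave_lazy_mmu_mode();
 */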
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                         u32 val)
{
        PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
        return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */
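/*
 * These lock ops back the paravirtual qspinlock slow path (see
 * kernel/locking/qspinlock_paravirt.h): roughly, pv_wait(ptr, val) lets a
 * vCPU block while *ptr still reads back as val, and pv_kick(cpu) wakes
 * that vCPU again.  pv_queued_spin_unlock() uses the callee-save convention
 * (PVOP_VCALLEE1) so the native fast path stays cheap; the
 * __raw_callee_save_* declarations above are the thunked native variants.
 */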
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* We save only some registers; saving all of them would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                                       \
        ((struct paravirt_callee_save) { func })
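/*
 * Usage sketch (illustrative, based on how the native spinlock ops are
 * wired up elsewhere in the tree):
 *
 *      PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 *      pv_ops.lock.queued_spin_unlock =
 *              PV_CALLEE_SAVE(__native_queued_spin_unlock);
 *
 * The thunk gives __native_queued_spin_unlock() the callee-save calling
 * convention that PVOP_VCALLEE1() assumes.
 */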
#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
        .popsection


#define COND_PUSH(set, mask, reg)               \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)         ((off) / 8)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)         ((off) / 4)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(PV_CPU_iret),                              \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),                        \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);         \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs),                            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);             \
                 )

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),                   \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                            \
        PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                           \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);            \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX                                                 \
        PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),                          \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);           \
                 )

#endif /* CONFIG_PARAVIRT_XXL */


#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */