paravirt.h (27876f3882fdd4acb3d3614a0133ecdc777fc292) | paravirt.h (5c83511bdb9832c86be20fb86b783356e2f58062) |
---|---|
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _ASM_X86_PARAVIRT_H 3#define _ASM_X86_PARAVIRT_H 4/* Various instructions on x86 need to be replaced for 5 * para-virtualization: those hooks are defined here. */ 6 7#ifdef CONFIG_PARAVIRT 8#include <asm/pgtable_types.h> --- 5 unchanged lines hidden (view full) --- 14#ifndef __ASSEMBLY__ 15#include <linux/bug.h> 16#include <linux/types.h> 17#include <linux/cpumask.h> 18#include <asm/frame.h> 19 20static inline void load_sp0(unsigned long sp0) 21{ | 1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _ASM_X86_PARAVIRT_H 3#define _ASM_X86_PARAVIRT_H 4/* Various instructions on x86 need to be replaced for 5 * para-virtualization: those hooks are defined here. */ 6 7#ifdef CONFIG_PARAVIRT 8#include <asm/pgtable_types.h> --- 5 unchanged lines hidden (view full) --- 14#ifndef __ASSEMBLY__ 15#include <linux/bug.h> 16#include <linux/types.h> 17#include <linux/cpumask.h> 18#include <asm/frame.h> 19 20static inline void load_sp0(unsigned long sp0) 21{ |
22 PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0); | 22 PVOP_VCALL1(cpu.load_sp0, sp0); |
23} 24 25/* The paravirtualized CPUID instruction. */ 26static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 27 unsigned int *ecx, unsigned int *edx) 28{ | 23} 24 25/* The paravirtualized CPUID instruction. */ 26static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 27 unsigned int *ecx, unsigned int *edx) 28{ |
29 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx); | 29 PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx); |
30} 31 32/* 33 * These special macros can be used to get or set a debugging register 34 */ 35static inline unsigned long paravirt_get_debugreg(int reg) 36{ | 30} 31 32/* 33 * These special macros can be used to get or set a debugging register 34 */ 35static inline unsigned long paravirt_get_debugreg(int reg) 36{ |
37 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg); | 37 return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg); |
38} 39#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) 40static inline void set_debugreg(unsigned long val, int reg) 41{ | 38} 39#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) 40static inline void set_debugreg(unsigned long val, int reg) 41{ |
42 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val); | 42 PVOP_VCALL2(cpu.set_debugreg, reg, val); |
43} 44 45static inline unsigned long read_cr0(void) 46{ | 43} 44 45static inline unsigned long read_cr0(void) 46{ |
47 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0); | 47 return PVOP_CALL0(unsigned long, cpu.read_cr0); |
48} 49 50static inline void write_cr0(unsigned long x) 51{ | 48} 49 50static inline void write_cr0(unsigned long x) 51{ |
52 PVOP_VCALL1(pv_cpu_ops.write_cr0, x); | 52 PVOP_VCALL1(cpu.write_cr0, x); |
53} 54 55static inline unsigned long read_cr2(void) 56{ | 53} 54 55static inline unsigned long read_cr2(void) 56{ |
57 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2); | 57 return PVOP_CALL0(unsigned long, mmu.read_cr2); |
58} 59 60static inline void write_cr2(unsigned long x) 61{ | 58} 59 60static inline void write_cr2(unsigned long x) 61{ |
62 PVOP_VCALL1(pv_mmu_ops.write_cr2, x); | 62 PVOP_VCALL1(mmu.write_cr2, x); |
63} 64 65static inline unsigned long __read_cr3(void) 66{ | 63} 64 65static inline unsigned long __read_cr3(void) 66{ |
67 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3); | 67 return PVOP_CALL0(unsigned long, mmu.read_cr3); |
68} 69 70static inline void write_cr3(unsigned long x) 71{ | 68} 69 70static inline void write_cr3(unsigned long x) 71{ |
72 PVOP_VCALL1(pv_mmu_ops.write_cr3, x); | 72 PVOP_VCALL1(mmu.write_cr3, x); |
73} 74 75static inline void __write_cr4(unsigned long x) 76{ | 73} 74 75static inline void __write_cr4(unsigned long x) 76{ |
77 PVOP_VCALL1(pv_cpu_ops.write_cr4, x); | 77 PVOP_VCALL1(cpu.write_cr4, x); |
78} 79 80#ifdef CONFIG_X86_64 81static inline unsigned long read_cr8(void) 82{ | 78} 79 80#ifdef CONFIG_X86_64 81static inline unsigned long read_cr8(void) 82{ |
83 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8); | 83 return PVOP_CALL0(unsigned long, cpu.read_cr8); |
84} 85 86static inline void write_cr8(unsigned long x) 87{ | 84} 85 86static inline void write_cr8(unsigned long x) 87{ |
88 PVOP_VCALL1(pv_cpu_ops.write_cr8, x); | 88 PVOP_VCALL1(cpu.write_cr8, x); |
89} 90#endif 91 92static inline void arch_safe_halt(void) 93{ | 89} 90#endif 91 92static inline void arch_safe_halt(void) 93{ |
94 PVOP_VCALL0(pv_irq_ops.safe_halt); | 94 PVOP_VCALL0(irq.safe_halt); |
95} 96 97static inline void halt(void) 98{ | 95} 96 97static inline void halt(void) 98{ |
99 PVOP_VCALL0(pv_irq_ops.halt); | 99 PVOP_VCALL0(irq.halt); |
100} 101 102static inline void wbinvd(void) 103{ | 100} 101 102static inline void wbinvd(void) 103{ |
104 PVOP_VCALL0(pv_cpu_ops.wbinvd); | 104 PVOP_VCALL0(cpu.wbinvd); |
105} 106 107#define get_kernel_rpl() (pv_info.kernel_rpl) 108 109static inline u64 paravirt_read_msr(unsigned msr) 110{ | 105} 106 107#define get_kernel_rpl() (pv_info.kernel_rpl) 108 109static inline u64 paravirt_read_msr(unsigned msr) 110{ |
111 return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr); | 111 return PVOP_CALL1(u64, cpu.read_msr, msr); |
112} 113 114static inline void paravirt_write_msr(unsigned msr, 115 unsigned low, unsigned high) 116{ | 112} 113 114static inline void paravirt_write_msr(unsigned msr, 115 unsigned low, unsigned high) 116{ |
117 PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high); | 117 PVOP_VCALL3(cpu.write_msr, msr, low, high); |
118} 119 120static inline u64 paravirt_read_msr_safe(unsigned msr, int *err) 121{ | 118} 119 120static inline u64 paravirt_read_msr_safe(unsigned msr, int *err) 121{ |
122 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err); | 122 return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err); |
123} 124 125static inline int paravirt_write_msr_safe(unsigned msr, 126 unsigned low, unsigned high) 127{ | 123} 124 125static inline int paravirt_write_msr_safe(unsigned msr, 126 unsigned low, unsigned high) 127{ |
128 return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high); | 128 return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high); |
129} 130 131#define rdmsr(msr, val1, val2) \ 132do { \ 133 u64 _l = paravirt_read_msr(msr); \ 134 val1 = (u32)_l; \ 135 val2 = _l >> 32; \ 136} while (0) --- 30 unchanged lines hidden (view full) --- 167 int err; 168 169 *p = paravirt_read_msr_safe(msr, &err); 170 return err; 171} 172 173static inline unsigned long long paravirt_sched_clock(void) 174{ | 129} 130 131#define rdmsr(msr, val1, val2) \ 132do { \ 133 u64 _l = paravirt_read_msr(msr); \ 134 val1 = (u32)_l; \ 135 val2 = _l >> 32; \ 136} while (0) --- 30 unchanged lines hidden (view full) --- 167 int err; 168 169 *p = paravirt_read_msr_safe(msr, &err); 170 return err; 171} 172 173static inline unsigned long long paravirt_sched_clock(void) 174{ |
175 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); | 175 return PVOP_CALL0(unsigned long long, time.sched_clock); |
176} 177 178struct static_key; 179extern struct static_key paravirt_steal_enabled; 180extern struct static_key paravirt_steal_rq_enabled; 181 182static inline u64 paravirt_steal_clock(int cpu) 183{ | 176} 177 178struct static_key; 179extern struct static_key paravirt_steal_enabled; 180extern struct static_key paravirt_steal_rq_enabled; 181 182static inline u64 paravirt_steal_clock(int cpu) 183{ |
184 return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu); | 184 return PVOP_CALL1(u64, time.steal_clock, cpu); |
185} 186 187static inline unsigned long long paravirt_read_pmc(int counter) 188{ | 185} 186 187static inline unsigned long long paravirt_read_pmc(int counter) 188{ |
189 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); | 189 return PVOP_CALL1(u64, cpu.read_pmc, counter); |
190} 191 192#define rdpmc(counter, low, high) \ 193do { \ 194 u64 _l = paravirt_read_pmc(counter); \ 195 low = (u32)_l; \ 196 high = _l >> 32; \ 197} while (0) 198 199#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) 200 201static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) 202{ | 190} 191 192#define rdpmc(counter, low, high) \ 193do { \ 194 u64 _l = paravirt_read_pmc(counter); \ 195 low = (u32)_l; \ 196 high = _l >> 32; \ 197} while (0) 198 199#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) 200 201static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) 202{ |
203 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries); | 203 PVOP_VCALL2(cpu.alloc_ldt, ldt, entries); |
204} 205 206static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) 207{ | 204} 205 206static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) 207{ |
208 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries); | 208 PVOP_VCALL2(cpu.free_ldt, ldt, entries); |
209} 210 211static inline void load_TR_desc(void) 212{ | 209} 210 211static inline void load_TR_desc(void) 212{ |
213 PVOP_VCALL0(pv_cpu_ops.load_tr_desc); | 213 PVOP_VCALL0(cpu.load_tr_desc); |
214} 215static inline void load_gdt(const struct desc_ptr *dtr) 216{ | 214} 215static inline void load_gdt(const struct desc_ptr *dtr) 216{ |
217 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr); | 217 PVOP_VCALL1(cpu.load_gdt, dtr); |
218} 219static inline void load_idt(const struct desc_ptr *dtr) 220{ | 218} 219static inline void load_idt(const struct desc_ptr *dtr) 220{ |
221 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr); | 221 PVOP_VCALL1(cpu.load_idt, dtr); |
222} 223static inline void set_ldt(const void *addr, unsigned entries) 224{ | 222} 223static inline void set_ldt(const void *addr, unsigned entries) 224{ |
225 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); | 225 PVOP_VCALL2(cpu.set_ldt, addr, entries); |
226} 227static inline unsigned long paravirt_store_tr(void) 228{ | 226} 227static inline unsigned long paravirt_store_tr(void) 228{ |
229 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr); | 229 return PVOP_CALL0(unsigned long, cpu.store_tr); |
230} 231#define store_tr(tr) ((tr) = paravirt_store_tr()) 232static inline void load_TLS(struct thread_struct *t, unsigned cpu) 233{ | 230} 231#define store_tr(tr) ((tr) = paravirt_store_tr()) 232static inline void load_TLS(struct thread_struct *t, unsigned cpu) 233{ |
234 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); | 234 PVOP_VCALL2(cpu.load_tls, t, cpu); |
235} 236 237#ifdef CONFIG_X86_64 238static inline void load_gs_index(unsigned int gs) 239{ | 235} 236 237#ifdef CONFIG_X86_64 238static inline void load_gs_index(unsigned int gs) 239{ |
240 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs); | 240 PVOP_VCALL1(cpu.load_gs_index, gs); |
241} 242#endif 243 244static inline void write_ldt_entry(struct desc_struct *dt, int entry, 245 const void *desc) 246{ | 241} 242#endif 243 244static inline void write_ldt_entry(struct desc_struct *dt, int entry, 245 const void *desc) 246{ |
247 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc); | 247 PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc); |
248} 249 250static inline void write_gdt_entry(struct desc_struct *dt, int entry, 251 void *desc, int type) 252{ | 248} 249 250static inline void write_gdt_entry(struct desc_struct *dt, int entry, 251 void *desc, int type) 252{ |
253 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type); | 253 PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type); |
254} 255 256static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) 257{ | 254} 255 256static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) 257{ |
258 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g); | 258 PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g); |
259} 260static inline void set_iopl_mask(unsigned mask) 261{ | 259} 260static inline void set_iopl_mask(unsigned mask) 261{ |
262 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask); | 262 PVOP_VCALL1(cpu.set_iopl_mask, mask); |
263} 264 265/* The paravirtualized I/O functions */ 266static inline void slow_down_io(void) 267{ | 263} 264 265/* The paravirtualized I/O functions */ 266static inline void slow_down_io(void) 267{ |
268 pv_cpu_ops.io_delay(); | 268 pv_ops.cpu.io_delay(); |
269#ifdef REALLY_SLOW_IO | 269#ifdef REALLY_SLOW_IO |
270 pv_cpu_ops.io_delay(); 271 pv_cpu_ops.io_delay(); 272 pv_cpu_ops.io_delay(); | 270 pv_ops.cpu.io_delay(); 271 pv_ops.cpu.io_delay(); 272 pv_ops.cpu.io_delay(); |
273#endif 274} 275 276static inline void paravirt_activate_mm(struct mm_struct *prev, 277 struct mm_struct *next) 278{ | 273#endif 274} 275 276static inline void paravirt_activate_mm(struct mm_struct *prev, 277 struct mm_struct *next) 278{ |
279 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next); | 279 PVOP_VCALL2(mmu.activate_mm, prev, next); |
280} 281 282static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm, 283 struct mm_struct *mm) 284{ | 280} 281 282static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm, 283 struct mm_struct *mm) 284{ |
285 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm); | 285 PVOP_VCALL2(mmu.dup_mmap, oldmm, mm); |
286} 287 288static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) 289{ | 286} 287 288static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) 289{ |
290 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm); | 290 PVOP_VCALL1(mmu.exit_mmap, mm); |
291} 292 293static inline void __flush_tlb(void) 294{ | 291} 292 293static inline void __flush_tlb(void) 294{ |
295 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user); | 295 PVOP_VCALL0(mmu.flush_tlb_user); |
296} 297static inline void __flush_tlb_global(void) 298{ | 296} 297static inline void __flush_tlb_global(void) 298{ |
299 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); | 299 PVOP_VCALL0(mmu.flush_tlb_kernel); |
300} 301static inline void __flush_tlb_one_user(unsigned long addr) 302{ | 300} 301static inline void __flush_tlb_one_user(unsigned long addr) 302{ |
303 PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr); | 303 PVOP_VCALL1(mmu.flush_tlb_one_user, addr); |
304} 305 306static inline void flush_tlb_others(const struct cpumask *cpumask, 307 const struct flush_tlb_info *info) 308{ | 304} 305 306static inline void flush_tlb_others(const struct cpumask *cpumask, 307 const struct flush_tlb_info *info) 308{ |
309 PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info); | 309 PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info); |
310} 311 312static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) 313{ | 310} 311 312static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) 313{ |
314 PVOP_VCALL2(pv_mmu_ops.tlb_remove_table, tlb, table); | 314 PVOP_VCALL2(mmu.tlb_remove_table, tlb, table); |
315} 316 317static inline int paravirt_pgd_alloc(struct mm_struct *mm) 318{ | 315} 316 317static inline int paravirt_pgd_alloc(struct mm_struct *mm) 318{ |
319 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm); | 319 return PVOP_CALL1(int, mmu.pgd_alloc, mm); |
320} 321 322static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) 323{ | 320} 321 322static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) 323{ |
324 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd); | 324 PVOP_VCALL2(mmu.pgd_free, mm, pgd); |
325} 326 327static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) 328{ | 325} 326 327static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) 328{ |
329 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn); | 329 PVOP_VCALL2(mmu.alloc_pte, mm, pfn); |
330} 331static inline void paravirt_release_pte(unsigned long pfn) 332{ | 330} 331static inline void paravirt_release_pte(unsigned long pfn) 332{ |
333 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn); | 333 PVOP_VCALL1(mmu.release_pte, pfn); |
334} 335 336static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) 337{ | 334} 335 336static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) 337{ |
338 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); | 338 PVOP_VCALL2(mmu.alloc_pmd, mm, pfn); |
339} 340 341static inline void paravirt_release_pmd(unsigned long pfn) 342{ | 339} 340 341static inline void paravirt_release_pmd(unsigned long pfn) 342{ |
343 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); | 343 PVOP_VCALL1(mmu.release_pmd, pfn); |
344} 345 346static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) 347{ | 344} 345 346static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) 347{ |
348 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn); | 348 PVOP_VCALL2(mmu.alloc_pud, mm, pfn); |
349} 350static inline void paravirt_release_pud(unsigned long pfn) 351{ | 349} 350static inline void paravirt_release_pud(unsigned long pfn) 351{ |
352 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); | 352 PVOP_VCALL1(mmu.release_pud, pfn); |
353} 354 355static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) 356{ | 353} 354 355static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) 356{ |
357 PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn); | 357 PVOP_VCALL2(mmu.alloc_p4d, mm, pfn); |
358} 359 360static inline void paravirt_release_p4d(unsigned long pfn) 361{ | 358} 359 360static inline void paravirt_release_p4d(unsigned long pfn) 361{ |
362 PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn); | 362 PVOP_VCALL1(mmu.release_p4d, pfn); |
363} 364 365static inline pte_t __pte(pteval_t val) 366{ 367 pteval_t ret; 368 369 if (sizeof(pteval_t) > sizeof(long)) | 363} 364 365static inline pte_t __pte(pteval_t val) 366{ 367 pteval_t ret; 368 369 if (sizeof(pteval_t) > sizeof(long)) |
370 ret = PVOP_CALLEE2(pteval_t, 371 pv_mmu_ops.make_pte, 372 val, (u64)val >> 32); | 370 ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32); |
373 else | 371 else |
374 ret = PVOP_CALLEE1(pteval_t, 375 pv_mmu_ops.make_pte, 376 val); | 372 ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val); |
377 378 return (pte_t) { .pte = ret }; 379} 380 381static inline pteval_t pte_val(pte_t pte) 382{ 383 pteval_t ret; 384 385 if (sizeof(pteval_t) > sizeof(long)) | 373 374 return (pte_t) { .pte = ret }; 375} 376 377static inline pteval_t pte_val(pte_t pte) 378{ 379 pteval_t ret; 380 381 if (sizeof(pteval_t) > sizeof(long)) |
386 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val, | 382 ret = PVOP_CALLEE2(pteval_t, mmu.pte_val, |
387 pte.pte, (u64)pte.pte >> 32); 388 else | 383 pte.pte, (u64)pte.pte >> 32); 384 else |
389 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val, 390 pte.pte); | 385 ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte); |
391 392 return ret; 393} 394 395static inline pgd_t __pgd(pgdval_t val) 396{ 397 pgdval_t ret; 398 399 if (sizeof(pgdval_t) > sizeof(long)) | 386 387 return ret; 388} 389 390static inline pgd_t __pgd(pgdval_t val) 391{ 392 pgdval_t ret; 393 394 if (sizeof(pgdval_t) > sizeof(long)) |
400 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd, 401 val, (u64)val >> 32); | 395 ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32); |
402 else | 396 else |
403 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd, 404 val); | 397 ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val); |
405 406 return (pgd_t) { ret }; 407} 408 409static inline pgdval_t pgd_val(pgd_t pgd) 410{ 411 pgdval_t ret; 412 413 if (sizeof(pgdval_t) > sizeof(long)) | 398 399 return (pgd_t) { ret }; 400} 401 402static inline pgdval_t pgd_val(pgd_t pgd) 403{ 404 pgdval_t ret; 405 406 if (sizeof(pgdval_t) > sizeof(long)) |
414 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val, | 407 ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val, |
415 pgd.pgd, (u64)pgd.pgd >> 32); 416 else | 408 pgd.pgd, (u64)pgd.pgd >> 32); 409 else |
417 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val, 418 pgd.pgd); | 410 ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd); |
419 420 return ret; 421} 422 423#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 424static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, 425 pte_t *ptep) 426{ 427 pteval_t ret; 428 | 411 412 return ret; 413} 414 415#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 416static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, 417 pte_t *ptep) 418{ 419 pteval_t ret; 420 |
429 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start, 430 mm, addr, ptep); | 421 ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep); |
431 432 return (pte_t) { .pte = ret }; 433} 434 435static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, 436 pte_t *ptep, pte_t pte) 437{ 438 if (sizeof(pteval_t) > sizeof(long)) 439 /* 5 arg words */ | 422 423 return (pte_t) { .pte = ret }; 424} 425 426static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, 427 pte_t *ptep, pte_t pte) 428{ 429 if (sizeof(pteval_t) > sizeof(long)) 430 /* 5 arg words */ |
440 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte); | 431 pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte); |
441 else | 432 else |
442 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit, | 433 PVOP_VCALL4(mmu.ptep_modify_prot_commit, |
443 mm, addr, ptep, pte.pte); 444} 445 446static inline void set_pte(pte_t *ptep, pte_t pte) 447{ 448 if (sizeof(pteval_t) > sizeof(long)) | 434 mm, addr, ptep, pte.pte); 435} 436 437static inline void set_pte(pte_t *ptep, pte_t pte) 438{ 439 if (sizeof(pteval_t) > sizeof(long)) |
449 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, 450 pte.pte, (u64)pte.pte >> 32); | 440 PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32); |
451 else | 441 else |
452 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, 453 pte.pte); | 442 PVOP_VCALL2(mmu.set_pte, ptep, pte.pte); |
454} 455 456static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 457 pte_t *ptep, pte_t pte) 458{ 459 if (sizeof(pteval_t) > sizeof(long)) 460 /* 5 arg words */ | 443} 444 445static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 446 pte_t *ptep, pte_t pte) 447{ 448 if (sizeof(pteval_t) > sizeof(long)) 449 /* 5 arg words */ |
461 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); | 450 pv_ops.mmu.set_pte_at(mm, addr, ptep, pte); |
462 else | 451 else |
463 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); | 452 PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte); |
464} 465 466static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 467{ 468 pmdval_t val = native_pmd_val(pmd); 469 470 if (sizeof(pmdval_t) > sizeof(long)) | 453} 454 455static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 456{ 457 pmdval_t val = native_pmd_val(pmd); 458 459 if (sizeof(pmdval_t) > sizeof(long)) |
471 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32); | 460 PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32); |
472 else | 461 else |
473 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val); | 462 PVOP_VCALL2(mmu.set_pmd, pmdp, val); |
474} 475 476#if CONFIG_PGTABLE_LEVELS >= 3 477static inline pmd_t __pmd(pmdval_t val) 478{ 479 pmdval_t ret; 480 481 if (sizeof(pmdval_t) > sizeof(long)) | 463} 464 465#if CONFIG_PGTABLE_LEVELS >= 3 466static inline pmd_t __pmd(pmdval_t val) 467{ 468 pmdval_t ret; 469 470 if (sizeof(pmdval_t) > sizeof(long)) |
482 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd, 483 val, (u64)val >> 32); | 471 ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32); |
484 else | 472 else |
485 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd, 486 val); | 473 ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val); |
487 488 return (pmd_t) { ret }; 489} 490 491static inline pmdval_t pmd_val(pmd_t pmd) 492{ 493 pmdval_t ret; 494 495 if (sizeof(pmdval_t) > sizeof(long)) | 474 475 return (pmd_t) { ret }; 476} 477 478static inline pmdval_t pmd_val(pmd_t pmd) 479{ 480 pmdval_t ret; 481 482 if (sizeof(pmdval_t) > sizeof(long)) |
496 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val, | 483 ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val, |
497 pmd.pmd, (u64)pmd.pmd >> 32); 498 else | 484 pmd.pmd, (u64)pmd.pmd >> 32); 485 else |
499 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val, 500 pmd.pmd); | 486 ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); |
501 502 return ret; 503} 504 505static inline void set_pud(pud_t *pudp, pud_t pud) 506{ 507 pudval_t val = native_pud_val(pud); 508 509 if (sizeof(pudval_t) > sizeof(long)) | 487 488 return ret; 489} 490 491static inline void set_pud(pud_t *pudp, pud_t pud) 492{ 493 pudval_t val = native_pud_val(pud); 494 495 if (sizeof(pudval_t) > sizeof(long)) |
510 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, 511 val, (u64)val >> 32); | 496 PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32); |
512 else | 497 else |
513 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, 514 val); | 498 PVOP_VCALL2(mmu.set_pud, pudp, val); |
515} 516#if CONFIG_PGTABLE_LEVELS >= 4 517static inline pud_t __pud(pudval_t val) 518{ 519 pudval_t ret; 520 521 if (sizeof(pudval_t) > sizeof(long)) | 499} 500#if CONFIG_PGTABLE_LEVELS >= 4 501static inline pud_t __pud(pudval_t val) 502{ 503 pudval_t ret; 504 505 if (sizeof(pudval_t) > sizeof(long)) |
522 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud, 523 val, (u64)val >> 32); | 506 ret = PVOP_CALLEE2(pudval_t, mmu.make_pud, val, (u64)val >> 32); |
524 else | 507 else |
525 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud, 526 val); | 508 ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val); |
527 528 return (pud_t) { ret }; 529} 530 531static inline pudval_t pud_val(pud_t pud) 532{ 533 pudval_t ret; 534 535 if (sizeof(pudval_t) > sizeof(long)) | 509 510 return (pud_t) { ret }; 511} 512 513static inline pudval_t pud_val(pud_t pud) 514{ 515 pudval_t ret; 516 517 if (sizeof(pudval_t) > sizeof(long)) |
536 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val, | 518 ret = PVOP_CALLEE2(pudval_t, mmu.pud_val, |
537 pud.pud, (u64)pud.pud >> 32); 538 else | 519 pud.pud, (u64)pud.pud >> 32); 520 else |
539 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val, 540 pud.pud); | 521 ret = PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud); |
541 542 return ret; 543} 544 545static inline void pud_clear(pud_t *pudp) 546{ 547 set_pud(pudp, __pud(0)); 548} 549 550static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) 551{ 552 p4dval_t val = native_p4d_val(p4d); 553 554 if (sizeof(p4dval_t) > sizeof(long)) | 522 523 return ret; 524} 525 526static inline void pud_clear(pud_t *pudp) 527{ 528 set_pud(pudp, __pud(0)); 529} 530 531static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) 532{ 533 p4dval_t val = native_p4d_val(p4d); 534 535 if (sizeof(p4dval_t) > sizeof(long)) |
555 PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp, 556 val, (u64)val >> 32); | 536 PVOP_VCALL3(mmu.set_p4d, p4dp, val, (u64)val >> 32); |
557 else | 537 else |
558 PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp, 559 val); | 538 PVOP_VCALL2(mmu.set_p4d, p4dp, val); |
560} 561 562#if CONFIG_PGTABLE_LEVELS >= 5 563 564static inline p4d_t __p4d(p4dval_t val) 565{ | 539} 540 541#if CONFIG_PGTABLE_LEVELS >= 5 542 543static inline p4d_t __p4d(p4dval_t val) 544{ |
566 p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val); | 545 p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val); |
567 568 return (p4d_t) { ret }; 569} 570 571static inline p4dval_t p4d_val(p4d_t p4d) 572{ | 546 547 return (p4d_t) { ret }; 548} 549 550static inline p4dval_t p4d_val(p4d_t p4d) 551{ |
573 return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d); | 552 return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d); |
574} 575 576static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) 577{ | 553} 554 555static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) 556{ |
578 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd)); | 557 PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd)); |
579} 580 581#define set_pgd(pgdp, pgdval) do { \ 582 if (pgtable_l5_enabled()) \ 583 __set_pgd(pgdp, pgdval); \ 584 else \ 585 set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd }); \ 586} while (0) --- 14 unchanged lines hidden (view full) --- 601 602#endif /* CONFIG_PGTABLE_LEVELS >= 3 */ 603 604#ifdef CONFIG_X86_PAE 605/* Special-case pte-setting operations for PAE, which can't update a 606 64-bit pte atomically */ 607static inline void set_pte_atomic(pte_t *ptep, pte_t pte) 608{ | 558} 559 560#define set_pgd(pgdp, pgdval) do { \ 561 if (pgtable_l5_enabled()) \ 562 __set_pgd(pgdp, pgdval); \ 563 else \ 564 set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd }); \ 565} while (0) --- 14 unchanged lines hidden (view full) --- 580 581#endif /* CONFIG_PGTABLE_LEVELS >= 3 */ 582 583#ifdef CONFIG_X86_PAE 584/* Special-case pte-setting operations for PAE, which can't update a 585 64-bit pte atomically */ 586static inline void set_pte_atomic(pte_t *ptep, pte_t pte) 587{ |
609 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, 610 pte.pte, pte.pte >> 32); | 588 PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32); |
611} 612 613static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 614 pte_t *ptep) 615{ | 589} 590 591static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 592 pte_t *ptep) 593{ |
616 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); | 594 PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep); |
617} 618 619static inline void pmd_clear(pmd_t *pmdp) 620{ | 595} 596 597static inline void pmd_clear(pmd_t *pmdp) 598{ |
621 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); | 599 PVOP_VCALL1(mmu.pmd_clear, pmdp); |
622} 623#else /* !CONFIG_X86_PAE */ 624static inline void set_pte_atomic(pte_t *ptep, pte_t pte) 625{ 626 set_pte(ptep, pte); 627} 628 629static inline void pte_clear(struct mm_struct *mm, unsigned long addr, --- 6 unchanged lines hidden (view full) --- 636{ 637 set_pmd(pmdp, __pmd(0)); 638} 639#endif /* CONFIG_X86_PAE */ 640 641#define __HAVE_ARCH_START_CONTEXT_SWITCH 642static inline void arch_start_context_switch(struct task_struct *prev) 643{ | 600} 601#else /* !CONFIG_X86_PAE */ 602static inline void set_pte_atomic(pte_t *ptep, pte_t pte) 603{ 604 set_pte(ptep, pte); 605} 606 607static inline void pte_clear(struct mm_struct *mm, unsigned long addr, --- 6 unchanged lines hidden (view full) --- 614{ 615 set_pmd(pmdp, __pmd(0)); 616} 617#endif /* CONFIG_X86_PAE */ 618 619#define __HAVE_ARCH_START_CONTEXT_SWITCH 620static inline void arch_start_context_switch(struct task_struct *prev) 621{ |
644 PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev); | 622 PVOP_VCALL1(cpu.start_context_switch, prev); |
645} 646 647static inline void arch_end_context_switch(struct task_struct *next) 648{ | 623} 624 625static inline void arch_end_context_switch(struct task_struct *next) 626{ |
649 PVOP_VCALL1(pv_cpu_ops.end_context_switch, next); | 627 PVOP_VCALL1(cpu.end_context_switch, next); |
650} 651 652#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 653static inline void arch_enter_lazy_mmu_mode(void) 654{ | 628} 629 630#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 631static inline void arch_enter_lazy_mmu_mode(void) 632{ |
655 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter); | 633 PVOP_VCALL0(mmu.lazy_mode.enter); |
656} 657 658static inline void arch_leave_lazy_mmu_mode(void) 659{ | 634} 635 636static inline void arch_leave_lazy_mmu_mode(void) 637{ |
660 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); | 638 PVOP_VCALL0(mmu.lazy_mode.leave); |
661} 662 663static inline void arch_flush_lazy_mmu_mode(void) 664{ | 639} 640 641static inline void arch_flush_lazy_mmu_mode(void) 642{ |
665 PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); | 643 PVOP_VCALL0(mmu.lazy_mode.flush); |
666} 667 668static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, 669 phys_addr_t phys, pgprot_t flags) 670{ | 644} 645 646static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, 647 phys_addr_t phys, pgprot_t flags) 648{ |
671 pv_mmu_ops.set_fixmap(idx, phys, flags); | 649 pv_ops.mmu.set_fixmap(idx, phys, flags); |
672} 673 674#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 675 676static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, 677 u32 val) 678{ | 650} 651 652#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 653 654static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, 655 u32 val) 656{ |
679 PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val); | 657 PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val); |
680} 681 682static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock) 683{ | 658} 659 660static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock) 661{ |
684 PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock); | 662 PVOP_VCALLEE1(lock.queued_spin_unlock, lock); |
685} 686 687static __always_inline void pv_wait(u8 *ptr, u8 val) 688{ | 663} 664 665static __always_inline void pv_wait(u8 *ptr, u8 val) 666{ |
689 PVOP_VCALL2(pv_lock_ops.wait, ptr, val); | 667 PVOP_VCALL2(lock.wait, ptr, val); |
690} 691 692static __always_inline void pv_kick(int cpu) 693{ | 668} 669 670static __always_inline void pv_kick(int cpu) 671{ |
694 PVOP_VCALL1(pv_lock_ops.kick, cpu); | 672 PVOP_VCALL1(lock.kick, cpu); |
695} 696 697static __always_inline bool pv_vcpu_is_preempted(long cpu) 698{ | 673} 674 675static __always_inline bool pv_vcpu_is_preempted(long cpu) 676{ |
699 return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu); | 677 return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu); |
700} 701 | 678} 679 |
 | 680void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock); 681bool __raw_callee_save___native_vcpu_is_preempted(long cpu); 682 |
702#endif /* SMP && PARAVIRT_SPINLOCKS */ 703 704#ifdef CONFIG_X86_32 705#define PV_SAVE_REGS "pushl %ecx; pushl %edx;" 706#define PV_RESTORE_REGS "popl %edx; popl %ecx;" 707 708/* save and restore all caller-save registers, except return value */ 709#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;" --- 65 unchanged lines hidden (view full) --- 775 ((struct paravirt_callee_save) { __raw_callee_save_##func }) 776 777/* Promise that "func" already uses the right calling convention */ 778#define __PV_IS_CALLEE_SAVE(func) \ 779 ((struct paravirt_callee_save) { func }) 780 781static inline notrace unsigned long arch_local_save_flags(void) 782{ | 683#endif /* SMP && PARAVIRT_SPINLOCKS */ 684 685#ifdef CONFIG_X86_32 686#define PV_SAVE_REGS "pushl %ecx; pushl %edx;" 687#define PV_RESTORE_REGS "popl %edx; popl %ecx;" 688 689/* save and restore all caller-save registers, except return value */ 690#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;" --- 65 unchanged lines hidden (view full) --- 756 ((struct paravirt_callee_save) { __raw_callee_save_##func }) 757 758/* Promise that "func" already uses the right calling convention */ 759#define __PV_IS_CALLEE_SAVE(func) \ 760 ((struct paravirt_callee_save) { func }) 761 762static inline notrace unsigned long arch_local_save_flags(void) 763{ |
783 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); | 764 return PVOP_CALLEE0(unsigned long, irq.save_fl); |
784} 785 786static inline notrace void arch_local_irq_restore(unsigned long f) 787{ | 765} 766 767static inline notrace void arch_local_irq_restore(unsigned long f) 768{ |
788 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); | 769 PVOP_VCALLEE1(irq.restore_fl, f); |
789} 790 791static inline notrace void arch_local_irq_disable(void) 792{ | 770} 771 772static inline notrace void arch_local_irq_disable(void) 773{ |
793 PVOP_VCALLEE0(pv_irq_ops.irq_disable); | 774 PVOP_VCALLEE0(irq.irq_disable); |
794} 795 796static inline notrace void arch_local_irq_enable(void) 797{ | 775} 776 777static inline notrace void arch_local_irq_enable(void) 778{ |
798 PVOP_VCALLEE0(pv_irq_ops.irq_enable); | 779 PVOP_VCALLEE0(irq.irq_enable); |
799} 800 801static inline notrace unsigned long arch_local_irq_save(void) 802{ 803 unsigned long f; 804 805 f = arch_local_save_flags(); 806 arch_local_irq_disable(); --- 55 unchanged lines hidden (view full) --- 862 COND_POP(set, CLBR_R9, r9); \ 863 COND_POP(set, CLBR_R8, r8); \ 864 COND_POP(set, CLBR_RDI, rdi); \ 865 COND_POP(set, CLBR_RSI, rsi); \ 866 COND_POP(set, CLBR_RDX, rdx); \ 867 COND_POP(set, CLBR_RCX, rcx); \ 868 COND_POP(set, CLBR_RAX, rax) 869 | 780} 781 782static inline notrace unsigned long arch_local_irq_save(void) 783{ 784 unsigned long f; 785 786 f = arch_local_save_flags(); 787 arch_local_irq_disable(); --- 55 unchanged lines hidden (view full) --- 843 COND_POP(set, CLBR_R9, r9); \ 844 COND_POP(set, CLBR_R8, r8); \ 845 COND_POP(set, CLBR_RDI, rdi); \ 846 COND_POP(set, CLBR_RSI, rsi); \ 847 COND_POP(set, CLBR_RDX, rdx); \ 848 COND_POP(set, CLBR_RCX, rcx); \ 849 COND_POP(set, CLBR_RAX, rax) 850 |
870#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) | 851#define PARA_PATCH(off) ((off) / 8) |
871#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8) 872#define PARA_INDIRECT(addr) *addr(%rip) 873#else 874#define PV_SAVE_REGS(set) \ 875 COND_PUSH(set, CLBR_EAX, eax); \ 876 COND_PUSH(set, CLBR_EDI, edi); \ 877 COND_PUSH(set, CLBR_ECX, ecx); \ 878 COND_PUSH(set, CLBR_EDX, edx) 879#define PV_RESTORE_REGS(set) \ 880 COND_POP(set, CLBR_EDX, edx); \ 881 COND_POP(set, CLBR_ECX, ecx); \ 882 COND_POP(set, CLBR_EDI, edi); \ 883 COND_POP(set, CLBR_EAX, eax) 884 | 852#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8) 853#define PARA_INDIRECT(addr) *addr(%rip) 854#else 855#define PV_SAVE_REGS(set) \ 856 COND_PUSH(set, CLBR_EAX, eax); \ 857 COND_PUSH(set, CLBR_EDI, edi); \ 858 COND_PUSH(set, CLBR_ECX, ecx); \ 859 COND_PUSH(set, CLBR_EDX, edx) 860#define PV_RESTORE_REGS(set) \ 861 COND_POP(set, CLBR_EDX, edx); \ 862 COND_POP(set, CLBR_ECX, ecx); \ 863 COND_POP(set, CLBR_EDI, edi); \ 864 COND_POP(set, CLBR_EAX, eax) 865 |
885#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) | 866#define PARA_PATCH(off) ((off) / 4) |
886#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .long, 4) 887#define PARA_INDIRECT(addr) *%cs:addr 888#endif 889 890#define INTERRUPT_RETURN \ | 867#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .long, 4) 868#define PARA_INDIRECT(addr) *%cs:addr 869#endif 870 871#define INTERRUPT_RETURN \ |
891 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), \ | 872 PARA_SITE(PARA_PATCH(PV_CPU_iret), \ |
892 ANNOTATE_RETPOLINE_SAFE; \ | 873 ANNOTATE_RETPOLINE_SAFE; \ |
893 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);) | 874 jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);) |
894 895#define DISABLE_INTERRUPTS(clobbers) \ | 875 876#define DISABLE_INTERRUPTS(clobbers) \ |
896 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), \ | 877 PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable), \ |
897 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ 898 ANNOTATE_RETPOLINE_SAFE; \ | 878 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ 879 ANNOTATE_RETPOLINE_SAFE; \ |
899 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ | 880 call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable); \ |
900 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 901 902#define ENABLE_INTERRUPTS(clobbers) \ | 881 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 882 883#define ENABLE_INTERRUPTS(clobbers) \ |
903 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), \ | 884 PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable), \ |
904 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ 905 ANNOTATE_RETPOLINE_SAFE; \ | 885 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ 886 ANNOTATE_RETPOLINE_SAFE; \ |
906 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ | 887 call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable); \ |
907 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 908 909#ifdef CONFIG_X86_32 910#define GET_CR0_INTO_EAX \ 911 push %ecx; push %edx; \ 912 ANNOTATE_RETPOLINE_SAFE; \ | 888 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 889 890#ifdef CONFIG_X86_32 891#define GET_CR0_INTO_EAX \ 892 push %ecx; push %edx; \ 893 ANNOTATE_RETPOLINE_SAFE; \ |
913 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ | 894 call PARA_INDIRECT(pv_ops+PV_CPU_read_cr0); \ |
914 pop %edx; pop %ecx 915#else /* !CONFIG_X86_32 */ 916 917/* 918 * If swapgs is used while the userspace stack is still current, 919 * there's no way to call a pvop. The PV replacement *must* be 920 * inlined, or the swapgs instruction must be trapped and emulated. 921 */ 922#define SWAPGS_UNSAFE_STACK \ | 895 pop %edx; pop %ecx 896#else /* !CONFIG_X86_32 */ 897 898/* 899 * If swapgs is used while the userspace stack is still current, 900 * there's no way to call a pvop. The PV replacement *must* be 901 * inlined, or the swapgs instruction must be trapped and emulated. 902 */ 903#define SWAPGS_UNSAFE_STACK \ |
923 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), swapgs) | 904 PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs) |
924 925/* 926 * Note: swapgs is very special, and in practise is either going to be 927 * implemented with a single "swapgs" instruction or something very 928 * special. Either way, we don't need to save any registers for 929 * it. 930 */ 931#define SWAPGS \ | 905 906/* 907 * Note: swapgs is very special, and in practise is either going to be 908 * implemented with a single "swapgs" instruction or something very 909 * special. Either way, we don't need to save any registers for 910 * it. 911 */ 912#define SWAPGS \ |
932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), \ | 913 PARA_SITE(PARA_PATCH(PV_CPU_swapgs), \ |
933 ANNOTATE_RETPOLINE_SAFE; \ | 914 ANNOTATE_RETPOLINE_SAFE; \ |
934 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ | 915 call PARA_INDIRECT(pv_ops+PV_CPU_swapgs); \ |
935 ) 936 937#define GET_CR2_INTO_RAX \ 938 ANNOTATE_RETPOLINE_SAFE; \ | 916 ) 917 918#define GET_CR2_INTO_RAX \ 919 ANNOTATE_RETPOLINE_SAFE; \ |
939 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); | 920 call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2); |
940 941#define USERGS_SYSRET64 \ | 921 922#define USERGS_SYSRET64 \ |
942 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ | 923 PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \ |
943 ANNOTATE_RETPOLINE_SAFE; \ | 924 ANNOTATE_RETPOLINE_SAFE; \ |
944 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);) | 925 jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);) |
945 946#ifdef CONFIG_DEBUG_ENTRY 947#define SAVE_FLAGS(clobbers) \ | 926 927#ifdef CONFIG_DEBUG_ENTRY 928#define SAVE_FLAGS(clobbers) \ |
948 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), \ | 929 PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \ |
949 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ 950 ANNOTATE_RETPOLINE_SAFE; \ | 930 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ 931 ANNOTATE_RETPOLINE_SAFE; \ |
951 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ | 932 call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \ |
952 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 953#endif 954 955#endif /* CONFIG_X86_32 */ 956 957#endif /* __ASSEMBLY__ */ 958#else /* CONFIG_PARAVIRT */ 959# define default_banner x86_init_noop --- 12 unchanged lines hidden --- | 933 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 934#endif 935 936#endif /* CONFIG_X86_32 */ 937 938#endif /* __ASSEMBLY__ */ 939#else /* CONFIG_PARAVIRT */ 940# define default_banner x86_init_noop --- 12 unchanged lines hidden --- |
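
The change running through the diff above is mechanical: every PVOP_CALL*/PVOP_VCALL*/PVOP_CALLEE* site drops the per-group prefix (pv_cpu_ops, pv_mmu_ops, pv_irq_ops, pv_time_ops, pv_lock_ops) and names a sub-structure member instead (cpu.*, mmu.*, irq.*, time.*, lock.*), the few direct references become pv_ops.cpu.* / pv_ops.mmu.*, and the assembler-side PARA_PATCH/PARA_INDIRECT helpers index a single pv_ops symbol rather than the separate structures. The sketch below is a minimal, self-contained illustration of what that single-container layout implies; the *_sketch names, the abridged member lists, and the simplified call macro are assumptions made for the example, not the kernel's actual definitions, which carry the full hook sets and the patchable-call machinery.

```c
/*
 * Standalone sketch (not the kernel's actual definitions) of the layout
 * change this diff reflects: the old code indexed separate globals such as
 * pv_cpu_ops and pv_mmu_ops, while the new code names sub-structures of a
 * single pv_ops instance, so a site written PVOP_VCALL1(cpu.load_sp0, sp0)
 * resolves against pv_ops.cpu.load_sp0.
 */
#include <stdio.h>

struct pv_cpu_ops_sketch {
	void (*load_sp0)(unsigned long sp0);
	void (*io_delay)(void);
};

struct pv_mmu_ops_sketch {
	unsigned long (*read_cr2)(void);
};

/* Single top-level container, in the spirit of the new pv_ops. */
struct pv_ops_sketch {
	struct pv_cpu_ops_sketch cpu;
	struct pv_mmu_ops_sketch mmu;
};

/* Trivial "native" backends standing in for the real implementations. */
static void native_load_sp0(unsigned long sp0) { printf("load_sp0(%lx)\n", sp0); }
static void native_io_delay(void) { printf("io_delay\n"); }
static unsigned long native_read_cr2(void) { return 0; }

static struct pv_ops_sketch pv_ops = {
	.cpu = { .load_sp0 = native_load_sp0, .io_delay = native_io_delay },
	.mmu = { .read_cr2 = native_read_cr2 },
};

/* Plain indirect-call stand-in for PVOP_VCALL1: the real macro emits an
 * annotated, patchable call site, but the member lookup works the same way. */
#define PVOP_VCALL1_SKETCH(op, arg1) pv_ops.op(arg1)

int main(void)
{
	PVOP_VCALL1_SKETCH(cpu.load_sp0, 0x1000UL); /* -> pv_ops.cpu.load_sp0(0x1000) */
	pv_ops.cpu.io_delay();                      /* direct use, as in slow_down_io() */
	return (int)pv_ops.mmu.read_cr2();
}
```

Compiled as an ordinary C program, both call styles behave identically; the visible effect in this header is organizational, one pv_ops container for the macros and the PARA_* assembler helpers to index instead of five separate structures.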