#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
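/*
 * Rough mental model (simplified, illustrative sketch only): each pv_*_ops
 * call below compiles to an indirect call through the corresponding ops
 * structure, and the call site is recorded in the .parainstructions section
 * so it can later be patched, e.g. with native inline code or a direct call.
 * A wrapper therefore amounts to something like:
 *
 *	unsigned long read_cr2(void)
 *	{
 *		return pv_mmu_ops.read_cr2();	// indirect, patchable call
 *	}
 *
 * The real PVOP_* machinery in asm/paravirt_types.h additionally tracks
 * clobbers, return-value handling and callee-save variants.
 */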

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
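/*
 * Illustrative use only: callers preload the leaf (and sub-leaf) before the
 * call, as the generic cpuid() helpers in asm/processor.h do:
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *	__cpuid(&eax, &ebx, &ecx, &edx);	// leaf 1: feature flags
 */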

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
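/*
 * Illustrative use only (mirrors how debug registers are accessed elsewhere
 * in the kernel):
 *
 *	unsigned long d6;
 *
 *	get_debugreg(d6, 6);	// read %db6 via the pv hook
 *	set_debugreg(0UL, 7);	// clear %db7
 */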

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}
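/*
 * Illustrative use only; these mirror the native rdmsr/wrmsr helpers:
 *
 *	u32 lo, hi;
 *
 *	rdmsr(MSR_EFER, lo, hi);		// 64-bit value split into halves
 *	wrmsrl(MSR_EFER, ((u64)hi << 32) | lo);	// write it back whole
 */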

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})
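/*
 * Illustrative use only: the *_safe variants return an error instead of
 * faulting, so they suit probing MSRs that may not exist on every CPU:
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
 *		pr_debug("MSR_IA32_PLATFORM_ID not readable\n");
 */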

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    const struct flush_tlb_info *info)
{
	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

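/*
 * In the helpers below, "sizeof(pteval_t) > sizeof(long)" selects the 32-bit
 * PAE case, where a 64-bit page-table value has to be passed to the pv op as
 * two 32-bit register arguments; on 64-bit (and non-PAE 32-bit) kernels the
 * value fits in a single argument.
 */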
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	if (sizeof(pudval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
			    native_pud_val(pud));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud, (u64)pud.pud >> 32);
	else
		ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud);

	return ret;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	if (sizeof(p4dval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
			    val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

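/*
 * Lazy MMU mode lets a hypervisor batch page-table updates between enter and
 * leave instead of trapping on every individual update;
 * arch_flush_lazy_mmu_mode() forces any pending batched updates to be applied
 * immediately.  How aggressively batching is used is up to the backend
 * (e.g. Xen).
 */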
#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

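/*
 * These hooks let a hypervisor-aware qspinlock put a waiting virtual CPU to
 * sleep (pv_wait) and wake a lock waiter (pv_kick) instead of spinning, and
 * let spin loops bail out early when the lock holder's vCPU is not currently
 * running (vcpu_is_preempted).  A guest backend such as KVM or Xen fills in
 * pv_lock_ops with its own implementations.
 */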
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* Saving every register would be too much; we clobber all caller-saved
 * registers except the one carrying the argument. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

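/*
 * Illustrative use only (the names below are hypothetical, not taken from
 * this file): a backend wraps an ordinary C function in a thunk and then
 * hands it to paravirt as a callee-save op:
 *
 *	static bool my_vcpu_is_preempted(long cpu) { ... }
 *	PV_CALLEE_SAVE_REGS_THUNK(my_vcpu_is_preempted);
 *
 *	pv_lock_ops.vcpu_is_preempted =
 *		PV_CALLEE_SAVE(my_vcpu_is_preempted);
 *
 * A function that already follows the convention can instead be passed
 * directly via __PV_IS_CALLEE_SAVE().
 */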
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
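/*
 * The arch_local_* helpers above back the generic local_irq_save()/
 * local_irq_restore() and friends in linux/irqflags.h; the usual pattern is:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section with interrupts off ...
 *	arch_local_irq_restore(flags);
 */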


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
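/*
 * Each _PVSITE use therefore emits the instruction sequence itself plus a
 * record in .parainstructions describing it: the site address (word), the op
 * type (byte), the sequence length in bytes (byte) and the clobber mask
 * (short).  This is what the boot-time patching code walks when it rewrites
 * paravirt call sites.
 */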


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice it will either be
 * implemented with a single "swapgs" instruction or something equally
 * special.  Either way, we don't need to save any registers for it.
 */
954  */
955 #define SWAPGS								\
956 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
957 		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
958 		 )
959 
960 #define GET_CR2_INTO_RAX				\
961 	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
962 
963 #define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
964 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
965 		  CLBR_NONE,						\
966 		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
967 
968 #define USERGS_SYSRET64							\
969 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
970 		  CLBR_NONE,						\
971 		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
972 #endif	/* CONFIG_X86_32 */
973 
974 #endif /* __ASSEMBLY__ */
975 #else  /* CONFIG_PARAVIRT */
976 # define default_banner x86_init_noop
977 #ifndef __ASSEMBLY__
978 static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
979 					  struct mm_struct *mm)
980 {
981 }
982 
983 static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
984 {
985 }
986 #endif /* __ASSEMBLY__ */
987 #endif /* !CONFIG_PARAVIRT */
988 #endif /* _ASM_X86_PARAVIRT_H */
989