xref: /openbmc/linux/include/xen/xen-ops.h (revision 7ae9fb1b7ecbb5d85d07857943f677fd1a559b18)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2e04d0d07SIsaku Yamahata #ifndef INCLUDE_XEN_OPS_H
3e04d0d07SIsaku Yamahata #define INCLUDE_XEN_OPS_H
4e04d0d07SIsaku Yamahata 
5e04d0d07SIsaku Yamahata #include <linux/percpu.h>
6cd979883SStanislaw Gruszka #include <linux/notifier.h>
7be81c8a1SDaniel Kiper #include <linux/efi.h>
8251e90e7SJuergen Gross #include <linux/virtio_anchor.h>
9f030aadeSJuergen Gross #include <xen/features.h>
107892f692SIan Campbell #include <asm/xen/interface.h>
114ccefbe5SStefano Stabellini #include <xen/interface/vcpu.h>
12e04d0d07SIsaku Yamahata 
13e04d0d07SIsaku Yamahata DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
14e04d0d07SIsaku Yamahata 
1555467deaSVitaly Kuznetsov DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
/* Translate a Linux CPU number to the Xen vCPU id backing it. */
static inline uint32_t xen_vcpu_nr(int cpu)
{
	/* Reads the per-cpu xen_vcpu_id slot declared above. */
	return per_cpu(xen_vcpu_id, cpu);
}
2088e957d6SVitaly Kuznetsov 
210b64ffb8SAnkur Arora #define XEN_VCPU_ID_INVALID U32_MAX
220b64ffb8SAnkur Arora 
2303c8142bSIan Campbell void xen_arch_pre_suspend(void);
2403c8142bSIan Campbell void xen_arch_post_suspend(int suspend_cancelled);
250e91398fSJeremy Fitzhardinge 
26ad55db9fSIsaku Yamahata void xen_timer_resume(void);
27ad55db9fSIsaku Yamahata void xen_arch_resume(void);
282b953a5eSBoris Ostrovsky void xen_arch_suspend(void);
29ad55db9fSIsaku Yamahata 
305d9404e1SJulien Grall void xen_reboot(int reason);
315d9404e1SJulien Grall 
32cd979883SStanislaw Gruszka void xen_resume_notifier_register(struct notifier_block *nb);
33cd979883SStanislaw Gruszka void xen_resume_notifier_unregister(struct notifier_block *nb);
34cd979883SStanislaw Gruszka 
354ccefbe5SStefano Stabellini bool xen_vcpu_stolen(int vcpu);
364ccefbe5SStefano Stabellini void xen_setup_runstate_info(int cpu);
37ecb23dc6SJuergen Gross void xen_time_setup_guest(void);
385e25f5dbSDongli Zhang void xen_manage_runstate_time(int action);
394ccefbe5SStefano Stabellini void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
40d34c30ccSJuergen Gross u64 xen_steal_clock(int cpu);
414ccefbe5SStefano Stabellini 
42016b6f5fSStefano Stabellini int xen_setup_shutdown_event(void);
43016b6f5fSStefano Stabellini 
4408bbc9daSAlex Nixon extern unsigned long *xen_contiguous_bitmap;
4516624390SVitaly Kuznetsov 
46f9005571SStefano Stabellini #if defined(CONFIG_XEN_PV)
47f9005571SStefano Stabellini int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
48f9005571SStefano Stabellini 		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
4997315723SJan Beulich 		  unsigned int domid, bool no_translate);
50f9005571SStefano Stabellini #else
/*
 * Stub for !CONFIG_XEN_PV builds: remapping by raw PFN is a PV-only
 * operation, so reaching this path is a hard bug.
 */
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
				xen_pfn_t *pfn, int nr, int *err_ptr,
				pgprot_t prot,  unsigned int domid,
				bool no_translate)
{
	BUG();
	return 0;	/* not reached; keeps the compiler happy */
}
5916624390SVitaly Kuznetsov #endif
6008bbc9daSAlex Nixon 
61c140d879SDavid Howells struct vm_area_struct;
624e8c0c8cSDavid Vrabel 
63ec4001c3SPaul Durrant #ifdef CONFIG_XEN_AUTO_XLATE
644e8c0c8cSDavid Vrabel int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
65628c28eeSDavid Vrabel 			      unsigned long addr,
664e8c0c8cSDavid Vrabel 			      xen_pfn_t *gfn, int nr,
674e8c0c8cSDavid Vrabel 			      int *err_ptr, pgprot_t prot,
68f030aadeSJuergen Gross 			      unsigned int domid,
69628c28eeSDavid Vrabel 			      struct page **pages);
70628c28eeSDavid Vrabel int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
71628c28eeSDavid Vrabel 			      int nr, struct page **pages);
72ec4001c3SPaul Durrant #else
73ec4001c3SPaul Durrant /*
74ec4001c3SPaul Durrant  * These two functions are called from arch/x86/xen/mmu.c and so stubs
75ec4001c3SPaul Durrant  * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
76ec4001c3SPaul Durrant  */
/* Stub when CONFIG_XEN_AUTO_XLATE is not set: operation unsupported. */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
					    unsigned long addr,
					    xen_pfn_t *gfn, int nr,
					    int *err_ptr, pgprot_t prot,
					    unsigned int domid,
					    struct page **pages)
{
	return -EOPNOTSUPP;
}
86ec4001c3SPaul Durrant 
/* Stub when CONFIG_XEN_AUTO_XLATE is not set: operation unsupported. */
static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
					    int nr, struct page **pages)
{
	return -EOPNOTSUPP;
}
92ec4001c3SPaul Durrant #endif
93ec4001c3SPaul Durrant 
94a78d14a3SArnd Bergmann int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
95a78d14a3SArnd Bergmann 			unsigned long len);
96a78d14a3SArnd Bergmann 
97f030aadeSJuergen Gross /*
98f030aadeSJuergen Gross  * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
99f030aadeSJuergen Gross  * @vma:     VMA to map the pages into
100f030aadeSJuergen Gross  * @addr:    Address at which to map the pages
101f030aadeSJuergen Gross  * @gfn:     Array of GFNs to map
102f030aadeSJuergen Gross  * @nr:      Number entries in the GFN array
103f030aadeSJuergen Gross  * @err_ptr: Returns per-GFN error status.
104f030aadeSJuergen Gross  * @prot:    page protection mask
105f030aadeSJuergen Gross  * @domid:   Domain owning the pages
106f030aadeSJuergen Gross  * @pages:   Array of pages if this domain has an auto-translated physmap
107f030aadeSJuergen Gross  *
108f030aadeSJuergen Gross  * @gfn and @err_ptr may point to the same buffer, the GFNs will be
109f030aadeSJuergen Gross  * overwritten by the error codes after they are mapped.
110f030aadeSJuergen Gross  *
111f030aadeSJuergen Gross  * Returns the number of successfully mapped frames, or a -ve error
112f030aadeSJuergen Gross  * code.
113f030aadeSJuergen Gross  */
xen_remap_domain_gfn_array(struct vm_area_struct * vma,unsigned long addr,xen_pfn_t * gfn,int nr,int * err_ptr,pgprot_t prot,unsigned int domid,struct page ** pages)114f030aadeSJuergen Gross static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
115f030aadeSJuergen Gross 					     unsigned long addr,
116f030aadeSJuergen Gross 					     xen_pfn_t *gfn, int nr,
117f030aadeSJuergen Gross 					     int *err_ptr, pgprot_t prot,
118f030aadeSJuergen Gross 					     unsigned int domid,
119f030aadeSJuergen Gross 					     struct page **pages)
120f030aadeSJuergen Gross {
121f030aadeSJuergen Gross 	if (xen_feature(XENFEAT_auto_translated_physmap))
122f030aadeSJuergen Gross 		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
123f030aadeSJuergen Gross 						 prot, domid, pages);
124f030aadeSJuergen Gross 
125f030aadeSJuergen Gross 	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
126f030aadeSJuergen Gross 	 * and the consequences later is quite hard to detect what the actual
127f030aadeSJuergen Gross 	 * cause of "wrong memory was mapped in".
128f030aadeSJuergen Gross 	 */
129f030aadeSJuergen Gross 	BUG_ON(err_ptr == NULL);
130f030aadeSJuergen Gross 	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
13197315723SJan Beulich 			     false);
132f030aadeSJuergen Gross }
133f030aadeSJuergen Gross 
134f030aadeSJuergen Gross /*
135f030aadeSJuergen Gross  * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
136f030aadeSJuergen Gross  * @vma:     VMA to map the pages into
137f030aadeSJuergen Gross  * @addr:    Address at which to map the pages
138f030aadeSJuergen Gross  * @mfn:     Array of MFNs to map
139f030aadeSJuergen Gross  * @nr:      Number entries in the MFN array
140f030aadeSJuergen Gross  * @err_ptr: Returns per-MFN error status.
141f030aadeSJuergen Gross  * @prot:    page protection mask
142f030aadeSJuergen Gross  * @domid:   Domain owning the pages
143f030aadeSJuergen Gross  *
144f030aadeSJuergen Gross  * @mfn and @err_ptr may point to the same buffer, the MFNs will be
145f030aadeSJuergen Gross  * overwritten by the error codes after they are mapped.
146f030aadeSJuergen Gross  *
147f030aadeSJuergen Gross  * Returns the number of successfully mapped frames, or a -ve error
148f030aadeSJuergen Gross  * code.
149f030aadeSJuergen Gross  */
xen_remap_domain_mfn_array(struct vm_area_struct * vma,unsigned long addr,xen_pfn_t * mfn,int nr,int * err_ptr,pgprot_t prot,unsigned int domid)150f030aadeSJuergen Gross static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
151f030aadeSJuergen Gross 					     unsigned long addr, xen_pfn_t *mfn,
152f030aadeSJuergen Gross 					     int nr, int *err_ptr,
15397315723SJan Beulich 					     pgprot_t prot, unsigned int domid)
154f030aadeSJuergen Gross {
155f030aadeSJuergen Gross 	if (xen_feature(XENFEAT_auto_translated_physmap))
156f030aadeSJuergen Gross 		return -EOPNOTSUPP;
157f030aadeSJuergen Gross 
158f030aadeSJuergen Gross 	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
15997315723SJan Beulich 			     true);
160f030aadeSJuergen Gross }
161f030aadeSJuergen Gross 
162f030aadeSJuergen Gross /* xen_remap_domain_gfn_range() - map a range of foreign frames
163f030aadeSJuergen Gross  * @vma:     VMA to map the pages into
164f030aadeSJuergen Gross  * @addr:    Address at which to map the pages
165f030aadeSJuergen Gross  * @gfn:     First GFN to map.
166f030aadeSJuergen Gross  * @nr:      Number frames to map
167f030aadeSJuergen Gross  * @prot:    page protection mask
168f030aadeSJuergen Gross  * @domid:   Domain owning the pages
169f030aadeSJuergen Gross  * @pages:   Array of pages if this domain has an auto-translated physmap
170f030aadeSJuergen Gross  *
171f030aadeSJuergen Gross  * Returns the number of successfully mapped frames, or a -ve error
172f030aadeSJuergen Gross  * code.
173f030aadeSJuergen Gross  */
xen_remap_domain_gfn_range(struct vm_area_struct * vma,unsigned long addr,xen_pfn_t gfn,int nr,pgprot_t prot,unsigned int domid,struct page ** pages)174f030aadeSJuergen Gross static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
175f030aadeSJuergen Gross 					     unsigned long addr,
176f030aadeSJuergen Gross 					     xen_pfn_t gfn, int nr,
177f030aadeSJuergen Gross 					     pgprot_t prot, unsigned int domid,
178f030aadeSJuergen Gross 					     struct page **pages)
179f030aadeSJuergen Gross {
180f030aadeSJuergen Gross 	if (xen_feature(XENFEAT_auto_translated_physmap))
181f030aadeSJuergen Gross 		return -EOPNOTSUPP;
182f030aadeSJuergen Gross 
18397315723SJan Beulich 	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
184f030aadeSJuergen Gross }
185f030aadeSJuergen Gross 
186f030aadeSJuergen Gross int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
187f030aadeSJuergen Gross 			       int numpgs, struct page **pages);
188f030aadeSJuergen Gross 
189243848fcSShannon Zhao int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
190243848fcSShannon Zhao 				  unsigned long nr_grant_frames);
191de1ef206SIan Campbell 
192394b40f6SKonrad Rzeszutek Wilk bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
193be81c8a1SDaniel Kiper 
19409515706SJuergen Gross void xen_efi_runtime_setup(void);
195e371fd76SJulien Grall 
196be81c8a1SDaniel Kiper 
1972f6474e4SThomas Gleixner #if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)
198fdfd811dSDavid Vrabel 
199fdfd811dSDavid Vrabel DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
200fdfd811dSDavid Vrabel 
/* Flag this CPU as being inside a preemptible hypercall. */
static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}
205fdfd811dSDavid Vrabel 
/* Clear the per-CPU preemptible-hypercall flag set by the _begin helper. */
static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}
210fdfd811dSDavid Vrabel 
2112f6474e4SThomas Gleixner #else
2122f6474e4SThomas Gleixner 
/* No-op stubs for !(CONFIG_XEN_PV && !CONFIG_PREEMPTION) configurations. */
static inline void xen_preemptible_hcall_begin(void) { }
static inline void xen_preemptible_hcall_end(void) { }
2152f6474e4SThomas Gleixner 
2162f6474e4SThomas Gleixner #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
217fdfd811dSDavid Vrabel 
218d6aca350SJuergen Gross #ifdef CONFIG_XEN_GRANT_DMA_OPS
219*61367688SJuergen Gross bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
220d6aca350SJuergen Gross #else
221251e90e7SJuergen Gross struct virtio_device;
222251e90e7SJuergen Gross 
/*
 * Stub when CONFIG_XEN_GRANT_DMA_OPS is not set: always reports that
 * restricted memory access is not in use for the device.
 */
static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	return false;
}
227d6aca350SJuergen Gross #endif /* CONFIG_XEN_GRANT_DMA_OPS */
228d6aca350SJuergen Gross 
229e04d0d07SIsaku Yamahata #endif /* INCLUDE_XEN_OPS_H */
230