#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)
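/*
 * Illustrative note: on a 64-bit build BITS_PER_LONG is 64, so
 * FOREIGN_FRAME_BIT is bit 63 and IDENTITY_FRAME_BIT is bit 62 of a p2m
 * entry.  FOREIGN_FRAME(mfn) and IDENTITY_FRAME(mfn) tag an entry by
 * setting one of those high bits; pfn_to_mfn() below masks both bits off
 * again, so callers always see a plain machine frame number.
 */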

/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES						\
    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
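/*
 * Worked example (illustrative only): CONFIG_XEN_MAX_DOMAIN_MEMORY is a
 * size in GiB, so with a hypothetical value of 128 and 4 KiB pages this
 * evaluates to 128 * 2^30 / 4096 = 33554432 pages.
 */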

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
					     unsigned long pfn_e);

extern int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page,
				struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = get_phys_to_machine(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (get_phys_to_machine(pfn) != mfn) {
		/*
		 * If this appears to be a foreign mfn (because the pfn
		 * doesn't map back to the mfn), then check the local override
		 * table to see if there's a better pfn to use.
		 *
		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
		 */
		pfn = m2p_find_override_pfn(mfn, ~0);
	}

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
	 * entry doesn't map back to the mfn and m2p_override doesn't have a
	 * valid entry for it.
	 */
	if (pfn == ~0 &&
			get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
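
/*
 * For illustration (values are hypothetical): phys_to_machine() keeps the
 * in-page offset and only translates the frame number, so if pfn 0x100
 * maps to mfn 0x8a0, then phys_to_machine(XPADDR(0x100234)).maddr is
 * 0x8a0234.  machine_to_phys() performs the inverse translation.
 */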

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn = mfn_to_pfn(mfn);
	if (get_phys_to_machine(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}
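
/*
 * Illustrative consequence of the checks above: for an MFN that belongs to
 * another domain (so p2m(m2p(MFN)) != MFN), mfn_to_local_pfn() returns -1,
 * i.e. ~0UL, which pfn_valid() rejects, whereas mfn_to_pfn() may still
 * hand back an override or identity pfn for it.
 */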

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
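/*
 * Hypothetical usage sketch (not part of this header): a paravirtual
 * driver that must hand a frame number to the hypervisor or to another
 * domain would typically translate a kernel virtual address first, e.g.
 *
 *	unsigned long ring_mfn = virt_to_mfn(ring_page);
 *
 * where 'ring_page' and 'ring_mfn' are illustrative names only; code that
 * deals purely in pseudo-physical frames keeps using virt_to_pfn().
 */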

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#define pgd_val_ma(x)	((x).pgd)

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))

#endif /* _ASM_X86_XEN_PAGE_H */