/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <asm/extable.h>
#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#ifdef CONFIG_X86_64
#define XEN_PHYSICAL_MASK	__sme_clr((1UL << 52) - 1)
#else
#define XEN_PHYSICAL_MASK	__PHYSICAL_MASK
#endif

#define XEN_PTE_MFN_MASK	((pteval_t)(((signed long)PAGE_MASK) & \
					    XEN_PHYSICAL_MASK))

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long xen_p2m_size;
extern unsigned long xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
						    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			struct gnttab_map_grant_ref *kmap_ops,
			struct page **pages, unsigned int count)
{
	return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			  struct gnttab_unmap_grant_ref *kunmap_ops,
			  struct page **pages, unsigned int count)
{
	return 0;
}
#endif

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define XEN_EXTRA_MEM_RATIO	(10)

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	int ret = 0;

	asm volatile("1: mov %[val], %[ptr]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
		     : [val] "r" (val));

	return ret;
}

static inline int xen_safe_read_ulong(const unsigned long *addr,
				      unsigned long *val)
{
	int ret = 0;
	unsigned long rval = ~0ul;

	asm volatile("1: mov %[ptr], %[rval]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [rval] "+r" (rval)
		     : [ptr] "m" (*addr));
	*val = rval;

	return ret;
}
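
/*
 * Illustrative sketch only (example_read_m2p_entry() is not part of this
 * header's API): how a caller can combine a bounds check with
 * xen_safe_read_ulong() to probe an m2p slot that may not be backed by
 * memory.  On a faulting access the helper returns a negative value, so
 * the caller can report "no usable entry" instead of oopsing.  This is
 * essentially what mfn_to_pfn_no_overrides() below does.
 */
static inline unsigned long example_read_m2p_entry(unsigned long mfn)
{
	unsigned long pfn;

	if (mfn >= machine_to_phys_nr)
		return ~0UL;		/* beyond the m2p table */

	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
		return ~0UL;		/* the access faulted */

	return pfn;
}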

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
 *   identity or foreign indicator is left in place. __pfn_to_mfn()
 *   encapsulates get_phys_to_machine(), which is called in special cases
 *   only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only, for the
 *   special cases needing extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	return pfn;
}
#endif

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	/*
	 * Some x86 code is still using pfn_to_mfn instead of
	 * pfn_to_gfn. This will have to be removed once those
	 * callers have been identified and fixed.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	/*
	 * Some x86 code is still using mfn_to_pfn instead of
	 * gfn_to_pfn. This will have to be removed once those
	 * callers have been identified and fixed.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		pfn = ~0;

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or the
	 * entry doesn't map back to the mfn.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}
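
/*
 * Illustrative sketch only (example_pfn_is_foreign() is not part of this
 * header's API): unlike pfn_to_mfn(), which strips the indicator bits,
 * the raw __pfn_to_mfn() result can be tested against FOREIGN_FRAME_BIT
 * to see whether a PV guest has a foreign frame mapped at this pfn.
 */
static inline bool example_pfn_is_foreign(unsigned long pfn)
{
	unsigned long mfn = __pfn_to_mfn(pfn);

	return mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT);
}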

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;
	else
		return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return gfn;
	else
		return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
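
/*
 * Illustrative sketch only (example_describe_buffer() is a hypothetical
 * helper, not part of this header's API): the usual pattern for handing
 * a kernel buffer to the hypervisor.  Interfaces that take guest frame
 * numbers get virt_to_gfn() plus the offset within the page; PV-only
 * interfaces that want a raw machine address use virt_to_machine()
 * instead.
 */
static inline void example_describe_buffer(void *buf, xen_pfn_t *gfn,
					   unsigned int *offset)
{
	*gfn = virt_to_gfn(buf);
	*offset = offset_in_page(buf);
}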

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v)	((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v)	((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v)	((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)	((x).pgd.pgd)
#else
#define p4d_val_ma(x)	((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 phys_addr_t phys,
					 dma_addr_t dev_addr)
{
	return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */