#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<31)
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)

/* Maximum amount of memory we can handle in a domain, in pages */
#define MAX_DOMAIN_PAGES						\
    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))

extern unsigned long *machine_to_phys_mapping;
extern unsigned int   machine_to_phys_order;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);

/*
 * Translate a guest pseudo-physical frame number into a machine frame
 * number, masking off FOREIGN_FRAME_BIT for valid entries.
 */
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = get_phys_to_machine(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~FOREIGN_FRAME_BIT;

	return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}

/*
 * Translate a machine frame number back into a guest pseudo-physical
 * frame number via the hypervisor-provided m2p table.
 */
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	if (unlikely((mfn >> machine_to_phys_order) != 0))
		return ~0;

	pfn = 0;
	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	__get_user(pfn, &machine_to_phys_mapping[mfn]);

	return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have the MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN)) == MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
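/*
 * Illustrative sketch (an assumption for clarity, not kernel API): the
 * p2m(m2p(MFN)) == MFN round trip described above, spelled out. The name
 * some_mfn is hypothetical; mfn_to_local_pfn() below performs exactly
 * this check.
 *
 *	unsigned long pfn = mfn_to_pfn(some_mfn);	// m2p lookup
 *	if (get_phys_to_machine(pfn) == some_mfn) {
 *		// MFN is in our reservation: pfn is a usable local PFN
 *	} else {
 *		// foreign or I/O frame: no struct page; force !pfn_valid()
 *	}
 */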
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn = mfn_to_pfn(mfn);
	if (get_phys_to_machine(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v)	((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v)	((v).pgd.pgd)
#else
#define pud_val_ma(v)	((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#define pgd_val_ma(x)	((x).pgd)

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#endif /* _ASM_X86_XEN_PAGE_H */
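/*
 * Usage sketch (an assumption, not part of this header): round-tripping a
 * kernel virtual address through machine space with the macros above. The
 * name buf is hypothetical; the equality holds only for pages in our own
 * reservation, per the comment above mfn_to_local_pfn().
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);	// some lowmem allocation
 *	xmaddr_t ma = virt_to_machine(buf);	// pseudo-phys -> machine
 *	xpaddr_t pa = machine_to_phys(ma);	// machine -> pseudo-phys
 *	BUG_ON(pa.paddr != __pa(buf));		// sub-page offset preserved
 */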