/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 1)	/* only when !PTE_VALID */
#define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 */
#define VMALLOC_START		UL(0xffffff8000000000)
#define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#endif
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime to
 * include the cacheable and bufferable bits based on memory policy, as well
 * as any architecture dependent bits like global/ASID and SMP shared mapping
 * bits.
 */
#define _PAGE_DEFAULT		PTE_TYPE_PAGE | PTE_AF
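/*
 * Editorial note (not from the original header): _PAGE_DEFAULT yields a
 * valid page descriptor with the Access Flag already set, so a freshly
 * installed mapping does not take an access-flag fault on first use;
 * the memory-type attribute bits are merged in at runtime through
 * pgprot_default, as described in the comment above.
 */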
extern pgprot_t pgprot_default;

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)

#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)

#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)

#endif /* __ASSEMBLY__ */

#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC
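/*
 * Editorial note (based on the generic Linux mmap semantics, not stated
 * in the original header): the table index encodes the PROT_EXEC,
 * PROT_WRITE and PROT_READ bits, with __P* used for MAP_PRIVATE and
 * __S* for MAP_SHARED mappings.  Writable private entries (__P010,
 * __P011, __P110, __P111) deliberately map to the read-only "COPY"
 * prots so that the first write faults and the kernel can perform
 * copy-on-write; writable shared entries map straight to the writable
 * SHARED prots.
 */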
#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_AF)
#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	|= PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,	&= ~PTE_RDONLY);
PTE_BIT_FUNC(mkclean,	&= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,	|= PTE_DIRTY);
PTE_BIT_FUNC(mkold,	&= ~PTE_AF);
PTE_BIT_FUNC(mkyoung,	|= PTE_AF);
PTE_BIT_FUNC(mkspecial,	|= PTE_SPECIAL);

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * Install a pte, keeping the I-cache in sync for executable user
 * mappings and write-protecting clean ptes so that the first write
 * faults and the software PTE_DIRTY bit can be set.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (!pte_dirty(pte))
			pte = pte_wrprotect(pte);
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
#define pte_mkhuge(pte)		(__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))

#define __HAVE_ARCH_PTE_SPECIAL

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
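/*
 * Worked example (an editorial illustration, not from the original
 * source): mk_pte(page, PAGE_READONLY) reduces to
 *	__pte((page_to_pfn(page) << PAGE_SHIFT) | pgprot_val(PAGE_READONLY))
 * i.e. the physical frame number shifted into the output-address field
 * with the protection bits ORed in around it.
 */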
#ifndef CONFIG_ARM64_64K_PAGES

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

#endif	/* CONFIG_ARM64_64K_PAGES */

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		PTE_FILE
 *	bits 3-8:	swap type
 *	bits 9-63:	swap offset
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
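/*
 * Worked example (an editorial illustration, not from the original
 * source): __swp_entry(3, 0x10) builds val == (3 << 3) | (0x10 << 9).
 * Bits 0-1 remain zero, so the resulting pte is neither valid nor
 * PROT_NONE and pte_present() correctly reports it as not present;
 * __swp_type() and __swp_offset() shift and mask the fields back out.
 */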
/*
 * Ensure that there are not more swap files than can be encoded in the
 * kernel PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		PTE_FILE
 *	bits 3-63:	file offset / PAGE_SIZE
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | PTE_FILE)

#define PTE_FILE_MAX_BITS	61

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init()	do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */