/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and treats that the
 * same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
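/*
 * A minimal sketch of how the entries above are consumed: generic mm
 * code pastes them into protection_map[] (see mm/mmap.c) and indexes
 * that table with the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a
 * vma's flags.  The helper below is hypothetical and only illustrates
 * the lookup; nothing in UML uses it.
 */
static inline pgprot_t example_vm_prot(unsigned long vm_flags)
{
	const pgprot_t prot_map[16] = {
		__P000, __P001, __P010, __P011,
		__P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011,
		__S100, __S101, __S110, __S111
	};

	/* VM_READ == 1, VM_WRITE == 2, VM_EXEC == 4, VM_SHARED == 8 */
	return prot_map[vm_flags & 0x0f];
}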
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd)	phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_RW) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}

static inline int pte_special(pte_t pte)
{
	return 0;
}
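/*
 * A note on the two UML-private flags used throughout this file (a
 * summary inferred from set_pte() below and pte_mkuptodate()):
 * _PAGE_NEWPAGE marks a pte whose mapping still has to be installed
 * in, or removed from, the host address space, and _PAGE_NEWPROT marks
 * a present pte whose host-side protections are stale.  Both are
 * cleared by pte_mkuptodate() once the host state has been brought
 * into sync.
 */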
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}
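/*
 * A minimal usage sketch (hypothetical helper, not part of the original
 * header): install a pte for a page the way generic mm code does.
 * mk_pte() fills in the physical address and protections, and set_pte()
 * tacks on the _PAGE_NEWPAGE/_PAGE_NEWPROT bookkeeping so the next TLB
 * flush pushes the change out to the host process.
 */
struct mm_struct;
struct page;

static inline void example_install_pte(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep,
				       struct page *page, pgprot_t prot)
{
	set_pte_at(mm, addr, ptep, mk_pte(page, prot));
}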
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do ; while (0)

/*
 * Encode and de-code a swap entry.  The layout, read off the shifts
 * below: the low bits stay clear of _PAGE_PRESENT and the other status
 * flags, bits 5-9 hold the swap type, and bits 11 and up hold the swap
 * offset.  __pte_to_swp_entry() runs the pte through pte_mkuptodate()
 * so only the type and offset survive in the saved value.
 */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif