/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* or32 pgtable.h - macros and functions to manipulate page tables
 *
 * Based on:
 * include/asm-cris/pgtable.h
 */

#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <asm/mmu.h>
#include <asm/fixmap.h>

/*
 * The Linux memory management assumes a three-level page table setup.  On
 * or32, we use that, but "fold" the mid level into the top-level page
 * table.  Since the MMU TLB is software loaded through an interrupt, it
 * supports any page table structure, so we could have used a three-level
 * setup, but for the amounts of memory we normally use, a two-level setup
 * is probably more efficient.
 *
 * This file contains the functions and defines necessary to modify and use
 * the or32 page table tree.
 */

extern void paging_init(void);

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-2))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: we use a two-level setup, so
 * we don't really have any PMD directory physically.
 * pointers are 4 bytes so we can use the page size and
 * divide it by 4 (shift by 2).
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-2))

#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-2))

/* calculate how many PGD entries a user-level program can use;
 * the first mappable virtual address is 0
 * (TASK_SIZE is the maximum virtual address space)
 */

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

/*
 * Kernel's own virtual memory area.
 */

/*
 * The size and location of the vmalloc area are chosen so that modules
 * placed in this area aren't more than a 28-bit signed offset from any
 * kernel functions that they may need.  This greatly simplifies handling
 * of the relocations for l.j and l.jal instructions as we don't need to
 * introduce any trampolines for reaching "distant" code.
 *
 * 64 MB of vmalloc area is comparable to what's available on other arches.
 */

#define VMALLOC_START	(PAGE_OFFSET-0x04000000)
#define VMALLOC_END	(PAGE_OFFSET)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
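/*
 * Worked example (a sketch, assuming PAGE_SHIFT == 13, i.e. 8 KiB pages,
 * as on or32): PGDIR_SHIFT = 13 + 11 = 24, so PGDIR_SIZE is 16 MiB,
 * PTRS_PER_PGD and PTRS_PER_PTE are both 2048, and VMALLOC_START sits
 * 64 MiB (0x04000000) below PAGE_OFFSET.
 */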

/* Define some higher level generic page attributes.
 *
 * If you change the _PAGE_CI definition, be sure to change it in
 * io.h for ioremap_nocache() too.
 */

/*
 * An OR32 PTE looks like this:
 *
 * |  31 ... 10 |  9  |  8 ... 6  |  5  |  4  |  3  |  2  |  1  |  0  |
 *   Phys pg.num    L     PP Index    D     A    WOM   WBC   CI    CC
 *
 *  L  : link
 *  PPI: Page protection index
 *  D  : Dirty
 *  A  : Accessed
 *  WOM: Weakly ordered memory
 *  WBC: Write-back cache
 *  CI : Cache inhibit
 *  CC : Cache coherent
 *
 * The protection bits below should correspond to the layout of the actual
 * PTE as per above.
 */

#define _PAGE_CC	0x001 /* software: pte contains a translation */
#define _PAGE_CI	0x002 /* cache inhibit          */
#define _PAGE_WBC	0x004 /* write back cache       */
#define _PAGE_WOM	0x008 /* weakly ordered memory  */

#define _PAGE_A		0x010 /* accessed               */
#define _PAGE_D		0x020 /* dirty                  */
#define _PAGE_URE	0x040 /* user read enable       */
#define _PAGE_UWE	0x080 /* user write enable      */

#define _PAGE_SRE	0x100 /* superuser read enable  */
#define _PAGE_SWE	0x200 /* superuser write enable */
#define _PAGE_EXEC	0x400 /* software: page is executable */
#define _PAGE_U_SHARED	0x800 /* software: page is shared in user space */

/* 0x001 is the cache coherency bit, which should always be set to
 *	 1 - for SMP (when we support it)
 *	 0 - otherwise
 *
 * we just reuse this bit in software for _PAGE_PRESENT and
 * force it to 0 when loading it into the TLB.
 */
#define _PAGE_PRESENT	_PAGE_CC
#define _PAGE_USER	_PAGE_URE
#define _PAGE_WRITE	(_PAGE_UWE | _PAGE_SWE)
#define _PAGE_DIRTY	_PAGE_D
#define _PAGE_ACCESSED	_PAGE_A
#define _PAGE_NO_CACHE	_PAGE_CI
#define _PAGE_SHARED	_PAGE_U_SHARED
#define _PAGE_READ	(_PAGE_URE | _PAGE_SRE)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_ALL	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _KERNPG_TABLE \
	(_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_ALL)
#define PAGE_READONLY	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
#define PAGE_READONLY_X	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
#define PAGE_SHARED \
	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
		 | _PAGE_SHARED)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
		 | _PAGE_SHARED | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
#define PAGE_COPY_X	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)

#define PAGE_KERNEL \
	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
#define PAGE_KERNEL_RO \
	__pgprot(_PAGE_ALL | _PAGE_SRE \
		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
#define PAGE_KERNEL_NOCACHE \
	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
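/*
 * Worked example of how the bits above combine (a sketch): PAGE_READONLY
 * expands to _PAGE_CC | _PAGE_A | _PAGE_URE | _PAGE_SRE == 0x151, i.e. a
 * present, accessed page that is readable from both user and supervisor
 * mode, writable from neither, and has the software EXEC bit clear.
 */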

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[2048];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void *)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2			2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk, pgdir)

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */

static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_READ; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE);
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ);
	return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_EXEC);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* What actually goes as arguments to the various functions is less than
 * obvious, but a rule of thumb is that struct pages are passed as
 * struct page *, real physical DRAM addresses as unsigned long, and
 * DRAM "virtual" addresses (the 0xc0xxxxxx ones) as void *.
 */
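/*
 * A sketch of the rule of thumb above, assuming PAGE_OFFSET == 0xc0000000:
 * the kernel virtual address (void *)0xc0002000 corresponds to physical
 * address (unsigned long)0x00002000, which is why __mk_pte() below runs
 * its void * argument through __pa() before or'ing in the protection bits.
 */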

static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
{
	pte_t pte;

	/* the PTE needs a physical address */
	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))

#define mk_pte_phys(physpage, pgprot) \
({                                                                      \
	pte_t __pte;                                                    \
									\
	pte_val(__pte) = (physpage) + pgprot_val(pgprot);               \
	__pte;                                                          \
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}


/*
 * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
 * __pte_page(pte_val) refers to the "virtual" DRAM interval
 * pte_pagenr refers to the page-number counted starting from the virtual
 * DRAM start
 */

static inline unsigned long __pte_page(pte_t pte)
{
	/* the PTE contains a physical address */
	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
}

#define pte_pagenr(pte)		((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)

/* permanent address of a page */

#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define pte_page(pte)		(mem_map+pte_pagenr(pte))

/*
 * only the pte's themselves need to point to physical DRAM (see above)
 * the pagetable links are purely handled within the kernel SW and thus
 * don't need the __pa and __va transformations.
 */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
}

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page_kernel(pmd)	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
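/*
 * A sketch of how the lookup macros below carve up a 32-bit virtual
 * address, assuming PAGE_SHIFT == 13 (and hence PGDIR_SHIFT == 24):
 *
 *   bits 31..24  pgd_index()    - index of the pgd entry
 *   bits 23..13  __pte_offset() - index into the pte page
 *   bits 12..0                  - byte offset within the 8 KiB page
 */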

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define __pgd_offset(address)	pgd_index(address)

#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define __pte_offset(address)			\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)		\
	((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)		\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)	\
	pte_offset_map(dir, address)

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
#define pte_pfn(x)		((unsigned long)(((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */

/*
 * or32 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 *
 * Actually, I am not sure what this could be used for.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *pte)
{
}

/* __PHX__ FIXME, SWAP, this probably doesn't work */

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
/* The swap type is kept in bits 5..11 and the swap offset in bits 12 and up,
 * which leaves bit 0 (_PAGE_PRESENT) clear for swap entries.
 */

#define __swp_type(x)			(((x).val >> 5) & 0x7f)
#define __swp_offset(x)			((x).val >> 12)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define kern_addr_valid(addr)		(1)

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()		do { } while (0)

typedef pte_t *pte_addr_t;

#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PGTABLE_H */