/*
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

#ifdef CONFIG_29BIT
#define NPHYS		29
#else
#define NPHYS		32
#endif

#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))

#define FIRST_USER_ADDRESS	0

#define PHYS_ADDR_MASK29	0x1fffffff
#define PHYS_ADDR_MASK32	0xffffffff

#ifdef CONFIG_PMB
static inline unsigned long phys_addr_mask(void)
{
	/* Is the MMU in 29bit mode? */
	if (__in_29bit_mode())
		return PHYS_ADDR_MASK29;

	return PHYS_ADDR_MASK32;
}
#elif defined(CONFIG_32BIT)
static inline unsigned long phys_addr_mask(void)
{
	return PHYS_ADDR_MASK32;
}
#else
static inline unsigned long phys_addr_mask(void)
{
	return PHYS_ADDR_MASK29;
}
#endif

#define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)

#ifdef CONFIG_SUPERH32
#define VMALLOC_START	(P3SEG)
#else
#define VMALLOC_START	(0xf0000000)
#endif
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)

#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and consider it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get.
 *
 * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
 * not only supporting separate execute, read, and write bits, but having
 * completely separate permission bits for user and kernel space.
 */
			/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
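
/*
 * The __P* entries cover private (potentially copy-on-write) mappings and
 * the __S* entries shared ones. The generic mm code (mm/mmap.c) copies
 * them into protection_map[] and selects an entry with the low xwr/shared
 * bits of vm_flags: a private PROT_READ|PROT_WRITE mapping, for example,
 * resolves to __P011 == PAGE_COPY so the first write faults for
 * copy-on-write, while the shared equivalent gets __S011 == PAGE_SHARED.
 */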

typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

/*
 * Initialise the page table caches
 */
extern void pgtable_cache_init(void);

struct vm_area_struct;

extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);

static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd);

/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH_PGTABLE_H */
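
For reference, a minimal stand-alone sketch of the sign extension performed by neff_sign_extend() in the header above. The NEFF macros and the helper are copied verbatim; the two test values (one with bit 31 clear, and 0x8c000000, an arbitrary P1SEG-range address with bit 31 set) are illustrative choices for this example, not constants taken from the source.

#include <stdio.h>

#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

int main(void)
{
	/* Bit 31 clear: the value passes through unchanged. */
	printf("%#llx\n", neff_sign_extend(0x0c000000UL));	/* 0xc000000 */

	/* Bit 31 set (e.g. a P1SEG address): upper 32 bits are filled with ones. */
	printf("%#llx\n", neff_sign_extend(0x8c000000UL));	/* 0xffffffff8c000000 */

	return 0;
}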