#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long.  So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))

/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 *
 *	KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine a virtual address from a physical one:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START
 *	va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
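
/*
 * Purely illustrative example (hypothetical values, not taken from any real
 * configuration): with KERNELBASE = 0xc4000000, PAGE_OFFSET = 0xc0000000,
 * PHYSICAL_START = 0x04000000 and MEMORY_START = 0, the linear mapping
 * equation holds:
 *
 *	KERNELBASE - PAGE_OFFSET = 0x04000000 = PHYSICAL_START - MEMORY_START
 *
 * and both translations agree, e.g. for pa = 0x05000000:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START   = 0xc5000000
 *	va = pa + KERNELBASE - PHYSICAL_START  = 0xc5000000
 */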

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 * With RELOCATABLE && PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(_stext.run, 256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * Where:
 *	PHYSICAL_START = kernstart_addr = physical address of _stext
 *	KERNELBASE = compiled virtual address of _stext
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we also need to account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 * PAGE_OFFSET).  In this case, we map physical 0 to 0xc0000000, and
 * kernstart_addr = 64M.
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000,
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * Where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *					MODULO(PHYSICAL_START, 256M)
 *
 * To make __va() / __pa() more lightweight, we introduce a new variable
 * virt_phys_offset, which holds:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *				ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
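
/*
 * Continuing the illustrative 64MB example above (worked out here for
 * illustration only): with KERNELBASE = 0xc0000000 and
 * PHYSICAL_START = 0x4000000,
 *
 *	Effective KERNELBASE = ALIGN_DOWN(0xc0000000, 256M) +
 *				MODULO(0x4000000, 256M)
 *			     = 0xc0000000 + 0x4000000 = 0xc4000000
 *
 *	virt_phys_offset     = 0xc0000000 - ALIGN_DOWN(0x4000000, 256M)
 *			     = 0xc0000000 - 0 = 0xc0000000
 *
 *	__va(0x100000)	     = 0x100000 + 0xc0000000 = 0xc0100000
 *
 * which matches the mapping described above.
 */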
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
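
/*
 * Illustration only, assuming the usual 64-bit layout where PAGE_OFFSET is
 * 0xc000000000000000 and physical addresses stay well below bit 60:
 * for pa = 0x0000000002345678,
 *
 *	__va(pa)       = pa | PAGE_OFFSET           = 0xc000000002345678
 *	__pa(__va(pa)) = va & 0x0fffffffffffffffUL  = 0x0000000002345678
 *
 * i.e. because the operands never have overlapping bits set, the | and &
 * forms give the same results as + and - would, without triggering the
 * miscompile mentioned above.
 */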

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
				 VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	__ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size)	((addr) & (~((typeof(addr))(size) - 1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 */
#define HUGEPD_SHIFT_MASK	0x3f

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#define ARCH_ZONE_DMA_BITS 31

#endif /* _ASM_POWERPC_PAGE_H */