/*
 * arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * modification for nommu, Hyok S. Choi, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif

/*
 * Allow the constants defined here to be used from assembly code
 * by appending the UL suffix only during actual C compilation.
 */
#define UL(x) _AC(x, UL)

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbol relocations (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping the offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)	(MODULES_VADDR + ((physaddr) & 0x000fffff))

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24
#endif

#else /* CONFIG_MMU */

/*
 * On no-MMU systems a user task can grow up to the end of free RAM, so a
 * fixed limit is hard to define and may never match the original meaning
 * of this macro.  Fortunately, nothing references TASK_SIZE in noMMU
 * mode, for now.
 */
#define TASK_SIZE		UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
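/*
 * Worked example (hypothetical values, not taken from any particular
 * platform): with CONFIG_DRAM_BASE = 0x20000000 and CONFIG_DRAM_SIZE =
 * SZ_64M, END_MEM evaluates to 0x24000000, i.e. one byte past the last
 * byte of DRAM.  A board's mach/memory.h may pre-empt this default by
 * defining END_MEM itself.
 */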
/*
 * Modules can be placed anywhere in RAM in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)	(physaddr)

#endif /* !CONFIG_MMU */

/*
 * We reserve fixed locations for the TCM memories: at most 32 KiB of
 * ITCM and 32 KiB of DTCM.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory.  This is used for XIP and NoMMU kernels, and on platforms that
 * don't have CONFIG_ARM_PATCH_PHYS_VIRT.  Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)

#ifdef CONFIG_XIP_KERNEL
/*
 * When referencing data in RAM from the XIP region in a relative manner
 * with the MMU off, we need the relative offset between the two physical
 * addresses.  The macro below achieves this, which is:
 *    __pa(v_data) - __xip_pa(v_text)
 */
#define PHYS_RELATIVE(v_data, v_text) \
	(((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
	 ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
	  CONFIG_XIP_PHYS_ADDR))
#else
#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
#endif

#ifndef __ASSEMBLY__

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
#if defined(__virt_to_phys)
#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)

#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
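/*
 * With CONFIG_ARM_PATCH_PHYS_VIRT the virt<->phys offset is not known at
 * compile time; each translation is emitted as a stub instruction whose
 * immediate is rewritten during early boot.  Illustrative example
 * (assuming a 3G/1G split with PAGE_OFFSET = 0xc0000000 and RAM at
 * physical 0x10000000): the offset is PHYS_OFFSET - PAGE_OFFSET =
 * 0x50000000, so the placeholder "add rd, rn, #0x81000000" emitted by
 * __pv_stub() below is patched into "add rd, rn, #0x50000000".
 */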
/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pv_stub(from,to,instr,type)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (type))

#define __pv_stub_mov_hi(t)				\
	__asm__ volatile("@ __pv_stub_mov\n"		\
	"1:	mov	%R0, %1\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (t)					\
	: "I" (__PV_BITS_7_0))

#define __pv_add_carry_stub(x, y)			\
	__asm__ volatile("@ __pv_add_carry_stub\n"	\
	"1:	adds	%Q0, %1, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "+r" (y)					\
	: "r" (x), "I" (__PV_BITS_31_24)		\
	: "cc")

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add", __PV_BITS_31_24);
	} else {
		__pv_stub_mov_hi(t);
		__pv_add_carry_stub(x, t);
	}
	return t;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64 bit, and ensures that the inline assembly
	 * expression receives a 32-bit argument where a 32-bit 'r'
	 * operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
	return t;
}

#else

#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
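/*
 * Worked example (illustrative, assuming PAGE_OFFSET = 0xc0000000 and
 * PHYS_OFFSET = 0x10000000): __pa(0xc0008000) = 0x10008000 and
 * __va(0x10008000) = 0xc0008000.  These identities only hold for lowmem
 * addresses inside the kernel's direct mapping; highmem, vmalloc and
 * ioremap addresses must never be passed to __pa()/virt_to_phys().
 */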
extern long long arch_phys_to_idmap_offset;

/*
 * These are for systems that have a hardware interconnect supported alias
 * of physical memory for idmap purposes.  Most cases should leave these
 * untouched.  Note: this can only return addresses less than 4GiB.
 */
static inline bool arm_has_idmap_alias(void)
{
	return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}

#define IDMAP_INVALID_ADDR ((u32)~0)

static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
		addr += arch_phys_to_idmap_offset;
		if (addr > (u32)~0)
			addr = IDMAP_INVALID_ADDR;
	}
	return addr;
}

static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
	phys_addr_t addr = idmap;

	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
		addr -= arch_phys_to_idmap_offset;

	return addr;
}

static inline unsigned long __virt_to_idmap(unsigned long x)
{
	return phys_to_idmap(__virt_to_phys(x));
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that does not mean you
 * should use the __ prefixed forms instead).  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif

/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && \
				  (unsigned long)(kaddr) < (unsigned long)high_memory) && \
				 pfn_valid(virt_to_pfn(kaddr)))

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* __ASM_ARM_MEMORY_H */
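/*
 * Usage sketch (illustrative only): code validating a lowmem pointer
 * before converting it to a struct page would do
 *
 *	if (virt_addr_valid(ptr))
 *		page = virt_to_page(ptr);
 *
 * whereas addresses handed to devices must go through the dma-mapping
 * API rather than virt_to_phys()/__virt_to_bus.
 */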