/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG |
				L_PTE_DIRTY | L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_IO,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
#ifdef CONFIG_SMP
	cachepolicy = CPOLICY_WRITEALLOC;
#endif

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
	/*
	 * Only use write-through for non-SMP systems
	 */
	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3())
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
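
/*
 * Allocate 'sz' bytes from memblock, aligned to 'sz', and return a
 * zero-filled virtual mapping of it.  Only usable during early boot,
 * before the core page allocator is available.
 */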
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
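
	/*
	 * A supersection maps 16MB and its descriptor must be replicated
	 * in 16 consecutive L1 entries; the inner loop below writes those
	 * 16 copies for each supersection-sized step.
	 */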
	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t lowmem_end_addr;

static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) > vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
#ifdef CONFIG_SMP
		} else if (tlb_ops_need_broadcast()) {
			/*
			 * kmap_high needs to occasionally flush TLB entries,
			 * however, if the TLB entries need to be broadcast
			 * we may deadlock:
			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
			 *  flush_tlb_kernel_range->smp_call_function_many
			 *  (must not be called with irqs off)
			 */
			reason = "without hardware TLB ops broadcasting";
#endif
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static inline void map_memory_bank(struct membank *bank)
{
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
}

static void __init map_lowmem(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* Map all the lowmem memory banks. */
	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}
}

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need access to the user-mode page tables here.
	 * For kernel threads we don't have any user-mode mappings so we use
	 * the context that we "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}