/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2010 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/lmb.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	lmb_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
/*
 * Walk the kernel page tables for addr, returning a pointer to the PTE
 * slot, or NULL if an intermediate level is missing and can't be
 * allocated.
 */
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* pud is NULL here, so don't dereference it for pud_ERROR(). */
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Likewise, pmd is NULL on this failure path. */
		return NULL;
	}

	pte = pte_offset_kernel(pmd, addr);
	return pte;
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
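/*
 * Illustrative usage sketch, not part of the original file: FIX_EXAMPLE
 * is a hypothetical fixmap slot and the physical address is made up.
 * A caller installs a mapping at a compile-time-fixed virtual address
 * and later tears it down:
 *
 *	__set_fixmap(FIX_EXAMPLE, 0x0c000000, PAGE_KERNEL);
 *	...
 *	__clear_fixmap(FIX_EXAMPLE, PAGE_KERNEL);
 *
 * While the mapping is installed, __fix_to_virt(FIX_EXAMPLE) yields the
 * virtual address it is reachable at.
 */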
/*
 * Pre-allocate page tables covering [start, end) so that users such as
 * the fixmaps can install PTEs without allocating at runtime.
 */
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
			pmd = (pmd_t *)pud;
#else
			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pud_populate(&init_mm, pud, pmd);
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * Set up the pglist_data for a node; the structure itself only needs
 * allocating in the multi-node case, otherwise the static node 0 data
 * is reused.
 */
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __lmb_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __lmb_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, lmb_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

/*
 * Allocate the bootmem bitmap for a node and hand LMB's view of its
 * memory and reservations over to bootmem.
 */
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;
	int i;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		/* Reserve the sections we're already using. */
		for (i = 0; i < lmb.reserved.cnt; i++)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
	}

	sparse_memory_present_with_active_regions(nid);
}

static void __init do_init_bootmem(void)
{
	int i;

	/* Add active regions with valid PFNs. */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}
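/*
 * Rough arithmetic behind the bootmap allocation in
 * bootmem_init_one_node() above (illustrative numbers, assuming 4 KiB
 * pages): a node spanning 64 MiB covers 16384 page frames; the bootmem
 * bitmap needs one bit per frame, i.e. 16384 bits = 2 KiB, which
 * bootmem_bootmap_pages() rounds up to a single page. total_pages is
 * then 1 and lmb_alloc() grabs one 4 KiB page for the bitmap.
 */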
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable, so round upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	lmb_init();

	sh_mv.mv_mem_init();

	early_reserve_mem();

	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	lmb_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
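/*
 * Worked example for the zone sizing loop above (illustrative numbers):
 * if node 0 spans PFNs 0x0c000 through 0x10000, node_low_pfn is 0x10000,
 * so max_zone_pfns[ZONE_NORMAL] becomes 0x10000 and
 * free_area_init_nodes() sizes a single ZONE_NORMAL spanning those PFNs.
 */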
/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}
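/*
 * The loop in free_initmem() above is the standard idiom for handing
 * reserved pages back to the page allocator, and free_initrd_mem()
 * below repeats it. A generic sketch (free_reserved_range() is a
 * hypothetical helper, not part of this file):
 *
 *	static void free_reserved_range(unsigned long start, unsigned long end)
 *	{
 *		unsigned long addr;
 *
 *		for (addr = start; addr < end; addr += PAGE_SIZE) {
 *			ClearPageReserved(virt_to_page(addr));
 *			init_page_count(virt_to_page(addr));	// reset count to 1
 *			free_page(addr);	// return it to the buddy lists
 *			totalram_pages++;
 *		}
 *	}
 */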
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */
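/*
 * Illustrative call path for the hotplug entry point above (sketch,
 * with a made-up address and size): a hotplug driver calls
 *
 *	add_memory(0, 0x10000000, SZ_64M);
 *
 * which lands in arch_add_memory() and registers the new range with
 * ZONE_NORMAL of node 0 via __add_pages(); onlining the pages happens
 * as a separate step.
 */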