/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}
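
	/*
	 * Illustrative example (not taken from real firmware tables):
	 * with a single 1 GB memory range and "mem=512M" on the command
	 * line, the mem_limit_func() call below sets mem_limit to
	 * 0x20000000; the truncation loop then shrinks
	 * pmem_ranges[0].pages to 131072 pages (512 MB of 4 kB pages)
	 * and mem_max ends up at 512 MB.
	 */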

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				(start_pfn << PAGE_SHIFT),
				(npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START),
			BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;
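
	/*
	 * Illustrative /proc/iomem nesting produced by the resource
	 * registrations below (addresses are hypothetical and depend on
	 * where the kernel was loaded):
	 *
	 *   00000000-3fffffff : System RAM
	 *     00000000-000009ff : PDC data (Page Zero)
	 *     00100000-008fffff : Kernel code
	 *     00900000-00afffff : Kernel data
	 */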

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

static int __init parisc_text_address(unsigned long vaddr)
{
	static unsigned long head_ptr __initdata;

	if (!head_ptr)
		head_ptr = PAGE_MASK & (unsigned long)
			dereference_function_descriptor(&parisc_kernel_start);

	return core_kernel_text(vaddr) || vaddr == head_ptr;
}

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
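
				/*
				 * Summary of the protection choices made
				 * below: forced mappings keep the caller's
				 * pgprot, kernel text is mapped executable,
				 * and on 4kB-page kernels the rest of the
				 * read-only segment is mapped PAGE_KERNEL_RO.
				 * The fault vector and the gateway page are
				 * special-cased.
				 */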

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (force)
					pte =  __mk_pte(address, pgprot);
				else if (parisc_text_address(vaddr) &&
					 address != fv_addr)
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 */
	memset((void *)init_begin, 0x00, init_end - init_begin);
	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(-1);

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	free_all_bootmem();

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(unsigned int filter)
{
	int total = 0, reserved = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);

	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		int zoneid;

		pgdat_resize_lock(pgdat, &flags);
		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			total += zone->present_pages;
			reserved += zone->present_pages - zone->managed_pages;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
					       zone->name);
				printk("\n");
			}
		}
	}
#endif
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif
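
/*
 * Lifecycle summary (drawn from the routines above): alloc_sid() sets a
 * bit in space_id[]; free_sid() records the ID in dirty_space_id[] but
 * leaves its space_id[] bit set, so the ID cannot be handed out again
 * while stale translations may still sit in a TLB; recycle_sids() XORs
 * the dirty bits back into space_id[], clearing them and returning the
 * IDs to the free pool.
 */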

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif