/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
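/*
 * Illustrative note (not from the original source): the clamping above
 * places the bottom of the register backing store at most
 * MAX_USER_STACK_SIZE below the initial stack pointer, page-aligned.
 * The RBS then grows upward from rbs_bot while the memory stack grows
 * downward from start_stack, so both live within the reserved stack
 * window.
 */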
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area((unsigned long)ia64_imva(__init_begin),
			   (unsigned long)ia64_imva(__init_end),
			   0, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}
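/*
 * Note on the rounding above (descriptive, not from the original source):
 * aligning start up and end down means only pages that lie entirely inside
 * [start, end) are returned to the page allocator, so a kernel-sized page
 * that the initrd shares with the end of the kernel image (or with whatever
 * follows it) is never freed by mistake.
 */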
/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}
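/*
 * Layout note (descriptive, not from the original source): in the
 * !HAVE_BUGGY_SEGREL case the gate page ends up mapped read-only at
 * GATE_ADDR and execute-only at GATE_ADDR + PERCPU_PAGE_SIZE, and the loop
 * above maps the zero page, read-only, at every remaining page of both
 * ranges so that no unmapped hole is left between the two mappings.
 */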
void
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}
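/*
 * Worked example (a sketch, not tied to any particular machine): with
 * 16KB pages (PAGE_SHIFT = 14) and 51 implemented virtual-address bits
 * per region (impl_va_bits = 51, the architectural minimum), we get
 *
 *	vmlpt_bits        = 51 - 14 + 3       = 40
 *	mapped_space_bits = 3*(14 - 3) + 14   = 47
 *
 * The sanity checks pass (47 - 14 <= 40 - 3 and 47 <= 50), the VMLPT is
 * placed at offset 2^61 - 2^40 within the region, and the PTA register is
 * programmed with that base, a size field of 40, the short format, and the
 * VHPT walker enabled (unless CONFIG_DISABLE_VHPT).
 */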
#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */
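/*
 * Register a usable memory range with memblock on the node it belongs to
 * (descriptive comment, not from the original source).  When a crash kernel
 * has been reserved, a range that starts or ends inside the crash-kernel
 * reservation is clipped to its boundaries before being added.
 */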
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);
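/*
 * mem_init() finishes boot-time memory setup (descriptive comment, not from
 * the original source): it releases bootmem to the page allocator, prints
 * the memory banner, redirects fsyscall entries without a light-weight
 * handler (or all of them when "nolwsys" is given) to the ordinary syscall
 * path, and installs the gate page via setup_gate().
 */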
void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);


	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__, ret);

	return ret;
}
#endif
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);