/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

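/*
 * Highest identity-mapped virtual address assumed reachable by legacy
 * DMA: the first 4GB of physical memory mapped at PAGE_OFFSET.
 */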
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

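/*
 * Lazily make the i-cache coherent with the d-cache for the page mapped
 * by @pte (typically reached via set_pte() when a page becomes
 * executable).  PG_arch_1 records that the page is already coherent, so
 * the flush happens at most once until the flag is cleared again.
 */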
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that __ia64_sync_icache_dcache() doesn't
 * have to flush them when they get mapped into an executable vm-area.
 */
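/*
 * Note that only pages lying entirely within [addr, addr + size) are
 * marked; partial pages at either end are left alone so they will still
 * be flushed if they ever back an executable mapping.
 */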
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

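/*
 * Place the bottom of the register backing store just below the lowest
 * address the user stack may grow down to: the maximum stack rlimit,
 * truncated to a 16-byte multiple and clamped to MAX_USER_STACK_SIZE.
 */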
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
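/*
 * Allocation failures and already-present ptes are silently ignored;
 * the struct page is returned either way.
 */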
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once as an execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check whether the virtually mapped linear page table (VMLPT) overlaps with a
	 * mapped address space.  The IA-64 architecture guarantees that at least 50 bits
	 * of virtual address space are implemented but if we pick a large enough page
	 * size (e.g., 64KB), the mapped address space is big enough that it will overlap
	 * with the VMLPT.  I assume that once we run on machines big enough to warrant
	 * 64KB pages, IMPL_VA_MSB will be significantly bigger, so this is unlikely to
	 * become a problem in practice.  Alternatively, we could truncate the top of the
	 * mapped address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

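	/*
	 * Worked example, assuming the common 16KB page size (PAGE_SHIFT = 14)
	 * and IMPL_VA_MSB = 50 (so impl_va_bits = 51):
	 *
	 *	mapped_space_bits = 3*(14 - 3) + 14 = 47
	 *	vmlpt_bits        = 51 - 14 + 3     = 40
	 *
	 * The checks below then require 47 - 14 = 33 <= 40 - 3 = 37 and
	 * 47 <= 50, both of which hold, and the VMLPT occupies the top
	 * 2^40 bytes of each region (pta = 2^61 - 2^40).
	 */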
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is log2 of the number of ptes we
	 * need, which must not exceed "vmlpt_bits - pte_bits", log2 of the
	 * number of pte slots the VMLPT provides.  The second half of the
	 * test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
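	/*
	 * With the 16KB-page example above (vmlpt_bits = 40) this writes
	 * PTA = (2^61 - 2^40) | (40 << 2) | 1: a short-format VHPT of
	 * 2^40 bytes with the walker enabled (unless CONFIG_DISABLE_VHPT).
	 */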
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
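/*
 * Given a node and an offset "i" (in pages) from the node's first pfn,
 * walk the kernel page tables backing the virtual mem_map and return the
 * offset of the next pfn whose struct page is actually mapped, so that
 * callers can skip over holes in the virtually mapped memmap.
 */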
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

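/*
 * efi_memmap_walk() callback: make sure the part of the virtual mem_map
 * describing the physical range [start, end) is backed by real pages,
 * allocating the page-table levels and the memmap pages themselves from
 * the bootmem allocator of the node owning the range.
 */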
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

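/*
 * efi_memmap_walk() callback: clip each memory descriptor against the
 * range described by memmap_init_callback_data and initialize the
 * corresponding struct pages via memmap_init_zone().
 */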
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

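/*
 * With a virtual mem_map the struct page for a pfn may lie in an
 * unmapped hole, so probe the first and last byte of it with
 * __get_user(), which recovers from the fault, to check that it is
 * really backed by memory.
 */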
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

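/*
 * efi_memmap_walk() callback: record in *arg the largest gap seen
 * between consecutive memory descriptors; presumably used to decide
 * whether a virtually mapped mem_map is worthwhile.
 */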
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes the EFI memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

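/*
 * Register a usable memory range with memblock, clipping out the
 * crash-kernel reservation when CONFIG_KEXEC is enabled.
 */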
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

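/*
 * efi_memmap_walk() callback (see mem_init() below): count the pages in
 * [start, end) that are marked PageReserved and add them to *arg.
 */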
static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

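/*
 * Fold this range into the global min_low_pfn/max_low_pfn, rounding to
 * page boundaries on FLATMEM and to granule boundaries otherwise.
 */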
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;

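	/* on ia64 each page-table level is sized to occupy exactly one page */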
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);


	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
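/*
 * Memory hot-add entry point: newly added memory always goes into
 * ZONE_NORMAL of the target node on ia64.
 */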
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);

	return ret;
}
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);