/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/sizes.h>

#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - set up per-cpu variables
 *
 * Allocate and set up the per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't need to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and that its
		 * virtual address isn't insanely far from the other
		 * percpu areas, which is important for the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
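
/*
 * Illustrative sketch (not from the original file): once __per_cpu_offset[]
 * is populated above, a per-cpu variable is reached by adding the CPU's
 * offset to the variable's link-time address.  Roughly:
 *
 *	DEFINE_PER_CPU(int, foo);	// hypothetical variable
 *	int *p = &per_cpu(foo, cpu);	// expands, more or less, to:
 *	int *p = (int *)((char *)&foo + __per_cpu_offset[cpu]);
 *
 * which is why per_cpu_init() hands the caller its own base address as
 * __per_cpu_start + __per_cpu_offset[smp_processor_id()].
 */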

static inline void
alloc_per_cpu_data(void)
{
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
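
/*
 * Illustrative layout (assumption, not in the original file): the single
 * memblock allocation above is carved up by per_cpu_init() into one
 * PERCPU_PAGE_SIZE slot per possible CPU, consecutively:
 *
 *	cpu_data + 0 * PERCPU_PAGE_SIZE		-> per-cpu area for cpu 0
 *	cpu_data + 1 * PERCPU_PAGE_SIZE		-> per-cpu area for cpu 1
 *	...
 *	cpu_data + (n - 1) * PERCPU_PAGE_SIZE	-> per-cpu area for cpu n-1
 */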

/**
 * setup_per_cpu_areas - set up percpu areas
 *
 * Arch code has already allocated and initialized the percpu areas.  All
 * this function has to do is teach the already-determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new areas using the generic helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
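
/*
 * Illustrative arithmetic (hypothetical numbers, not from the original
 * file): each PERCPU_PAGE_SIZE unit is split as
 *
 *	unit_size = static_size + reserved_size + dyn_size
 *
 * so with, say, a 64KB unit, 24KB of static percpu data and an 8KB
 * PERCPU_MODULE_RESERVE, the dynamic percpu allocator is left with
 * 64KB - 24KB - 8KB = 32KB per unit.  If static + reserved ever exceed
 * the unit size, dyn_size goes negative and the panic above fires.
 */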
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - set up the memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find the highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
#else
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

	find_initrd();

	alloc_per_cpu_data();
}

static int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes the EFI memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
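
/*
 * Illustrative walk (hypothetical addresses, not from the original file):
 * for an ordered memmap describing [PAGE_OFFSET, PAGE_OFFSET + 1G) and
 * [PAGE_OFFSET + 3G, PAGE_OFFSET + 4G), the callback sees:
 *
 *	1st call: start - last_end = 0	-> *max_gap stays 0
 *	2nd call: start - last_end = 2G	-> *max_gap becomes 2G
 *
 * leaving a 2G hole that verify_gap_absence() below rejects for FLATMEM.
 */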

static void __init verify_gap_absence(void)
{
	/* must start at 0: find_largest_hole() reads it before writing */
	unsigned long max_gap = 0;

	/* Forbid FLATMEM if a hole is larger than 1G */
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap >= SZ_1G)
		panic("Cannot use FLATMEM with %luMB hole\n"
		      "Please switch over to SPARSEMEM\n",
		      (max_gap >> 20));
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	verify_gap_absence();

	free_area_init(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
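
/*
 * Illustrative sketch (hypothetical numbers, not from the original file):
 * each max_zone_pfns[] entry is the first PFN past its zone, so with
 * MAX_DMA_ADDRESS corresponding to PFN 0x100000 (4GB with 4KB pages) and
 * max_low_pfn = 0x400000 (16GB), free_area_init() would build:
 *
 *	ZONE_DMA32  : PFNs 0x000000 - 0x0fffff	(memory below 4GB)
 *	ZONE_NORMAL : PFNs 0x100000 - 0x3fffff	(everything above)
 */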