xref: /openbmc/linux/arch/ia64/mm/contig.c (revision e553d2a5)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - set up per-cpu variables
 *
 * Allocate and set up per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible
	 * CPUs so that the APs don't have to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
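		/*
		 * The invariant established here: a percpu access such
		 * as per_cpu(v, cpu) now resolves to
		 * &v + __per_cpu_offset[cpu], i.e. into the
		 * PERCPU_PAGE_SIZE block just populated for this cpu.
		 */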

		/*
		 * The percpu area for cpu0 is moved from the __init
		 * area, which is set up by head.S and used until this
		 * point.  Update ar.k3.  This move ensures that the
		 * percpu area for cpu0 is on the correct node and that
		 * its virtual address isn't insanely far from the
		 * other percpu areas, which is important for the
		 * congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
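	/* Hand back this CPU's own copy of the percpu area. */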
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
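
/*
 * Rough sizing sketch (illustrative numbers, not taken from this tree):
 * with a 64KB PERCPU_PAGE_SIZE and 16 possible CPUs, the allocation
 * above reserves 16 * 64KB = 1MB, placed at or above MAX_DMA_ADDRESS.
 */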

/**
 * setup_per_cpu_areas - set up percpu areas
 *
 * Arch code has already allocated and initialized the percpu areas.
 * All this function has to do is teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;
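
	/*
	 * Contiguous (non-NUMA) memory means a single group suffices:
	 * every unit lands in group 0 and no inter-group distance
	 * bookkeeping is needed.
	 */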

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;
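
	/*
	 * Each unit is one PERCPU_PAGE_SIZE page, laid out as
	 *
	 *   | static | reserved (module percpu) | dynamic |
	 *
	 * with the sizes computed above.
	 */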

	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - set up the memory map
 *
 * Walk the EFI memory map and find the usable memory in the system,
 * taking reserved areas into account.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
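
	/*
	 * ia64 has no highmem, so max_pfn and max_low_pfn coincide:
	 * every page frame is directly mapped.
	 */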

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
#else
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

	find_initrd();

	alloc_per_cpu_data();
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
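
	/*
	 * ZONE_DMA32 covers page frames reachable by 32-bit DMA (below
	 * 4GB physical); ZONE_NORMAL takes everything above that, up
	 * to the highest page frame found earlier.
	 */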

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = NULL;
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
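
		/*
		 * Illustrative sizing (assumed numbers, not from this
		 * tree): 4GB of RAM with 16KB pages is 256K page
		 * frames; at roughly 64 bytes per struct page that
		 * makes map_size about 16MB, carved out of the top of
		 * the vmalloc range just below.
		 */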
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();

		printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
	}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
	free_area_init_nodes(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
217