xref: /openbmc/linux/mm/sparse.c (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_maxaligned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_maxaligned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
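
/*
 * Illustration (a sketch, not copied from any header; the real helper,
 * __nr_to_section(), lives in <linux/mmzone.h>): a section number resolves
 * the same way in both layouts.  SPARSEMEM_EXTREME is a two-level table
 * whose root entries are allocated on demand, while the flat layout is one
 * statically sized array with SECTIONS_PER_ROOT == 1, so something like
 *
 *	struct mem_section *lookup(unsigned long nr)	// hypothetical name
 *	{
 *		if (!mem_section[SECTION_NR_TO_ROOT(nr)])
 *			return NULL;
 *		return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 *	}
 *
 * works unchanged for either configuration.
 */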

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int sparse_index_init(unsigned long section_nr, int nid)
{
	static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two concurrent initializers from installing
	 * separate allocations for the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0;
	     root_nr < NR_MEM_SECTIONS;
	     root_nr += SECTIONS_PER_ROOT) {
		root = __nr_to_section(root_nr);

		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return root_nr + (ms - root);
}
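
/*
 * Worked example with an assumed SECTIONS_PER_ROOT of 256 (the real value
 * depends on the architecture's section size): a mem_section pointer that
 * sits 5 entries into the root covering sections 512..767 makes the loop
 * above break with root_nr == 512, so __section_nr() returns 512 + 5 == 517.
 */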

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = SECTION_MARKED_PRESENT;
	}
}
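
/*
 * Worked example with an assumed PAGES_PER_SECTION of 0x8000 (32768 pages,
 * i.e. 128MB with 4K pages): memory_present(0, 0x9000, 0x18000) first
 * rounds the start down to the section-aligned pfn 0x8000, then marks the
 * sections holding pfns 0x8000 and 0x10000 as present for node 0.
 */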

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_valid(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
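
/*
 * Worked example, again assuming PAGES_PER_SECTION == 0x8000 and a 32-byte
 * struct page (both illustrative values): a node spanning three sections,
 * two of which are valid, reports 2 * 0x8000 * 32 == 2MB of memmap.
 */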

/*
 * Subtle: we encode the section's first pfn into the mem_map pointer by
 * storing mem_map minus that pfn, so that adding any pfn in the section
 * to the stored value yields its struct page, and subtracting the stored
 * value from a struct page pointer recovers the pfn.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
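
/*
 * Illustration of the round trip (the addresses are made up): if section 3
 * starts at pfn 0x18000 and its mem_map lives at 0xc1000000, the encoded
 * value is 0xc1000000 - 0x18000 * sizeof(struct page), and decoding with
 * the same pnum adds the 0x18000 back, returning the original 0xc1000000.
 */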

static int sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;

	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

	return 1;
}

static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
	struct mem_section *ms = __nr_to_section(pnum);

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

/*
 * Allocate a mem_map for a hot-added section: try a linearly mapped page
 * allocation first and fall back to vmalloc if that fails.
 */
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;
		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the newly allocated mem_map was not consumed and
 * is freed before returning.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() takes its own lock,
	 * and the memmap allocation may sleep, so both must happen before
	 * the resize lock is taken.
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = __kmalloc_section_memmap(nr_pages);

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}
	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap);

	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
out:
	pgdat_resize_unlock(pgdat, &flags);
	return ret;
}
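
/*
 * Usage sketch (illustrative only; the real memory hot-add caller lives in
 * mm/memory_hotplug.c and may differ in detail): a hot-add path is expected
 * to add one section at a time and translate the >0-means-success return
 * convention, roughly like
 *
 *	int add_section(struct zone *zone, unsigned long phys_start_pfn)
 *	{
 *		int ret = sparse_add_one_section(zone, phys_start_pfn,
 *						 PAGES_PER_SECTION);
 *		if (ret <= 0)
 *			return ret ? ret : -ENOMEM;
 *		return 0;
 *	}
 *
 * where add_section() is a hypothetical wrapper named here only for
 * illustration.
 */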
290