xref: /openbmc/linux/mm/sparse.c (revision 39b6f3aa)
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
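
/*
 * With CONFIG_SPARSEMEM_EXTREME, mem_section[] above holds only root
 * pointers; each root (SECTIONS_PER_ROOT mem_sections) is allocated on
 * demand by sparse_index_init().  The flat variant instead reserves every
 * mem_section statically, which is cheaper to index but costs
 * NR_SECTION_ROOTS * SECTIONS_PER_ROOT entries up front.
 */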

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page flags then we have
 * to look it up in section_to_node_table in order to find which node
 * the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
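/*
 * Allocate the mem_section array for one root (SECTIONS_PER_ROOT entries).
 * Use the slab allocator once it is up, preferring node-local memory when
 * the node has any, and fall back to bootmem during early boot.
 */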
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
	}

	return section;
}

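/*
 * Make sure the root covering section_nr exists.  Returns 0 on success,
 * -EEXIST if the root is already populated and -ENOMEM if the allocation
 * failed.
 */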
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the section's starting pfn into its mem_map pointer so
 * that, for any page in the section, page - section_mem_map yields that
 * page's actual physical page frame number.
 */
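/*
 * For illustration, assuming PAGES_PER_SECTION == 0x8000 and pnum == 2:
 * section_nr_to_pfn(2) == 0x10000, so the value stored below is
 * mem_map - 0x10000.  Adding any pfn from that section (say 0x10023) back
 * to the stored value lands on the matching struct page (mem_map + 0x23),
 * which is what pfn_to_page() relies on once the low flag bits have been
 * masked off with SECTION_MAP_MASK.
 */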
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

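/*
 * Size in bytes of the pageblock flags bitmap for one section, rounded up
 * to whole unsigned longs.  As a rough example, assuming 128MB sections,
 * pageblock_order == 10 and NR_PAGEBLOCK_BITS == 4, a section has 32
 * pageblocks and its usemap is 32 * 4 = 128 bits, i.e. 16 bytes.
 */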
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section from being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					  SMP_CACHE_BYTES, goal, limit);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

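/*
 * Warn when a usemap does not end up in the same section as its node's
 * pgdat, since that creates the removal inter-dependency described above;
 * repeated reports for the same pair of sections are suppressed.
 */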
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before removing section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	printk(KERN_INFO "Sections %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

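/*
 * Allocate one contiguous block of usemaps for all present sections in
 * [pnum_begin, pnum_end) on node nodeid and hand out one usemap_size()
 * slice per section via usemap_map[].
 */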
static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		printk(KERN_WARNING "%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
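/*
 * Without vmemmap, a section's mem_map is just a flat array of
 * PAGES_PER_SECTION struct pages, taken from an architecture remap pool
 * when alloc_remap() provides one, otherwise from bootmem on the target
 * node.
 */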
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * The mem_map is allocated from big pages (e.g. 2M on 64-bit x86)
	 * while a usemap is far less than one page (about 24 bytes), so
	 * alternating a 2M-aligned mem_map allocation with a tiny usemap
	 * allocation pushes each following mem_map into the next 2M area.
	 * On a big system that riddles memory with holes, so try to
	 * allocate the 2M mem_map chunks contiguously instead.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						 usemap_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* ok, last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					 usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + nr_pages);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + nr_pages);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
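/*
 * Without vmemmap, a hot-added section's memmap comes from the page
 * allocator when an order large enough for the whole section is available,
 * falling back to vmalloc() otherwise.
 */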
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic;
	struct page *page = virt_to_page(memmap);

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * has already been logically offlined, so all of its pages
		 * are isolated from the page allocator.  If the memmap of
		 * the section being removed lives in that same section, it
		 * must not be freed: the page allocator could hand it out
		 * again just before the memory is removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it does a kmalloc() that may sleep.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * nr_pages);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

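/*
 * When a section goes away, drop the HWPoison bookkeeping for any poisoned
 * pages in its memmap so that num_poisoned_pages stays balanced.
 */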
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < PAGES_PER_SECTION; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check whether the allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed together with other
	 * usemaps on the section that holds the pgdat at boot time, so just
	 * leave it in place for now.
	 */

	if (memmap) {
		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap, nr_pages);
	}
}

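/*
 * Detach a section's memmap and usemap under the pgdat resize lock, clear
 * its HWPoison accounting and then free both, with free_section_usemap()
 * deciding between the hotplug and boot-time allocation cases.
 */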
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */
829