xref: /openbmc/linux/mm/sparse.c (revision fcc8487d)
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
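
/*
 * Illustrative sketch (not part of the original file): under
 * CONFIG_SPARSEMEM_EXTREME the table above is two-level, and a section
 * number resolves to a root index plus an offset within that root.
 * This mirrors __nr_to_section() in include/linux/mmzone.h:
 *
 *	struct mem_section *ms;
 *	unsigned long root = SECTION_NR_TO_ROOT(nr); // nr / SECTIONS_PER_ROOT
 *
 *	if (!mem_section[root])
 *		ms = NULL;	// this root has not been allocated yet
 *	else
 *		ms = &mem_section[root][nr & SECTION_ROOT_MASK];
 */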

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in page->flags then we have to
 * do a lookup in the section_to_node_table to find which node the
 * page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = memblock_virt_alloc_node(array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}
#endif
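
/*
 * Illustrative note (not part of the original file): __section_nr() is
 * the inverse of __nr_to_section(), so for any present section number
 * nr the round trip holds:
 *
 *	struct mem_section *ms = __nr_to_section(nr);
 *	VM_BUG_ON(__section_nr(ms) != nr);
 *
 * Under SPARSEMEM_EXTREME this costs a linear scan over the roots,
 * which is acceptable on slow paths such as section removal.
 */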

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
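
/*
 * Worked example (not part of the original file; bit positions assume
 * this tree's SECTION_NID_SHIFT == 2): for nid == 3,
 * sparse_encode_early_nid(3) yields 3 << 2 == 0xc.  memory_present()
 * below additionally ORs in SECTION_MARKED_PRESENT (bit 0), giving
 * 0xd; the right shift in sparse_early_nid() discards the low flag
 * bits and recovers 3.
 */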

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
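
/*
 * Worked example (not part of the original file; values assume an
 * x86-64 config with MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12):
 * max_sparsemem_pfn = 1UL << (46 - 12) = 2^34, i.e. sparsemem can
 * describe at most 2^34 * 4KiB = 64TiB of physical address space.
 * A caller passing end_pfn == 2^35 would be clamped to 2^34 with a
 * one-time warning.
 */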

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
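
/*
 * Illustrative example (not part of the original file; the section
 * size assumes an x86-64 config where PAGES_PER_SECTION == 1 << 15,
 * i.e. 128MiB sections with 4KiB pages): memory_present(0, 0, 0x40000)
 * walks pfns 0, 0x8000, 0x10000, ... and marks sections 0..7 present,
 * each with node 0 encoded in its section_mem_map.
 */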

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we store mem_map minus the section's first pfn in
 * section_mem_map, so that page - section_mem_map yields the actual
 * physical page frame number and page_to_pfn() is a single
 * subtraction.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
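
/*
 * Worked example (not part of the original file; the section size
 * assumes PAGES_PER_SECTION == 0x8000): take section pnum == 1, which
 * covers pfns [0x8000, 0x10000), with its struct page array at
 * mem_map.
 *
 *	coded = sparse_encode_mem_map(mem_map, 1);   // mem_map - 0x8000
 *
 * pfn_to_page(0x8123) can then simply add the pfn to the decoded base:
 * (struct page *)coded + 0x8123 == mem_map + 0x123, the 0x123rd page
 * of the section, and page_to_pfn() is the matching subtraction.
 */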

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}
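
/*
 * Worked example (not part of the original file; values assume an
 * x86-64 config with 128MiB sections and 2MiB pageblocks): a section
 * holds 1 << (PFN_SECTION_SHIFT - pageblock_order) == 64 pageblocks,
 * each needing NR_PAGEBLOCK_BITS == 4 flag bits, so
 * SECTION_BLOCKFLAGS_BITS == 256 and usemap_size() returns
 * BITS_TO_LONGS(256) * 8 == 32 bytes per section.
 */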

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}
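
/*
 * Worked example (not part of the original file; addresses are made up
 * and sections assumed 128MiB): if the pgdat lives at physical
 * 0x4ea00000, then goal == 0x48000000 (the start of its section) and
 * limit == 0x50000000, so the first allocation attempt is confined to
 * the pgdat's own section; only if that fails is limit dropped to 0
 * and the allocation retried anywhere on the node.
 */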

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency: the usemap and the pgdat sit
	 * in different sections of the same node.  Some platforms allow
	 * such an un-removable section because they can still gather
	 * other removable sections for dynamic partitioning.  Just
	 * report the un-removable section numbers here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;

	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function that allocates usemaps or memmaps for a range of sections
 * @data: usemap_map for pageblock flags, or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
						map_count, nodeid_begin);
}
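
/*
 * Illustrative example (not part of the original file): with sections
 * 0-3 present on node 0 and sections 4-7 present on node 1, the walk
 * above batches the work into two calls:
 *
 *	alloc_func(data, 0, 4, 4, 0);
 *	alloc_func(data, 4, NR_MEM_SECTIONS, 4, 1);
 *
 * i.e. each node's present sections are allocated in one contiguous
 * chunk, which is what lets sparse_init() avoid per-section holes.
 */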

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * The mem_map uses big pages (2M on 64-bit x86) while a usemap
	 * is only a couple dozen bytes.  Interleaving a 2M-aligned 2M
	 * allocation with a tiny usemap allocation pushes each
	 * following 2M block one more 2M along, so on a big system the
	 * memory ends up with a lot of holes.  Instead, try to allocate
	 * the 2M mem_map pages contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}

static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed
		 * has been logically offlined, so all of its pages are
		 * isolated from the page allocator.  If the section's
		 * memmap is placed in that same section, it must not be
		 * freed: the page allocator could hand it out again even
		 * though the memory backing it is about to be removed
		 * physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly set:
 * 1 on success.  A return value <= 0 means the new memmap was not
 * consumed; it (and the usemap) is freed here before returning.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking for this call: sparse_index_init() handles its own
	 * serialization, and it may allocate with kmalloc(), which can
	 * sleep and so must not run under the resize lock taken below.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}
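
/*
 * Illustrative caller sketch (hypothetical, not part of the original
 * file): a memory hot-add path would invoke this once per new section,
 * treating an already-present section as non-fatal.
 *
 *	int err = sparse_add_one_section(zone, start_pfn);
 *
 *	if (err == -EEXIST)	// section already set up; nothing to do
 *		err = 0;
 *	else if (err < 0)	// allocation/init failure, already cleaned up
 *		return err;
 *	// err > 0: section_mem_map now points at the zeroed memmap
 */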

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem.  It is packed with other usemaps
	 * on the section that held the pgdat at boot time, so just leave
	 * it as it is for now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */