xref: /openbmc/linux/mm/memory_hotplug.c (revision e33e33b4d1c699d06fb8ccd6da80b309b84ec975)
13947be19SDave Hansen /*
23947be19SDave Hansen  *  linux/mm/memory_hotplug.c
33947be19SDave Hansen  *
43947be19SDave Hansen  *  Copyright (C)
53947be19SDave Hansen  */
63947be19SDave Hansen 
73947be19SDave Hansen #include <linux/stddef.h>
83947be19SDave Hansen #include <linux/mm.h>
93947be19SDave Hansen #include <linux/swap.h>
103947be19SDave Hansen #include <linux/interrupt.h>
113947be19SDave Hansen #include <linux/pagemap.h>
123947be19SDave Hansen #include <linux/compiler.h>
13b95f1b31SPaul Gortmaker #include <linux/export.h>
143947be19SDave Hansen #include <linux/pagevec.h>
152d1d43f6SChandra Seetharaman #include <linux/writeback.h>
163947be19SDave Hansen #include <linux/slab.h>
173947be19SDave Hansen #include <linux/sysctl.h>
183947be19SDave Hansen #include <linux/cpu.h>
193947be19SDave Hansen #include <linux/memory.h>
204b94ffdcSDan Williams #include <linux/memremap.h>
213947be19SDave Hansen #include <linux/memory_hotplug.h>
223947be19SDave Hansen #include <linux/highmem.h>
233947be19SDave Hansen #include <linux/vmalloc.h>
240a547039SKAMEZAWA Hiroyuki #include <linux/ioport.h>
250c0e6195SKAMEZAWA Hiroyuki #include <linux/delay.h>
260c0e6195SKAMEZAWA Hiroyuki #include <linux/migrate.h>
270c0e6195SKAMEZAWA Hiroyuki #include <linux/page-isolation.h>
2871088785SBadari Pulavarty #include <linux/pfn.h>
296ad696d2SAndi Kleen #include <linux/suspend.h>
306d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
31d96ae530Sakpm@linux-foundation.org #include <linux/firmware-map.h>
3260a5a19eSTang Chen #include <linux/stop_machine.h>
33c8721bbbSNaoya Horiguchi #include <linux/hugetlb.h>
34c5320926STang Chen #include <linux/memblock.h>
35f784a3f1STang Chen #include <linux/bootmem.h>
36698b1b30SVlastimil Babka #include <linux/compaction.h>
373947be19SDave Hansen 
383947be19SDave Hansen #include <asm/tlbflush.h>
393947be19SDave Hansen 
401e5ad9a3SAdrian Bunk #include "internal.h"
411e5ad9a3SAdrian Bunk 
429d0ad8caSDaniel Kiper /*
439d0ad8caSDaniel Kiper  * online_page_callback contains a pointer to the current page onlining function.
449d0ad8caSDaniel Kiper  * Initially it is generic_online_page(). If required, it can be changed by
459d0ad8caSDaniel Kiper  * calling set_online_page_callback() to register a different callback and
469d0ad8caSDaniel Kiper  * restore_online_page_callback() to restore the generic callback.
479d0ad8caSDaniel Kiper  */
489d0ad8caSDaniel Kiper 
499d0ad8caSDaniel Kiper static void generic_online_page(struct page *page);
509d0ad8caSDaniel Kiper 
519d0ad8caSDaniel Kiper static online_page_callback_t online_page_callback = generic_online_page;
52bfc8c901SVladimir Davydov static DEFINE_MUTEX(online_page_callback_lock);
539d0ad8caSDaniel Kiper 
54bfc8c901SVladimir Davydov /* The same as the cpu_hotplug lock, but for memory hotplug. */
55bfc8c901SVladimir Davydov static struct {
56bfc8c901SVladimir Davydov 	struct task_struct *active_writer;
57bfc8c901SVladimir Davydov 	struct mutex lock; /* Synchronizes accesses to refcount, */
58bfc8c901SVladimir Davydov 	/*
59bfc8c901SVladimir Davydov 	 * Also blocks the new readers during
60bfc8c901SVladimir Davydov 	 * an ongoing mem hotplug operation.
61bfc8c901SVladimir Davydov 	 */
62bfc8c901SVladimir Davydov 	int refcount;
6320d6c96bSKOSAKI Motohiro 
64bfc8c901SVladimir Davydov #ifdef CONFIG_DEBUG_LOCK_ALLOC
65bfc8c901SVladimir Davydov 	struct lockdep_map dep_map;
66bfc8c901SVladimir Davydov #endif
67bfc8c901SVladimir Davydov } mem_hotplug = {
68bfc8c901SVladimir Davydov 	.active_writer = NULL,
69bfc8c901SVladimir Davydov 	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
70bfc8c901SVladimir Davydov 	.refcount = 0,
71bfc8c901SVladimir Davydov #ifdef CONFIG_DEBUG_LOCK_ALLOC
72bfc8c901SVladimir Davydov 	.dep_map = {.name = "mem_hotplug.lock" },
73bfc8c901SVladimir Davydov #endif
74bfc8c901SVladimir Davydov };
75bfc8c901SVladimir Davydov 
76bfc8c901SVladimir Davydov /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/done() */
77bfc8c901SVladimir Davydov #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
78bfc8c901SVladimir Davydov #define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
79bfc8c901SVladimir Davydov #define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)
80bfc8c901SVladimir Davydov 
8131bc3858SVitaly Kuznetsov bool memhp_auto_online;
8231bc3858SVitaly Kuznetsov EXPORT_SYMBOL_GPL(memhp_auto_online);
8331bc3858SVitaly Kuznetsov 
84bfc8c901SVladimir Davydov void get_online_mems(void)
8520d6c96bSKOSAKI Motohiro {
86bfc8c901SVladimir Davydov 	might_sleep();
87bfc8c901SVladimir Davydov 	if (mem_hotplug.active_writer == current)
88bfc8c901SVladimir Davydov 		return;
89bfc8c901SVladimir Davydov 	memhp_lock_acquire_read();
90bfc8c901SVladimir Davydov 	mutex_lock(&mem_hotplug.lock);
91bfc8c901SVladimir Davydov 	mem_hotplug.refcount++;
92bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
93bfc8c901SVladimir Davydov 
9420d6c96bSKOSAKI Motohiro }
9520d6c96bSKOSAKI Motohiro 
96bfc8c901SVladimir Davydov void put_online_mems(void)
9720d6c96bSKOSAKI Motohiro {
98bfc8c901SVladimir Davydov 	if (mem_hotplug.active_writer == current)
99bfc8c901SVladimir Davydov 		return;
100bfc8c901SVladimir Davydov 	mutex_lock(&mem_hotplug.lock);
101bfc8c901SVladimir Davydov 
102bfc8c901SVladimir Davydov 	if (WARN_ON(!mem_hotplug.refcount))
103bfc8c901SVladimir Davydov 		mem_hotplug.refcount++; /* try to fix things up */
104bfc8c901SVladimir Davydov 
105bfc8c901SVladimir Davydov 	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
106bfc8c901SVladimir Davydov 		wake_up_process(mem_hotplug.active_writer);
107bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
108bfc8c901SVladimir Davydov 	memhp_lock_release();
109bfc8c901SVladimir Davydov 
11020d6c96bSKOSAKI Motohiro }
11120d6c96bSKOSAKI Motohiro 
11230467e0bSDavid Rientjes void mem_hotplug_begin(void)
113bfc8c901SVladimir Davydov {
114bfc8c901SVladimir Davydov 	mem_hotplug.active_writer = current;
115bfc8c901SVladimir Davydov 
116bfc8c901SVladimir Davydov 	memhp_lock_acquire();
117bfc8c901SVladimir Davydov 	for (;;) {
118bfc8c901SVladimir Davydov 		mutex_lock(&mem_hotplug.lock);
119bfc8c901SVladimir Davydov 		if (likely(!mem_hotplug.refcount))
120bfc8c901SVladimir Davydov 			break;
121bfc8c901SVladimir Davydov 		__set_current_state(TASK_UNINTERRUPTIBLE);
122bfc8c901SVladimir Davydov 		mutex_unlock(&mem_hotplug.lock);
123bfc8c901SVladimir Davydov 		schedule();
124bfc8c901SVladimir Davydov 	}
125bfc8c901SVladimir Davydov }
126bfc8c901SVladimir Davydov 
12730467e0bSDavid Rientjes void mem_hotplug_done(void)
128bfc8c901SVladimir Davydov {
129bfc8c901SVladimir Davydov 	mem_hotplug.active_writer = NULL;
130bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
131bfc8c901SVladimir Davydov 	memhp_lock_release();
132bfc8c901SVladimir Davydov }
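/*
 * Illustrative usage of the locking API above (a sketch, not taken from any
 * specific caller): readers that only need the hotplug state to stay stable
 * bracket their work with get_online_mems()/put_online_mems(), while the
 * single writer performing the actual hotplug wraps it in
 * mem_hotplug_begin()/mem_hotplug_done(), roughly:
 *
 *	get_online_mems();
 *	walk_memory_state_or_similar();		// hypothetical reader-side work
 *	put_online_mems();
 *
 *	mem_hotplug_begin();
 *	ret = online_pages(pfn, nr_pages, MMOP_ONLINE_KEEP);
 *	mem_hotplug_done();
 *
 * mem_hotplug_begin() waits (sleeping in TASK_UNINTERRUPTIBLE) until
 * mem_hotplug.refcount drops to zero, mirroring the old cpu_hotplug
 * refcount scheme referenced above.
 */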
13320d6c96bSKOSAKI Motohiro 
13445e0b78bSKeith Mannthey /* add this memory to iomem resource */
13545e0b78bSKeith Mannthey static struct resource *register_memory_resource(u64 start, u64 size)
13645e0b78bSKeith Mannthey {
13745e0b78bSKeith Mannthey 	struct resource *res;
13845e0b78bSKeith Mannthey 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1396f754ba4SVitaly Kuznetsov 	if (!res)
1406f754ba4SVitaly Kuznetsov 		return ERR_PTR(-ENOMEM);
14145e0b78bSKeith Mannthey 
14245e0b78bSKeith Mannthey 	res->name = "System RAM";
14345e0b78bSKeith Mannthey 	res->start = start;
14445e0b78bSKeith Mannthey 	res->end = start + size - 1;
145782b8664SToshi Kani 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
14645e0b78bSKeith Mannthey 	if (request_resource(&iomem_resource, res) < 0) {
1474996eed8SToshi Kani 		pr_debug("System RAM resource %pR cannot be added\n", res);
14845e0b78bSKeith Mannthey 		kfree(res);
1496f754ba4SVitaly Kuznetsov 		return ERR_PTR(-EEXIST);
15045e0b78bSKeith Mannthey 	}
15145e0b78bSKeith Mannthey 	return res;
15245e0b78bSKeith Mannthey }
15345e0b78bSKeith Mannthey 
15445e0b78bSKeith Mannthey static void release_memory_resource(struct resource *res)
15545e0b78bSKeith Mannthey {
15645e0b78bSKeith Mannthey 	if (!res)
15745e0b78bSKeith Mannthey 		return;
15845e0b78bSKeith Mannthey 	release_resource(res);
15945e0b78bSKeith Mannthey 	kfree(res);
16045e0b78bSKeith Mannthey 	return;
16145e0b78bSKeith Mannthey }
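/*
 * Note (illustrative): register_memory_resource() claims the hot-added range
 * as a busy "System RAM" resource in the iomem tree, so it typically shows up
 * in /proc/iomem like boot-time RAM. A request for, say,
 * [0x100000000, 0x17fffffff] that collides with an existing resource fails
 * with ERR_PTR(-EEXIST); release_memory_resource() is the teardown
 * counterpart used on the error and removal paths.
 */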
16245e0b78bSKeith Mannthey 
16353947027SKeith Mannthey #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
16446723bfaSYasuaki Ishimatsu void get_page_bootmem(unsigned long info,  struct page *page,
1655f24ce5fSAndrea Arcangeli 		      unsigned long type)
16604753278SYasunori Goto {
1675f24ce5fSAndrea Arcangeli 	page->lru.next = (struct list_head *) type;
16804753278SYasunori Goto 	SetPagePrivate(page);
16904753278SYasunori Goto 	set_page_private(page, info);
170fe896d18SJoonsoo Kim 	page_ref_inc(page);
17104753278SYasunori Goto }
17204753278SYasunori Goto 
173170a5a7eSJiang Liu void put_page_bootmem(struct page *page)
17404753278SYasunori Goto {
1755f24ce5fSAndrea Arcangeli 	unsigned long type;
17604753278SYasunori Goto 
1775f24ce5fSAndrea Arcangeli 	type = (unsigned long) page->lru.next;
1785f24ce5fSAndrea Arcangeli 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
1795f24ce5fSAndrea Arcangeli 	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
18004753278SYasunori Goto 
181fe896d18SJoonsoo Kim 	if (page_ref_dec_return(page) == 1) {
18204753278SYasunori Goto 		ClearPagePrivate(page);
18304753278SYasunori Goto 		set_page_private(page, 0);
1845f24ce5fSAndrea Arcangeli 		INIT_LIST_HEAD(&page->lru);
185170a5a7eSJiang Liu 		free_reserved_page(page);
18604753278SYasunori Goto 	}
18704753278SYasunori Goto }
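/*
 * Sketch of the bootmem-info bookkeeping above: get_page_bootmem() stashes
 * the registration type (SECTION_INFO, MIX_SECTION_INFO, NODE_INFO) in
 * page->lru.next, the associated info (e.g. section number or node id) in
 * page_private(), and takes a reference. put_page_bootmem() drops that
 * reference and, when the last one goes away, clears the bookkeeping and
 * hands the page back via free_reserved_page().
 */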
18804753278SYasunori Goto 
18946723bfaSYasuaki Ishimatsu #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
19046723bfaSYasuaki Ishimatsu #ifndef CONFIG_SPARSEMEM_VMEMMAP
191d92bc318SAdrian Bunk static void register_page_bootmem_info_section(unsigned long start_pfn)
19204753278SYasunori Goto {
19304753278SYasunori Goto 	unsigned long *usemap, mapsize, section_nr, i;
19404753278SYasunori Goto 	struct mem_section *ms;
19504753278SYasunori Goto 	struct page *page, *memmap;
19604753278SYasunori Goto 
19704753278SYasunori Goto 	section_nr = pfn_to_section_nr(start_pfn);
19804753278SYasunori Goto 	ms = __nr_to_section(section_nr);
19904753278SYasunori Goto 
20004753278SYasunori Goto 	/* Get section's memmap address */
20104753278SYasunori Goto 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
20204753278SYasunori Goto 
20304753278SYasunori Goto 	/*
20404753278SYasunori Goto 	 * Get page for the memmap's phys address
20504753278SYasunori Goto 	 * XXX: need more consideration for sparse_vmemmap...
20604753278SYasunori Goto 	 */
20704753278SYasunori Goto 	page = virt_to_page(memmap);
20804753278SYasunori Goto 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
20904753278SYasunori Goto 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
21004753278SYasunori Goto 
21104753278SYasunori Goto 	/* remember memmap's page */
21204753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
21304753278SYasunori Goto 		get_page_bootmem(section_nr, page, SECTION_INFO);
21404753278SYasunori Goto 
21504753278SYasunori Goto 	usemap = __nr_to_section(section_nr)->pageblock_flags;
21604753278SYasunori Goto 	page = virt_to_page(usemap);
21704753278SYasunori Goto 
21804753278SYasunori Goto 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
21904753278SYasunori Goto 
22004753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
221af370fb8SYasunori Goto 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
22204753278SYasunori Goto 
22304753278SYasunori Goto }
22446723bfaSYasuaki Ishimatsu #else /* CONFIG_SPARSEMEM_VMEMMAP */
22546723bfaSYasuaki Ishimatsu static void register_page_bootmem_info_section(unsigned long start_pfn)
22646723bfaSYasuaki Ishimatsu {
22746723bfaSYasuaki Ishimatsu 	unsigned long *usemap, mapsize, section_nr, i;
22846723bfaSYasuaki Ishimatsu 	struct mem_section *ms;
22946723bfaSYasuaki Ishimatsu 	struct page *page, *memmap;
23046723bfaSYasuaki Ishimatsu 
23146723bfaSYasuaki Ishimatsu 	if (!pfn_valid(start_pfn))
23246723bfaSYasuaki Ishimatsu 		return;
23346723bfaSYasuaki Ishimatsu 
23446723bfaSYasuaki Ishimatsu 	section_nr = pfn_to_section_nr(start_pfn);
23546723bfaSYasuaki Ishimatsu 	ms = __nr_to_section(section_nr);
23646723bfaSYasuaki Ishimatsu 
23746723bfaSYasuaki Ishimatsu 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
23846723bfaSYasuaki Ishimatsu 
23946723bfaSYasuaki Ishimatsu 	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
24046723bfaSYasuaki Ishimatsu 
24146723bfaSYasuaki Ishimatsu 	usemap = __nr_to_section(section_nr)->pageblock_flags;
24246723bfaSYasuaki Ishimatsu 	page = virt_to_page(usemap);
24346723bfaSYasuaki Ishimatsu 
24446723bfaSYasuaki Ishimatsu 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
24546723bfaSYasuaki Ishimatsu 
24646723bfaSYasuaki Ishimatsu 	for (i = 0; i < mapsize; i++, page++)
24746723bfaSYasuaki Ishimatsu 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
24846723bfaSYasuaki Ishimatsu }
24946723bfaSYasuaki Ishimatsu #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
25004753278SYasunori Goto 
25104753278SYasunori Goto void register_page_bootmem_info_node(struct pglist_data *pgdat)
25204753278SYasunori Goto {
25304753278SYasunori Goto 	unsigned long i, pfn, end_pfn, nr_pages;
25404753278SYasunori Goto 	int node = pgdat->node_id;
25504753278SYasunori Goto 	struct page *page;
25604753278SYasunori Goto 	struct zone *zone;
25704753278SYasunori Goto 
25804753278SYasunori Goto 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
25904753278SYasunori Goto 	page = virt_to_page(pgdat);
26004753278SYasunori Goto 
26104753278SYasunori Goto 	for (i = 0; i < nr_pages; i++, page++)
26204753278SYasunori Goto 		get_page_bootmem(node, page, NODE_INFO);
26304753278SYasunori Goto 
26404753278SYasunori Goto 	zone = &pgdat->node_zones[0];
26504753278SYasunori Goto 	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
266139c2d75SXishi Qiu 		if (zone_is_initialized(zone)) {
26704753278SYasunori Goto 			nr_pages = zone->wait_table_hash_nr_entries
26804753278SYasunori Goto 				* sizeof(wait_queue_head_t);
26904753278SYasunori Goto 			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
27004753278SYasunori Goto 			page = virt_to_page(zone->wait_table);
27104753278SYasunori Goto 
27204753278SYasunori Goto 			for (i = 0; i < nr_pages; i++, page++)
27304753278SYasunori Goto 				get_page_bootmem(node, page, NODE_INFO);
27404753278SYasunori Goto 		}
27504753278SYasunori Goto 	}
27604753278SYasunori Goto 
27704753278SYasunori Goto 	pfn = pgdat->node_start_pfn;
278c1f19495SCody P Schafer 	end_pfn = pgdat_end_pfn(pgdat);
27904753278SYasunori Goto 
2807e9f5eb0STang Chen 	/* register section info */
281f14851afSqiuxishi 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
282f14851afSqiuxishi 		/*
283f14851afSqiuxishi 		 * Some platforms can assign the same pfn to multiple nodes - on
284f14851afSqiuxishi 		 * node0 as well as nodeN.  To avoid registering a pfn against
285f14851afSqiuxishi 		 * multiple nodes we check that this pfn does not already
2867e9f5eb0STang Chen 		 * reside in some other node.
287f14851afSqiuxishi 		 */
288f14851afSqiuxishi 		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
28904753278SYasunori Goto 			register_page_bootmem_info_section(pfn);
290f14851afSqiuxishi 	}
29104753278SYasunori Goto }
29246723bfaSYasuaki Ishimatsu #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
29304753278SYasunori Goto 
294f2765404SFabian Frederick static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
29576cdd58eSHeiko Carstens 				     unsigned long end_pfn)
29676cdd58eSHeiko Carstens {
29776cdd58eSHeiko Carstens 	unsigned long old_zone_end_pfn;
29876cdd58eSHeiko Carstens 
29976cdd58eSHeiko Carstens 	zone_span_writelock(zone);
30076cdd58eSHeiko Carstens 
301c33bc315SXishi Qiu 	old_zone_end_pfn = zone_end_pfn(zone);
3028080fc03SXishi Qiu 	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
30376cdd58eSHeiko Carstens 		zone->zone_start_pfn = start_pfn;
30476cdd58eSHeiko Carstens 
30576cdd58eSHeiko Carstens 	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
30676cdd58eSHeiko Carstens 				zone->zone_start_pfn;
30776cdd58eSHeiko Carstens 
30876cdd58eSHeiko Carstens 	zone_span_writeunlock(zone);
30976cdd58eSHeiko Carstens }
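/*
 * Worked example for grow_zone_span() (illustrative pfns only): a zone
 * spanning [0x10000, 0x18000) that has section [0x18000, 0x1c000) hot-added
 * keeps zone_start_pfn == 0x10000 and ends up with
 * spanned_pages = max(0x18000, 0x1c000) - 0x10000 = 0xc000.
 * An empty zone, or one starting above start_pfn, has its start moved down
 * to start_pfn first.
 */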
31076cdd58eSHeiko Carstens 
311511c2abaSLai Jiangshan static void resize_zone(struct zone *zone, unsigned long start_pfn,
312511c2abaSLai Jiangshan 		unsigned long end_pfn)
313511c2abaSLai Jiangshan {
314511c2abaSLai Jiangshan 	zone_span_writelock(zone);
315511c2abaSLai Jiangshan 
316e455a9b9SLai Jiangshan 	if (end_pfn - start_pfn) {
317511c2abaSLai Jiangshan 		zone->zone_start_pfn = start_pfn;
318511c2abaSLai Jiangshan 		zone->spanned_pages = end_pfn - start_pfn;
319e455a9b9SLai Jiangshan 	} else {
320e455a9b9SLai Jiangshan 		/*
321e455a9b9SLai Jiangshan 		 * keep this consistent with free_area_init_core():
322e455a9b9SLai Jiangshan 		 * if spanned_pages == 0, then keep zone_start_pfn == 0
323e455a9b9SLai Jiangshan 		 */
324e455a9b9SLai Jiangshan 		zone->zone_start_pfn = 0;
325e455a9b9SLai Jiangshan 		zone->spanned_pages = 0;
326e455a9b9SLai Jiangshan 	}
327511c2abaSLai Jiangshan 
328511c2abaSLai Jiangshan 	zone_span_writeunlock(zone);
329511c2abaSLai Jiangshan }
330511c2abaSLai Jiangshan 
331511c2abaSLai Jiangshan static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
332511c2abaSLai Jiangshan 		unsigned long end_pfn)
333511c2abaSLai Jiangshan {
334511c2abaSLai Jiangshan 	enum zone_type zid = zone_idx(zone);
335511c2abaSLai Jiangshan 	int nid = zone->zone_pgdat->node_id;
336511c2abaSLai Jiangshan 	unsigned long pfn;
337511c2abaSLai Jiangshan 
338511c2abaSLai Jiangshan 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
339511c2abaSLai Jiangshan 		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
340511c2abaSLai Jiangshan }
341511c2abaSLai Jiangshan 
342f6bbb78eSCody P Schafer /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
3439e43aa2bSSantosh Shilimkar  * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
344f6bbb78eSCody P Schafer static int __ref ensure_zone_is_initialized(struct zone *zone,
345f6bbb78eSCody P Schafer 			unsigned long start_pfn, unsigned long num_pages)
346f6bbb78eSCody P Schafer {
347f6bbb78eSCody P Schafer 	if (!zone_is_initialized(zone))
348b171e409SYaowei Bai 		return init_currently_empty_zone(zone, start_pfn, num_pages);
349b171e409SYaowei Bai 
350f6bbb78eSCody P Schafer 	return 0;
351f6bbb78eSCody P Schafer }
352f6bbb78eSCody P Schafer 
353e455a9b9SLai Jiangshan static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
354511c2abaSLai Jiangshan 		unsigned long start_pfn, unsigned long end_pfn)
355511c2abaSLai Jiangshan {
356e455a9b9SLai Jiangshan 	int ret;
357511c2abaSLai Jiangshan 	unsigned long flags;
358e455a9b9SLai Jiangshan 	unsigned long z1_start_pfn;
359e455a9b9SLai Jiangshan 
36064dd1b29SCody P Schafer 	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
361e455a9b9SLai Jiangshan 	if (ret)
362e455a9b9SLai Jiangshan 		return ret;
363511c2abaSLai Jiangshan 
364511c2abaSLai Jiangshan 	pgdat_resize_lock(z1->zone_pgdat, &flags);
365511c2abaSLai Jiangshan 
366511c2abaSLai Jiangshan 	/* can't move pfns which are higher than @z2 */
367108bcc96SCody P Schafer 	if (end_pfn > zone_end_pfn(z2))
368511c2abaSLai Jiangshan 		goto out_fail;
369834405c3SJiang Liu 	/* the moved-out part must be at the leftmost of @z2 */
370511c2abaSLai Jiangshan 	if (start_pfn > z2->zone_start_pfn)
371511c2abaSLai Jiangshan 		goto out_fail;
372511c2abaSLai Jiangshan 	/* the range must be included in or overlap @z2 */
373511c2abaSLai Jiangshan 	if (end_pfn <= z2->zone_start_pfn)
374511c2abaSLai Jiangshan 		goto out_fail;
375511c2abaSLai Jiangshan 
376e455a9b9SLai Jiangshan 	/* use start_pfn for z1's start_pfn if z1 is empty */
3778080fc03SXishi Qiu 	if (!zone_is_empty(z1))
378e455a9b9SLai Jiangshan 		z1_start_pfn = z1->zone_start_pfn;
379e455a9b9SLai Jiangshan 	else
380e455a9b9SLai Jiangshan 		z1_start_pfn = start_pfn;
381e455a9b9SLai Jiangshan 
382e455a9b9SLai Jiangshan 	resize_zone(z1, z1_start_pfn, end_pfn);
383108bcc96SCody P Schafer 	resize_zone(z2, end_pfn, zone_end_pfn(z2));
384511c2abaSLai Jiangshan 
385511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
386511c2abaSLai Jiangshan 
387511c2abaSLai Jiangshan 	fix_zone_id(z1, start_pfn, end_pfn);
388511c2abaSLai Jiangshan 
389511c2abaSLai Jiangshan 	return 0;
390511c2abaSLai Jiangshan out_fail:
391511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
392511c2abaSLai Jiangshan 	return -1;
393511c2abaSLai Jiangshan }
394511c2abaSLai Jiangshan 
395e455a9b9SLai Jiangshan static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
396511c2abaSLai Jiangshan 		unsigned long start_pfn, unsigned long end_pfn)
397511c2abaSLai Jiangshan {
398e455a9b9SLai Jiangshan 	int ret;
399511c2abaSLai Jiangshan 	unsigned long flags;
400e455a9b9SLai Jiangshan 	unsigned long z2_end_pfn;
401e455a9b9SLai Jiangshan 
40264dd1b29SCody P Schafer 	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
403e455a9b9SLai Jiangshan 	if (ret)
404e455a9b9SLai Jiangshan 		return ret;
405511c2abaSLai Jiangshan 
406511c2abaSLai Jiangshan 	pgdat_resize_lock(z1->zone_pgdat, &flags);
407511c2abaSLai Jiangshan 
408511c2abaSLai Jiangshan 	/* can't move pfns which are lower than @z1 */
409511c2abaSLai Jiangshan 	if (z1->zone_start_pfn > start_pfn)
410511c2abaSLai Jiangshan 		goto out_fail;
411511c2abaSLai Jiangshan 	/* the moved-out part must be at the rightmost of @z1 */
412108bcc96SCody P Schafer 	if (zone_end_pfn(z1) >  end_pfn)
413511c2abaSLai Jiangshan 		goto out_fail;
414511c2abaSLai Jiangshan 	/* the range must be included in or overlap @z1 */
415108bcc96SCody P Schafer 	if (start_pfn >= zone_end_pfn(z1))
416511c2abaSLai Jiangshan 		goto out_fail;
417511c2abaSLai Jiangshan 
418e455a9b9SLai Jiangshan 	/* use end_pfn for z2's end_pfn if z2 is empty */
4198080fc03SXishi Qiu 	if (!zone_is_empty(z2))
420108bcc96SCody P Schafer 		z2_end_pfn = zone_end_pfn(z2);
421e455a9b9SLai Jiangshan 	else
422e455a9b9SLai Jiangshan 		z2_end_pfn = end_pfn;
423e455a9b9SLai Jiangshan 
424511c2abaSLai Jiangshan 	resize_zone(z1, z1->zone_start_pfn, start_pfn);
425e455a9b9SLai Jiangshan 	resize_zone(z2, start_pfn, z2_end_pfn);
426511c2abaSLai Jiangshan 
427511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
428511c2abaSLai Jiangshan 
429511c2abaSLai Jiangshan 	fix_zone_id(z2, start_pfn, end_pfn);
430511c2abaSLai Jiangshan 
431511c2abaSLai Jiangshan 	return 0;
432511c2abaSLai Jiangshan out_fail:
433511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
434511c2abaSLai Jiangshan 	return -1;
435511c2abaSLai Jiangshan }
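/*
 * move_pfn_range_left() and move_pfn_range_right() shift a pfn range between
 * two adjacent zones: "left" pulls [start_pfn, end_pfn) from the front of @z2
 * into the lower zone @z1, "right" pushes the tail of @z1 into @z2. In this
 * file they are used by online_pages() when the requested online type does
 * not match the zone the range currently sits in, e.g. onlining as
 * MMOP_ONLINE_KERNEL a range that is presently in ZONE_MOVABLE moves it into
 * the neighbouring kernel zone (and vice versa for MMOP_ONLINE_MOVABLE).
 * Both return -1 if the range is not at the appropriate edge of the zones.
 */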
436511c2abaSLai Jiangshan 
437f2765404SFabian Frederick static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
43876cdd58eSHeiko Carstens 				      unsigned long end_pfn)
43976cdd58eSHeiko Carstens {
44083285c72SXishi Qiu 	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
44176cdd58eSHeiko Carstens 
442712cd386STang Chen 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
44376cdd58eSHeiko Carstens 		pgdat->node_start_pfn = start_pfn;
44476cdd58eSHeiko Carstens 
44576cdd58eSHeiko Carstens 	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
44676cdd58eSHeiko Carstens 					pgdat->node_start_pfn;
44776cdd58eSHeiko Carstens }
44876cdd58eSHeiko Carstens 
44931168481SAl Viro static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
4503947be19SDave Hansen {
4513947be19SDave Hansen 	struct pglist_data *pgdat = zone->zone_pgdat;
4523947be19SDave Hansen 	int nr_pages = PAGES_PER_SECTION;
4533947be19SDave Hansen 	int nid = pgdat->node_id;
4543947be19SDave Hansen 	int zone_type;
455e298ff75SMel Gorman 	unsigned long flags, pfn;
45676cdd58eSHeiko Carstens 	int ret;
45776cdd58eSHeiko Carstens 
45864dd1b29SCody P Schafer 	zone_type = zone - pgdat->node_zones;
45964dd1b29SCody P Schafer 	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
46076cdd58eSHeiko Carstens 	if (ret)
46176cdd58eSHeiko Carstens 		return ret;
46264dd1b29SCody P Schafer 
46376cdd58eSHeiko Carstens 	pgdat_resize_lock(zone->zone_pgdat, &flags);
46476cdd58eSHeiko Carstens 	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
46576cdd58eSHeiko Carstens 	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
46676cdd58eSHeiko Carstens 			phys_start_pfn + nr_pages);
46776cdd58eSHeiko Carstens 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
468a2f3aa02SDave Hansen 	memmap_init_zone(nr_pages, nid, zone_type,
469a2f3aa02SDave Hansen 			 phys_start_pfn, MEMMAP_HOTPLUG);
470e298ff75SMel Gorman 
471e298ff75SMel Gorman 	/* online_pages_range() is called later and expects pages reserved */
472e298ff75SMel Gorman 	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
473e298ff75SMel Gorman 		if (!pfn_valid(pfn))
474e298ff75SMel Gorman 			continue;
475e298ff75SMel Gorman 
476e298ff75SMel Gorman 		SetPageReserved(pfn_to_page(pfn));
477e298ff75SMel Gorman 	}
478718127ccSYasunori Goto 	return 0;
4793947be19SDave Hansen }
4803947be19SDave Hansen 
481c04fc586SGary Hade static int __meminit __add_section(int nid, struct zone *zone,
482c04fc586SGary Hade 					unsigned long phys_start_pfn)
4833947be19SDave Hansen {
4843947be19SDave Hansen 	int ret;
4853947be19SDave Hansen 
486ebd15302SKAMEZAWA Hiroyuki 	if (pfn_valid(phys_start_pfn))
487ebd15302SKAMEZAWA Hiroyuki 		return -EEXIST;
488ebd15302SKAMEZAWA Hiroyuki 
48985b35feaSZhang Yanfei 	ret = sparse_add_one_section(zone, phys_start_pfn);
4903947be19SDave Hansen 
4913947be19SDave Hansen 	if (ret < 0)
4923947be19SDave Hansen 		return ret;
4933947be19SDave Hansen 
494718127ccSYasunori Goto 	ret = __add_zone(zone, phys_start_pfn);
495718127ccSYasunori Goto 
496718127ccSYasunori Goto 	if (ret < 0)
497718127ccSYasunori Goto 		return ret;
498718127ccSYasunori Goto 
499c04fc586SGary Hade 	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
5003947be19SDave Hansen }
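/*
 * Rough flow of a single section hot-add (as implemented above): if the
 * memmap already exists (pfn_valid()) we bail out with -EEXIST; otherwise
 * sparse_add_one_section() allocates the memmap, __add_zone() grows the zone
 * and pgdat spans, initialises the new struct pages with MEMMAP_HOTPLUG and
 * marks them reserved, and finally register_new_memory() creates the sysfs
 * memory block for the section.
 */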
5013947be19SDave Hansen 
5024edd7cefSDavid Rientjes /*
5034edd7cefSDavid Rientjes  * Reasonably generic function for adding memory.  It is
5044edd7cefSDavid Rientjes  * expected that archs that support memory hotplug will
5054edd7cefSDavid Rientjes  * call this function after deciding the zone to which to
5064edd7cefSDavid Rientjes  * add the new pages.
5074edd7cefSDavid Rientjes  */
5084edd7cefSDavid Rientjes int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
5094edd7cefSDavid Rientjes 			unsigned long nr_pages)
5104edd7cefSDavid Rientjes {
5114edd7cefSDavid Rientjes 	unsigned long i;
5124edd7cefSDavid Rientjes 	int err = 0;
5134edd7cefSDavid Rientjes 	int start_sec, end_sec;
5144b94ffdcSDan Williams 	struct vmem_altmap *altmap;
5154b94ffdcSDan Williams 
5167cf91a98SJoonsoo Kim 	clear_zone_contiguous(zone);
5177cf91a98SJoonsoo Kim 
5184edd7cefSDavid Rientjes 	/* while initializing mem_map, align the hot-added range to sections */
5194edd7cefSDavid Rientjes 	start_sec = pfn_to_section_nr(phys_start_pfn);
5204edd7cefSDavid Rientjes 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
5214edd7cefSDavid Rientjes 
5224b94ffdcSDan Williams 	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
5234b94ffdcSDan Williams 	if (altmap) {
5244b94ffdcSDan Williams 		/*
5254b94ffdcSDan Williams 		 * Validate altmap is within bounds of the total request
5264b94ffdcSDan Williams 		 */
5274b94ffdcSDan Williams 		if (altmap->base_pfn != phys_start_pfn
5284b94ffdcSDan Williams 				|| vmem_altmap_offset(altmap) > nr_pages) {
5294b94ffdcSDan Williams 			pr_warn_once("memory add fail, invalid altmap\n");
5307cf91a98SJoonsoo Kim 			err = -EINVAL;
5317cf91a98SJoonsoo Kim 			goto out;
5324b94ffdcSDan Williams 		}
5334b94ffdcSDan Williams 		altmap->alloc = 0;
5344b94ffdcSDan Williams 	}
5354b94ffdcSDan Williams 
5364edd7cefSDavid Rientjes 	for (i = start_sec; i <= end_sec; i++) {
53719c07d5eSSheng Yong 		err = __add_section(nid, zone, section_nr_to_pfn(i));
5384edd7cefSDavid Rientjes 
5394edd7cefSDavid Rientjes 		/*
5404edd7cefSDavid Rientjes 		 * EEXIST is finally dealt with by ioresource collision
5404edd7cefSDavid Rientjes 		 * EEXIST is finally dealt with by the ioresource collision
5414edd7cefSDavid Rientjes 		 * check; see add_memory() => register_memory_resource().
5424edd7cefSDavid Rientjes 		 * A warning is printed if there is a collision.
5444edd7cefSDavid Rientjes 		if (err && (err != -EEXIST))
5454edd7cefSDavid Rientjes 			break;
5464edd7cefSDavid Rientjes 		err = 0;
5474edd7cefSDavid Rientjes 	}
548c435a390SZhu Guihua 	vmemmap_populate_print_last();
5497cf91a98SJoonsoo Kim out:
5507cf91a98SJoonsoo Kim 	set_zone_contiguous(zone);
5514edd7cefSDavid Rientjes 	return err;
5524edd7cefSDavid Rientjes }
5534edd7cefSDavid Rientjes EXPORT_SYMBOL_GPL(__add_pages);
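/*
 * Illustrative caller (a sketch only; the real code differs per
 * architecture): an arch_add_memory() implementation typically picks the
 * target zone and then hands the section-aligned range to __add_pages(),
 * roughly:
 *
 *	struct pglist_data *pgdat = NODE_DATA(nid);
 *	struct zone *zone = pgdat->node_zones +
 *		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 *
 *	return __add_pages(nid, zone, start >> PAGE_SHIFT, size >> PAGE_SHIFT);
 *
 * zone_for_memory() is named here as used by several arches of this era; if
 * a given arch differs, treat the above as pseudocode.
 */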
5544edd7cefSDavid Rientjes 
5554edd7cefSDavid Rientjes #ifdef CONFIG_MEMORY_HOTREMOVE
556815121d2SYasuaki Ishimatsu /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
557815121d2SYasuaki Ishimatsu static int find_smallest_section_pfn(int nid, struct zone *zone,
558815121d2SYasuaki Ishimatsu 				     unsigned long start_pfn,
559815121d2SYasuaki Ishimatsu 				     unsigned long end_pfn)
560815121d2SYasuaki Ishimatsu {
561815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
562815121d2SYasuaki Ishimatsu 
563815121d2SYasuaki Ishimatsu 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
564815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(start_pfn);
565815121d2SYasuaki Ishimatsu 
566815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
567815121d2SYasuaki Ishimatsu 			continue;
568815121d2SYasuaki Ishimatsu 
569815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(start_pfn) != nid))
570815121d2SYasuaki Ishimatsu 			continue;
571815121d2SYasuaki Ishimatsu 
572815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
573815121d2SYasuaki Ishimatsu 			continue;
574815121d2SYasuaki Ishimatsu 
575815121d2SYasuaki Ishimatsu 		return start_pfn;
576815121d2SYasuaki Ishimatsu 	}
577815121d2SYasuaki Ishimatsu 
578815121d2SYasuaki Ishimatsu 	return 0;
579815121d2SYasuaki Ishimatsu }
580815121d2SYasuaki Ishimatsu 
581815121d2SYasuaki Ishimatsu /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
582815121d2SYasuaki Ishimatsu static int find_biggest_section_pfn(int nid, struct zone *zone,
583815121d2SYasuaki Ishimatsu 				    unsigned long start_pfn,
584815121d2SYasuaki Ishimatsu 				    unsigned long end_pfn)
585815121d2SYasuaki Ishimatsu {
586815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
587815121d2SYasuaki Ishimatsu 	unsigned long pfn;
588815121d2SYasuaki Ishimatsu 
589815121d2SYasuaki Ishimatsu 	/* pfn is the end pfn of a memory section. */
590815121d2SYasuaki Ishimatsu 	pfn = end_pfn - 1;
591815121d2SYasuaki Ishimatsu 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
592815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
593815121d2SYasuaki Ishimatsu 
594815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
595815121d2SYasuaki Ishimatsu 			continue;
596815121d2SYasuaki Ishimatsu 
597815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(pfn) != nid))
598815121d2SYasuaki Ishimatsu 			continue;
599815121d2SYasuaki Ishimatsu 
600815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(pfn)))
601815121d2SYasuaki Ishimatsu 			continue;
602815121d2SYasuaki Ishimatsu 
603815121d2SYasuaki Ishimatsu 		return pfn;
604815121d2SYasuaki Ishimatsu 	}
605815121d2SYasuaki Ishimatsu 
606815121d2SYasuaki Ishimatsu 	return 0;
607815121d2SYasuaki Ishimatsu }
608815121d2SYasuaki Ishimatsu 
609815121d2SYasuaki Ishimatsu static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
610815121d2SYasuaki Ishimatsu 			     unsigned long end_pfn)
611815121d2SYasuaki Ishimatsu {
612815121d2SYasuaki Ishimatsu 	unsigned long zone_start_pfn = zone->zone_start_pfn;
613c33bc315SXishi Qiu 	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
614c33bc315SXishi Qiu 	unsigned long zone_end_pfn = z;
615815121d2SYasuaki Ishimatsu 	unsigned long pfn;
616815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
617815121d2SYasuaki Ishimatsu 	int nid = zone_to_nid(zone);
618815121d2SYasuaki Ishimatsu 
619815121d2SYasuaki Ishimatsu 	zone_span_writelock(zone);
620815121d2SYasuaki Ishimatsu 	if (zone_start_pfn == start_pfn) {
621815121d2SYasuaki Ishimatsu 		/*
622815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the zone, we need
623815121d2SYasuaki Ishimatsu 		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
624815121d2SYasuaki Ishimatsu 		 * In this case, we find the second smallest valid mem_section
625815121d2SYasuaki Ishimatsu 		 * for shrinking the zone.
626815121d2SYasuaki Ishimatsu 		 */
627815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
628815121d2SYasuaki Ishimatsu 						zone_end_pfn);
629815121d2SYasuaki Ishimatsu 		if (pfn) {
630815121d2SYasuaki Ishimatsu 			zone->zone_start_pfn = pfn;
631815121d2SYasuaki Ishimatsu 			zone->spanned_pages = zone_end_pfn - pfn;
632815121d2SYasuaki Ishimatsu 		}
633815121d2SYasuaki Ishimatsu 	} else if (zone_end_pfn == end_pfn) {
634815121d2SYasuaki Ishimatsu 		/*
635815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the zone, we need
636815121d2SYasuaki Ishimatsu 		 * to shrink zone->spanned_pages.
637815121d2SYasuaki Ishimatsu 		 * In this case, we find the second biggest valid mem_section
638815121d2SYasuaki Ishimatsu 		 * for shrinking the zone.
639815121d2SYasuaki Ishimatsu 		 */
640815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
641815121d2SYasuaki Ishimatsu 					       start_pfn);
642815121d2SYasuaki Ishimatsu 		if (pfn)
643815121d2SYasuaki Ishimatsu 			zone->spanned_pages = pfn - zone_start_pfn + 1;
644815121d2SYasuaki Ishimatsu 	}
645815121d2SYasuaki Ishimatsu 
646815121d2SYasuaki Ishimatsu 	/*
647815121d2SYasuaki Ishimatsu 	 * The section is neither the biggest nor the smallest mem_section in
648815121d2SYasuaki Ishimatsu 	 * the zone; it only creates a hole in the zone. In this case we need
649815121d2SYasuaki Ishimatsu 	 * not change the zone span, but the zone may now contain nothing but
650815121d2SYasuaki Ishimatsu 	 * holes, so check whether any valid section remains.
651815121d2SYasuaki Ishimatsu 	 */
652815121d2SYasuaki Ishimatsu 	pfn = zone_start_pfn;
653815121d2SYasuaki Ishimatsu 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
654815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
655815121d2SYasuaki Ishimatsu 
656815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
657815121d2SYasuaki Ishimatsu 			continue;
658815121d2SYasuaki Ishimatsu 
659815121d2SYasuaki Ishimatsu 		if (page_zone(pfn_to_page(pfn)) != zone)
660815121d2SYasuaki Ishimatsu 			continue;
661815121d2SYasuaki Ishimatsu 
662815121d2SYasuaki Ishimatsu 		 /* Skip the section currently being removed */
663815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
664815121d2SYasuaki Ishimatsu 			continue;
665815121d2SYasuaki Ishimatsu 
666815121d2SYasuaki Ishimatsu 		/* We found a valid section, so there is nothing to do */
667815121d2SYasuaki Ishimatsu 		zone_span_writeunlock(zone);
668815121d2SYasuaki Ishimatsu 		return;
669815121d2SYasuaki Ishimatsu 	}
670815121d2SYasuaki Ishimatsu 
671815121d2SYasuaki Ishimatsu 	/* The zone has no valid section */
672815121d2SYasuaki Ishimatsu 	zone->zone_start_pfn = 0;
673815121d2SYasuaki Ishimatsu 	zone->spanned_pages = 0;
674815121d2SYasuaki Ishimatsu 	zone_span_writeunlock(zone);
675815121d2SYasuaki Ishimatsu }
676815121d2SYasuaki Ishimatsu 
677815121d2SYasuaki Ishimatsu static void shrink_pgdat_span(struct pglist_data *pgdat,
678815121d2SYasuaki Ishimatsu 			      unsigned long start_pfn, unsigned long end_pfn)
679815121d2SYasuaki Ishimatsu {
680815121d2SYasuaki Ishimatsu 	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
68183285c72SXishi Qiu 	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
68283285c72SXishi Qiu 	unsigned long pgdat_end_pfn = p;
683815121d2SYasuaki Ishimatsu 	unsigned long pfn;
684815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
685815121d2SYasuaki Ishimatsu 	int nid = pgdat->node_id;
686815121d2SYasuaki Ishimatsu 
687815121d2SYasuaki Ishimatsu 	if (pgdat_start_pfn == start_pfn) {
688815121d2SYasuaki Ishimatsu 		/*
689815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the pgdat, we need
690815121d2SYasuaki Ishimatsu 		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
691815121d2SYasuaki Ishimatsu 		 * In this case, we find the second smallest valid mem_section
692815121d2SYasuaki Ishimatsu 		 * for shrinking the pgdat.
693815121d2SYasuaki Ishimatsu 		 */
694815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
695815121d2SYasuaki Ishimatsu 						pgdat_end_pfn);
696815121d2SYasuaki Ishimatsu 		if (pfn) {
697815121d2SYasuaki Ishimatsu 			pgdat->node_start_pfn = pfn;
698815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
699815121d2SYasuaki Ishimatsu 		}
700815121d2SYasuaki Ishimatsu 	} else if (pgdat_end_pfn == end_pfn) {
701815121d2SYasuaki Ishimatsu 		/*
702815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the pgdat, we need
703815121d2SYasuaki Ishimatsu 		 * to shrink pgdat->node_spanned_pages.
704815121d2SYasuaki Ishimatsu 		 * In this case, we find the second biggest valid mem_section
705815121d2SYasuaki Ishimatsu 		 * for shrinking the pgdat.
706815121d2SYasuaki Ishimatsu 		 */
707815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
708815121d2SYasuaki Ishimatsu 					       start_pfn);
709815121d2SYasuaki Ishimatsu 		if (pfn)
710815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
711815121d2SYasuaki Ishimatsu 	}
712815121d2SYasuaki Ishimatsu 
713815121d2SYasuaki Ishimatsu 	/*
714815121d2SYasuaki Ishimatsu 	 * If the section is neither the biggest nor the smallest mem_section
715815121d2SYasuaki Ishimatsu 	 * in the pgdat, it only creates a hole in the pgdat. In this case we
716815121d2SYasuaki Ishimatsu 	 * need not change the pgdat span.
717815121d2SYasuaki Ishimatsu 	 * But the pgdat may now contain nothing but holes, so check whether
718815121d2SYasuaki Ishimatsu 	 * any valid section remains.
719815121d2SYasuaki Ishimatsu 	 */
720815121d2SYasuaki Ishimatsu 	pfn = pgdat_start_pfn;
721815121d2SYasuaki Ishimatsu 	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
722815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
723815121d2SYasuaki Ishimatsu 
724815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
725815121d2SYasuaki Ishimatsu 			continue;
726815121d2SYasuaki Ishimatsu 
727815121d2SYasuaki Ishimatsu 		if (pfn_to_nid(pfn) != nid)
728815121d2SYasuaki Ishimatsu 			continue;
729815121d2SYasuaki Ishimatsu 
730815121d2SYasuaki Ishimatsu 		 /* Skip the section currently being removed */
731815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
732815121d2SYasuaki Ishimatsu 			continue;
733815121d2SYasuaki Ishimatsu 
734815121d2SYasuaki Ishimatsu 		/* We found a valid section, so there is nothing to do */
735815121d2SYasuaki Ishimatsu 		return;
736815121d2SYasuaki Ishimatsu 	}
737815121d2SYasuaki Ishimatsu 
738815121d2SYasuaki Ishimatsu 	/* The pgdat has no valid section */
739815121d2SYasuaki Ishimatsu 	pgdat->node_start_pfn = 0;
740815121d2SYasuaki Ishimatsu 	pgdat->node_spanned_pages = 0;
741815121d2SYasuaki Ishimatsu }
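/*
 * Example for the two shrink helpers above (illustrative section numbers):
 * if a zone spans sections 4..7 and section 4 is removed,
 * shrink_zone_span() uses find_smallest_section_pfn() to advance
 * zone_start_pfn to the first pfn of section 5; removing section 7 instead
 * uses find_biggest_section_pfn() to pull spanned_pages back to the end of
 * section 6. If no valid section is left at all, the zone (or pgdat) span is
 * reset to empty.
 */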
742815121d2SYasuaki Ishimatsu 
743815121d2SYasuaki Ishimatsu static void __remove_zone(struct zone *zone, unsigned long start_pfn)
744815121d2SYasuaki Ishimatsu {
745815121d2SYasuaki Ishimatsu 	struct pglist_data *pgdat = zone->zone_pgdat;
746815121d2SYasuaki Ishimatsu 	int nr_pages = PAGES_PER_SECTION;
747815121d2SYasuaki Ishimatsu 	int zone_type;
748815121d2SYasuaki Ishimatsu 	unsigned long flags;
749815121d2SYasuaki Ishimatsu 
750815121d2SYasuaki Ishimatsu 	zone_type = zone - pgdat->node_zones;
751815121d2SYasuaki Ishimatsu 
752815121d2SYasuaki Ishimatsu 	pgdat_resize_lock(zone->zone_pgdat, &flags);
753815121d2SYasuaki Ishimatsu 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
754815121d2SYasuaki Ishimatsu 	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
755815121d2SYasuaki Ishimatsu 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
756815121d2SYasuaki Ishimatsu }
757815121d2SYasuaki Ishimatsu 
7584b94ffdcSDan Williams static int __remove_section(struct zone *zone, struct mem_section *ms,
7594b94ffdcSDan Williams 		unsigned long map_offset)
760ea01ea93SBadari Pulavarty {
761815121d2SYasuaki Ishimatsu 	unsigned long start_pfn;
762815121d2SYasuaki Ishimatsu 	int scn_nr;
763ea01ea93SBadari Pulavarty 	int ret = -EINVAL;
764ea01ea93SBadari Pulavarty 
765ea01ea93SBadari Pulavarty 	if (!valid_section(ms))
766ea01ea93SBadari Pulavarty 		return ret;
767ea01ea93SBadari Pulavarty 
768ea01ea93SBadari Pulavarty 	ret = unregister_memory_section(ms);
769ea01ea93SBadari Pulavarty 	if (ret)
770ea01ea93SBadari Pulavarty 		return ret;
771ea01ea93SBadari Pulavarty 
772815121d2SYasuaki Ishimatsu 	scn_nr = __section_nr(ms);
773815121d2SYasuaki Ishimatsu 	start_pfn = section_nr_to_pfn(scn_nr);
774815121d2SYasuaki Ishimatsu 	__remove_zone(zone, start_pfn);
775815121d2SYasuaki Ishimatsu 
7764b94ffdcSDan Williams 	sparse_remove_one_section(zone, ms, map_offset);
777ea01ea93SBadari Pulavarty 	return 0;
778ea01ea93SBadari Pulavarty }
779ea01ea93SBadari Pulavarty 
780ea01ea93SBadari Pulavarty /**
781ea01ea93SBadari Pulavarty  * __remove_pages() - remove sections of pages from a zone
782ea01ea93SBadari Pulavarty  * @zone: zone from which pages need to be removed
783ea01ea93SBadari Pulavarty  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
784ea01ea93SBadari Pulavarty  * @nr_pages: number of pages to remove (must be multiple of section size)
785ea01ea93SBadari Pulavarty  *
786ea01ea93SBadari Pulavarty  * Generic helper function to remove section mappings and sysfs entries
787ea01ea93SBadari Pulavarty  * for the section of the memory we are removing. Caller needs to make
788ea01ea93SBadari Pulavarty  * sure that pages are marked reserved and zones are adjusted properly by
789ea01ea93SBadari Pulavarty  * calling offline_pages().
790ea01ea93SBadari Pulavarty  */
791ea01ea93SBadari Pulavarty int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
792ea01ea93SBadari Pulavarty 		 unsigned long nr_pages)
793ea01ea93SBadari Pulavarty {
794fe74ebb1SToshi Kani 	unsigned long i;
7954b94ffdcSDan Williams 	unsigned long map_offset = 0;
7964b94ffdcSDan Williams 	int sections_to_remove, ret = 0;
7974b94ffdcSDan Williams 
7984b94ffdcSDan Williams 	/* In the ZONE_DEVICE case device driver owns the memory region */
7994b94ffdcSDan Williams 	if (is_dev_zone(zone)) {
8004b94ffdcSDan Williams 		struct page *page = pfn_to_page(phys_start_pfn);
8014b94ffdcSDan Williams 		struct vmem_altmap *altmap;
8024b94ffdcSDan Williams 
8034b94ffdcSDan Williams 		altmap = to_vmem_altmap((unsigned long) page);
8044b94ffdcSDan Williams 		if (altmap)
8054b94ffdcSDan Williams 			map_offset = vmem_altmap_offset(altmap);
8064b94ffdcSDan Williams 	} else {
807fe74ebb1SToshi Kani 		resource_size_t start, size;
8084b94ffdcSDan Williams 
8094b94ffdcSDan Williams 		start = phys_start_pfn << PAGE_SHIFT;
8104b94ffdcSDan Williams 		size = nr_pages * PAGE_SIZE;
8114b94ffdcSDan Williams 
8124b94ffdcSDan Williams 		ret = release_mem_region_adjustable(&iomem_resource, start,
8134b94ffdcSDan Williams 					size);
8144b94ffdcSDan Williams 		if (ret) {
8154b94ffdcSDan Williams 			resource_size_t endres = start + size - 1;
8164b94ffdcSDan Williams 
8174b94ffdcSDan Williams 			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
8184b94ffdcSDan Williams 					&start, &endres, ret);
8194b94ffdcSDan Williams 		}
8204b94ffdcSDan Williams 	}
821ea01ea93SBadari Pulavarty 
8227cf91a98SJoonsoo Kim 	clear_zone_contiguous(zone);
8237cf91a98SJoonsoo Kim 
824ea01ea93SBadari Pulavarty 	/*
825ea01ea93SBadari Pulavarty 	 * We can only remove entire sections
826ea01ea93SBadari Pulavarty 	 */
827ea01ea93SBadari Pulavarty 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
828ea01ea93SBadari Pulavarty 	BUG_ON(nr_pages % PAGES_PER_SECTION);
829ea01ea93SBadari Pulavarty 
830ea01ea93SBadari Pulavarty 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
831ea01ea93SBadari Pulavarty 	for (i = 0; i < sections_to_remove; i++) {
832ea01ea93SBadari Pulavarty 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
8334b94ffdcSDan Williams 
8344b94ffdcSDan Williams 		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
8354b94ffdcSDan Williams 		map_offset = 0;
836ea01ea93SBadari Pulavarty 		if (ret)
837ea01ea93SBadari Pulavarty 			break;
838ea01ea93SBadari Pulavarty 	}
8397cf91a98SJoonsoo Kim 
8407cf91a98SJoonsoo Kim 	set_zone_contiguous(zone);
8417cf91a98SJoonsoo Kim 
842ea01ea93SBadari Pulavarty 	return ret;
843ea01ea93SBadari Pulavarty }
844ea01ea93SBadari Pulavarty EXPORT_SYMBOL_GPL(__remove_pages);
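/*
 * Note on the map_offset handling above (descriptive only): for ZONE_DEVICE
 * memory the struct pages may live in the hot-added range itself (struct
 * vmem_altmap), so vmem_altmap_offset() tells the first __remove_section()
 * call how many leading pfns back the memmap and must not be torn down as
 * ordinary memory; subsequent sections use map_offset == 0. For regular
 * System RAM there is no altmap and the matching iomem resource is released
 * instead.
 */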
8454edd7cefSDavid Rientjes #endif /* CONFIG_MEMORY_HOTREMOVE */
846ea01ea93SBadari Pulavarty 
8479d0ad8caSDaniel Kiper int set_online_page_callback(online_page_callback_t callback)
8489d0ad8caSDaniel Kiper {
8499d0ad8caSDaniel Kiper 	int rc = -EINVAL;
8509d0ad8caSDaniel Kiper 
851bfc8c901SVladimir Davydov 	get_online_mems();
852bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
8539d0ad8caSDaniel Kiper 
8549d0ad8caSDaniel Kiper 	if (online_page_callback == generic_online_page) {
8559d0ad8caSDaniel Kiper 		online_page_callback = callback;
8569d0ad8caSDaniel Kiper 		rc = 0;
8579d0ad8caSDaniel Kiper 	}
8589d0ad8caSDaniel Kiper 
859bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
860bfc8c901SVladimir Davydov 	put_online_mems();
8619d0ad8caSDaniel Kiper 
8629d0ad8caSDaniel Kiper 	return rc;
8639d0ad8caSDaniel Kiper }
8649d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(set_online_page_callback);
8659d0ad8caSDaniel Kiper 
8669d0ad8caSDaniel Kiper int restore_online_page_callback(online_page_callback_t callback)
8679d0ad8caSDaniel Kiper {
8689d0ad8caSDaniel Kiper 	int rc = -EINVAL;
8699d0ad8caSDaniel Kiper 
870bfc8c901SVladimir Davydov 	get_online_mems();
871bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
8729d0ad8caSDaniel Kiper 
8739d0ad8caSDaniel Kiper 	if (online_page_callback == callback) {
8749d0ad8caSDaniel Kiper 		online_page_callback = generic_online_page;
8759d0ad8caSDaniel Kiper 		rc = 0;
8769d0ad8caSDaniel Kiper 	}
8779d0ad8caSDaniel Kiper 
878bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
879bfc8c901SVladimir Davydov 	put_online_mems();
8809d0ad8caSDaniel Kiper 
8819d0ad8caSDaniel Kiper 	return rc;
8829d0ad8caSDaniel Kiper }
8839d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(restore_online_page_callback);
8849d0ad8caSDaniel Kiper 
8859d0ad8caSDaniel Kiper void __online_page_set_limits(struct page *page)
886180c06efSJeremy Fitzhardinge {
8879d0ad8caSDaniel Kiper }
8889d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_set_limits);
8899d0ad8caSDaniel Kiper 
8909d0ad8caSDaniel Kiper void __online_page_increment_counters(struct page *page)
8919d0ad8caSDaniel Kiper {
8923dcc0571SJiang Liu 	adjust_managed_page_count(page, 1);
8939d0ad8caSDaniel Kiper }
8949d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_increment_counters);
895180c06efSJeremy Fitzhardinge 
8969d0ad8caSDaniel Kiper void __online_page_free(struct page *page)
8979d0ad8caSDaniel Kiper {
8983dcc0571SJiang Liu 	__free_reserved_page(page);
899180c06efSJeremy Fitzhardinge }
9009d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_free);
9019d0ad8caSDaniel Kiper 
9029d0ad8caSDaniel Kiper static void generic_online_page(struct page *page)
9039d0ad8caSDaniel Kiper {
9049d0ad8caSDaniel Kiper 	__online_page_set_limits(page);
9059d0ad8caSDaniel Kiper 	__online_page_increment_counters(page);
9069d0ad8caSDaniel Kiper 	__online_page_free(page);
9079d0ad8caSDaniel Kiper }
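/*
 * Illustrative use of the callback hooks above (a sketch; the driver and
 * helper names are hypothetical): a ballooning driver that wants to hold back
 * some hot-added pages can install its own hook and reuse the
 * __online_page_* helpers for the pages it does release, roughly:
 *
 *	static void my_balloon_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		if (keep_page_in_balloon(page)) {	// hypothetical policy
 *			balloon_append(page);		// hypothetical
 *		} else {
 *			__online_page_increment_counters(page);
 *			__online_page_free(page);
 *		}
 *	}
 *
 *	set_online_page_callback(&my_balloon_online_page);
 *	...
 *	restore_online_page_callback(&my_balloon_online_page);
 */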
908180c06efSJeremy Fitzhardinge 
90975884fb1SKAMEZAWA Hiroyuki static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
91075884fb1SKAMEZAWA Hiroyuki 			void *arg)
9113947be19SDave Hansen {
9123947be19SDave Hansen 	unsigned long i;
91375884fb1SKAMEZAWA Hiroyuki 	unsigned long onlined_pages = *(unsigned long *)arg;
91475884fb1SKAMEZAWA Hiroyuki 	struct page *page;
91575884fb1SKAMEZAWA Hiroyuki 	if (PageReserved(pfn_to_page(start_pfn)))
91675884fb1SKAMEZAWA Hiroyuki 		for (i = 0; i < nr_pages; i++) {
91775884fb1SKAMEZAWA Hiroyuki 			page = pfn_to_page(start_pfn + i);
9189d0ad8caSDaniel Kiper 			(*online_page_callback)(page);
91975884fb1SKAMEZAWA Hiroyuki 			onlined_pages++;
92075884fb1SKAMEZAWA Hiroyuki 		}
92175884fb1SKAMEZAWA Hiroyuki 	*(unsigned long *)arg = onlined_pages;
92275884fb1SKAMEZAWA Hiroyuki 	return 0;
92375884fb1SKAMEZAWA Hiroyuki }
92475884fb1SKAMEZAWA Hiroyuki 
92509285af7SLai Jiangshan #ifdef CONFIG_MOVABLE_NODE
92679a4dcefSTang Chen /*
92779a4dcefSTang Chen  * When CONFIG_MOVABLE_NODE is set, we permit onlining a node which doesn't have
92879a4dcefSTang Chen  * normal memory.
92979a4dcefSTang Chen  */
93009285af7SLai Jiangshan static bool can_online_high_movable(struct zone *zone)
93109285af7SLai Jiangshan {
93209285af7SLai Jiangshan 	return true;
93309285af7SLai Jiangshan }
93479a4dcefSTang Chen #else /* CONFIG_MOVABLE_NODE */
93574d42d8fSLai Jiangshan /* ensure every online node has NORMAL memory */
93674d42d8fSLai Jiangshan static bool can_online_high_movable(struct zone *zone)
93774d42d8fSLai Jiangshan {
93874d42d8fSLai Jiangshan 	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
93974d42d8fSLai Jiangshan }
94079a4dcefSTang Chen #endif /* CONFIG_MOVABLE_NODE */
94174d42d8fSLai Jiangshan 
942d9713679SLai Jiangshan /* check which state of node_states will be changed when online memory */
943d9713679SLai Jiangshan static void node_states_check_changes_online(unsigned long nr_pages,
944d9713679SLai Jiangshan 	struct zone *zone, struct memory_notify *arg)
945d9713679SLai Jiangshan {
946d9713679SLai Jiangshan 	int nid = zone_to_nid(zone);
947d9713679SLai Jiangshan 	enum zone_type zone_last = ZONE_NORMAL;
948d9713679SLai Jiangshan 
949d9713679SLai Jiangshan 	/*
9506715ddf9SLai Jiangshan 	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
9516715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
9526715ddf9SLai Jiangshan 	 * set zone_last to ZONE_NORMAL.
953d9713679SLai Jiangshan 	 *
9546715ddf9SLai Jiangshan 	 * If we have neither HIGHMEM nor a movable node,
9556715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
9566715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
957d9713679SLai Jiangshan 	 */
9586715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
959d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
960d9713679SLai Jiangshan 
961d9713679SLai Jiangshan 	/*
962d9713679SLai Jiangshan 	 * if the memory to be onlined is in a zone of 0...zone_last, and
963d9713679SLai Jiangshan 	 * the zones of 0...zone_last don't have memory before onlining, we will
964d9713679SLai Jiangshan 	 * need to set the node in node_states[N_NORMAL_MEMORY] after
965d9713679SLai Jiangshan 	 * the memory is onlined.
966d9713679SLai Jiangshan 	 */
967d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
968d9713679SLai Jiangshan 		arg->status_change_nid_normal = nid;
969d9713679SLai Jiangshan 	else
970d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
971d9713679SLai Jiangshan 
9726715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
9736715ddf9SLai Jiangshan 	/*
9746715ddf9SLai Jiangshan 	 * If we have movable node, node_states[N_HIGH_MEMORY]
9756715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
9766715ddf9SLai Jiangshan 	 * set zone_last to ZONE_HIGHMEM.
9776715ddf9SLai Jiangshan 	 *
9786715ddf9SLai Jiangshan 	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
9796715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
9806715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
9816715ddf9SLai Jiangshan 	 */
9826715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
9836715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
9846715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
9856715ddf9SLai Jiangshan 
9866715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
9876715ddf9SLai Jiangshan 		arg->status_change_nid_high = nid;
9886715ddf9SLai Jiangshan 	else
9896715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
9906715ddf9SLai Jiangshan #else
9916715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
9926715ddf9SLai Jiangshan #endif
9936715ddf9SLai Jiangshan 
994d9713679SLai Jiangshan 	/*
995d9713679SLai Jiangshan 	 * if the node doesn't have memory before onlining, we will need to
9966715ddf9SLai Jiangshan 	 * set the node in node_states[N_MEMORY] after the memory
997d9713679SLai Jiangshan 	 * is onlined.
998d9713679SLai Jiangshan 	 */
9996715ddf9SLai Jiangshan 	if (!node_state(nid, N_MEMORY))
1000d9713679SLai Jiangshan 		arg->status_change_nid = nid;
1001d9713679SLai Jiangshan 	else
1002d9713679SLai Jiangshan 		arg->status_change_nid = -1;
1003d9713679SLai Jiangshan }
1004d9713679SLai Jiangshan 
1005d9713679SLai Jiangshan static void node_states_set_node(int node, struct memory_notify *arg)
1006d9713679SLai Jiangshan {
1007d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
1008d9713679SLai Jiangshan 		node_set_state(node, N_NORMAL_MEMORY);
1009d9713679SLai Jiangshan 
10106715ddf9SLai Jiangshan 	if (arg->status_change_nid_high >= 0)
1011d9713679SLai Jiangshan 		node_set_state(node, N_HIGH_MEMORY);
10126715ddf9SLai Jiangshan 
10136715ddf9SLai Jiangshan 	node_set_state(node, N_MEMORY);
1014d9713679SLai Jiangshan }
1015d9713679SLai Jiangshan 
101675884fb1SKAMEZAWA Hiroyuki 
101730467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
1018511c2abaSLai Jiangshan int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
101975884fb1SKAMEZAWA Hiroyuki {
1020aa47228aSCody P Schafer 	unsigned long flags;
10213947be19SDave Hansen 	unsigned long onlined_pages = 0;
10223947be19SDave Hansen 	struct zone *zone;
10236811378eSYasunori Goto 	int need_zonelists_rebuild = 0;
10247b78d335SYasunori Goto 	int nid;
10257b78d335SYasunori Goto 	int ret;
10267b78d335SYasunori Goto 	struct memory_notify arg;
10273947be19SDave Hansen 
1028d9713679SLai Jiangshan 	/*
1029d9713679SLai Jiangshan 	 * This doesn't need a lock to do pfn_to_page().
1030d9713679SLai Jiangshan 	 * The section can't be removed here because of the
1031d9713679SLai Jiangshan 	 * memory_block->state_mutex.
1032d9713679SLai Jiangshan 	 */
1033d9713679SLai Jiangshan 	zone = page_zone(pfn_to_page(pfn));
1034d9713679SLai Jiangshan 
10354f7c6b49STang Chen 	if ((zone_idx(zone) > ZONE_NORMAL ||
10364f7c6b49STang Chen 	    online_type == MMOP_ONLINE_MOVABLE) &&
1037bfc8c901SVladimir Davydov 	    !can_online_high_movable(zone))
103830467e0bSDavid Rientjes 		return -EINVAL;
103974d42d8fSLai Jiangshan 
10404f7c6b49STang Chen 	if (online_type == MMOP_ONLINE_KERNEL &&
10414f7c6b49STang Chen 	    zone_idx(zone) == ZONE_MOVABLE) {
1042bfc8c901SVladimir Davydov 		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
104330467e0bSDavid Rientjes 			return -EINVAL;
1044511c2abaSLai Jiangshan 	}
10454f7c6b49STang Chen 	if (online_type == MMOP_ONLINE_MOVABLE &&
10464f7c6b49STang Chen 	    zone_idx(zone) == ZONE_MOVABLE - 1) {
1047bfc8c901SVladimir Davydov 		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
104830467e0bSDavid Rientjes 			return -EINVAL;
1049511c2abaSLai Jiangshan 	}
1050511c2abaSLai Jiangshan 
1051511c2abaSLai Jiangshan 	/* The previous code may have changed the zone of the pfn range */
1052511c2abaSLai Jiangshan 	zone = page_zone(pfn_to_page(pfn));
1053511c2abaSLai Jiangshan 
10547b78d335SYasunori Goto 	arg.start_pfn = pfn;
10557b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
1056d9713679SLai Jiangshan 	node_states_check_changes_online(nr_pages, zone, &arg);
10577b78d335SYasunori Goto 
1058e888ca35SVlastimil Babka 	nid = zone_to_nid(zone);
10597b78d335SYasunori Goto 
10607b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
10617b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
1062*e33e33b4SChen Yucong 	if (ret)
1063*e33e33b4SChen Yucong 		goto failed_addition;
1064*e33e33b4SChen Yucong 
10653947be19SDave Hansen 	/*
10666811378eSYasunori Goto 	 * If this zone is not populated, then it is not in the zonelist.
10676811378eSYasunori Goto 	 * This means the page allocator ignores this zone.
10686811378eSYasunori Goto 	 * So, the zonelist must be updated after onlining.
10696811378eSYasunori Goto 	 */
10704eaf3f64SHaicheng Li 	mutex_lock(&zonelists_mutex);
10716dcd73d7SWen Congyang 	if (!populated_zone(zone)) {
10726811378eSYasunori Goto 		need_zonelists_rebuild = 1;
10736dcd73d7SWen Congyang 		build_all_zonelists(NULL, zone);
10746dcd73d7SWen Congyang 	}
10756811378eSYasunori Goto 
1076908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
107775884fb1SKAMEZAWA Hiroyuki 		online_pages_range);
1078fd8a4221SGeoff Levand 	if (ret) {
10796dcd73d7SWen Congyang 		if (need_zonelists_rebuild)
10806dcd73d7SWen Congyang 			zone_pcp_reset(zone);
10814eaf3f64SHaicheng Li 		mutex_unlock(&zonelists_mutex);
1082*e33e33b4SChen Yucong 		goto failed_addition;
1083fd8a4221SGeoff Levand 	}
1084fd8a4221SGeoff Levand 
10853947be19SDave Hansen 	zone->present_pages += onlined_pages;
1086aa47228aSCody P Schafer 
1087aa47228aSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
1088f2937be5SYasunori Goto 	zone->zone_pgdat->node_present_pages += onlined_pages;
1089aa47228aSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
1090aa47228aSCody P Schafer 
109108dff7b7SJiang Liu 	if (onlined_pages) {
1092e888ca35SVlastimil Babka 		node_states_set_node(nid, &arg);
10931f522509SHaicheng Li 		if (need_zonelists_rebuild)
10946dcd73d7SWen Congyang 			build_all_zonelists(NULL, NULL);
10951f522509SHaicheng Li 		else
1096112067f0SShaohua Li 			zone_pcp_update(zone);
109708dff7b7SJiang Liu 	}
10981f522509SHaicheng Li 
10994eaf3f64SHaicheng Li 	mutex_unlock(&zonelists_mutex);
11001b79acc9SKOSAKI Motohiro 
11011b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
11021b79acc9SKOSAKI Motohiro 
1103698b1b30SVlastimil Babka 	if (onlined_pages) {
1104e888ca35SVlastimil Babka 		kswapd_run(nid);
1105698b1b30SVlastimil Babka 		kcompactd_run(nid);
1106698b1b30SVlastimil Babka 	}
110761b13993SDave Hansen 
11085a4d4361SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
11092f7f24ecSKent Liu 
11102d1d43f6SChandra Seetharaman 	writeback_set_ratelimit();
11117b78d335SYasunori Goto 
11127b78d335SYasunori Goto 	if (onlined_pages)
11137b78d335SYasunori Goto 		memory_notify(MEM_ONLINE, &arg);
111430467e0bSDavid Rientjes 	return 0;
1115*e33e33b4SChen Yucong 
1116*e33e33b4SChen Yucong failed_addition:
1117*e33e33b4SChen Yucong 	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1118*e33e33b4SChen Yucong 		 (unsigned long long) pfn << PAGE_SHIFT,
1119*e33e33b4SChen Yucong 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1120*e33e33b4SChen Yucong 	memory_notify(MEM_CANCEL_ONLINE, &arg);
1121*e33e33b4SChen Yucong 	return ret;
11223947be19SDave Hansen }
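/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * online_pages() must be called with the memory hotplug lock held, as the
 * comment above it says. The hypothetical helper below (name and scope are
 * assumptions) shows the expected calling pattern for onlining one
 * section-aligned range as kernel (non-movable) memory.
 */
#if 0	/* example sketch, never compiled */
static int example_online_section(unsigned long start_pfn)
{
	int ret;

	mem_hotplug_begin();
	ret = online_pages(start_pfn, PAGES_PER_SECTION, MMOP_ONLINE_KERNEL);
	mem_hotplug_done();
	return ret;
}
#endif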
112353947027SKeith Mannthey #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1124bc02af93SYasunori Goto 
11250bd85420STang Chen static void reset_node_present_pages(pg_data_t *pgdat)
11260bd85420STang Chen {
11270bd85420STang Chen 	struct zone *z;
11280bd85420STang Chen 
11290bd85420STang Chen 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
11300bd85420STang Chen 		z->present_pages = 0;
11310bd85420STang Chen 
11320bd85420STang Chen 	pgdat->node_present_pages = 0;
11330bd85420STang Chen }
11340bd85420STang Chen 
1135e1319331SHidetoshi Seto /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1136e1319331SHidetoshi Seto static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
11379af3c2deSYasunori Goto {
11389af3c2deSYasunori Goto 	struct pglist_data *pgdat;
11399af3c2deSYasunori Goto 	unsigned long zones_size[MAX_NR_ZONES] = {0};
11409af3c2deSYasunori Goto 	unsigned long zholes_size[MAX_NR_ZONES] = {0};
1141c8e861a5SFabian Frederick 	unsigned long start_pfn = PFN_DOWN(start);
11429af3c2deSYasunori Goto 
1143a1e565aaSTang Chen 	pgdat = NODE_DATA(nid);
1144a1e565aaSTang Chen 	if (!pgdat) {
11459af3c2deSYasunori Goto 		pgdat = arch_alloc_nodedata(nid);
11469af3c2deSYasunori Goto 		if (!pgdat)
11479af3c2deSYasunori Goto 			return NULL;
11489af3c2deSYasunori Goto 
11499af3c2deSYasunori Goto 		arch_refresh_nodedata(nid, pgdat);
1150b0dc3a34SGu Zheng 	} else {
1151b0dc3a34SGu Zheng 		/* Reset the nr_zones and classzone_idx to 0 before reuse */
1152b0dc3a34SGu Zheng 		pgdat->nr_zones = 0;
1153b0dc3a34SGu Zheng 		pgdat->classzone_idx = 0;
1154a1e565aaSTang Chen 	}
11559af3c2deSYasunori Goto 
11569af3c2deSYasunori Goto 	/* we can use NODE_DATA(nid) from here */
11579af3c2deSYasunori Goto 
11589af3c2deSYasunori Goto 	/* init node's zones as empty zones, we don't have any present pages.*/
11599109fb7bSJohannes Weiner 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
11609af3c2deSYasunori Goto 
1161959ecc48SKAMEZAWA Hiroyuki 	/*
1162959ecc48SKAMEZAWA Hiroyuki 	 * The node we allocated has no zone fallback lists. To avoid
1163959ecc48SKAMEZAWA Hiroyuki 	 * accessing an uninitialized zonelist, build the zonelists here.
1164959ecc48SKAMEZAWA Hiroyuki 	 */
1165f957db4fSDavid Rientjes 	mutex_lock(&zonelists_mutex);
11669adb62a5SJiang Liu 	build_all_zonelists(pgdat, NULL);
1167f957db4fSDavid Rientjes 	mutex_unlock(&zonelists_mutex);
1168959ecc48SKAMEZAWA Hiroyuki 
1169f784a3f1STang Chen 	/*
1170f784a3f1STang Chen 	 * zone->managed_pages is set to an approximate value in
1171f784a3f1STang Chen 	 * free_area_init_core(), which would cause
1172f784a3f1STang Chen 	 * /sys/devices/system/node/nodeX/meminfo to report wrong data.
1173f784a3f1STang Chen 	 * So reset it to 0 before any memory is onlined.
1174f784a3f1STang Chen 	 */
1175f784a3f1STang Chen 	reset_node_managed_pages(pgdat);
1176f784a3f1STang Chen 
11770bd85420STang Chen 	/*
11780bd85420STang Chen 	 * When memory is hot-added, all the memory is in offline state. So
11790bd85420STang Chen 	 * clear all zones' present_pages because they will be updated in
11800bd85420STang Chen 	 * online_pages() and offline_pages().
11810bd85420STang Chen 	 */
11820bd85420STang Chen 	reset_node_present_pages(pgdat);
11830bd85420STang Chen 
11849af3c2deSYasunori Goto 	return pgdat;
11859af3c2deSYasunori Goto }
11869af3c2deSYasunori Goto 
11879af3c2deSYasunori Goto static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
11889af3c2deSYasunori Goto {
11899af3c2deSYasunori Goto 	arch_refresh_nodedata(nid, NULL);
11909af3c2deSYasunori Goto 	arch_free_nodedata(pgdat);
11919af3c2deSYasunori Goto 	return;
11929af3c2deSYasunori Goto }
11939af3c2deSYasunori Goto 
11940a547039SKAMEZAWA Hiroyuki 
119501b0f197SToshi Kani /**
119601b0f197SToshi Kani  * try_online_node - online a node if offlined
119701b0f197SToshi Kani  *
1198cf23422bSminskey guo  * Called by cpu_up() to online a node that has no onlined memory yet.
1199cf23422bSminskey guo  */
120001b0f197SToshi Kani int try_online_node(int nid)
1201cf23422bSminskey guo {
1202cf23422bSminskey guo 	pg_data_t	*pgdat;
1203cf23422bSminskey guo 	int	ret;
1204cf23422bSminskey guo 
120501b0f197SToshi Kani 	if (node_online(nid))
120601b0f197SToshi Kani 		return 0;
120701b0f197SToshi Kani 
1208bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1209cf23422bSminskey guo 	pgdat = hotadd_new_pgdat(nid, 0);
12107553e8f2SDavid Rientjes 	if (!pgdat) {
121101b0f197SToshi Kani 		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1212cf23422bSminskey guo 		ret = -ENOMEM;
1213cf23422bSminskey guo 		goto out;
1214cf23422bSminskey guo 	}
1215cf23422bSminskey guo 	node_set_online(nid);
1216cf23422bSminskey guo 	ret = register_one_node(nid);
1217cf23422bSminskey guo 	BUG_ON(ret);
1218cf23422bSminskey guo 
121901b0f197SToshi Kani 	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
122001b0f197SToshi Kani 		mutex_lock(&zonelists_mutex);
122101b0f197SToshi Kani 		build_all_zonelists(NULL, NULL);
122201b0f197SToshi Kani 		mutex_unlock(&zonelists_mutex);
122301b0f197SToshi Kani 	}
122401b0f197SToshi Kani 
1225cf23422bSminskey guo out:
1226bfc8c901SVladimir Davydov 	mem_hotplug_done();
1227cf23422bSminskey guo 	return ret;
1228cf23422bSminskey guo }
1229cf23422bSminskey guo 
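/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * try_online_node() is what the cpu_up() path relies on to make sure the
 * target CPU's node has a pgdat before the CPU is brought up. The helper
 * name below is hypothetical; it mirrors that usage.
 */
#if 0	/* example sketch, never compiled */
static int example_prepare_cpu_node(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);
	int err = 0;

	if (nid != NUMA_NO_NODE)
		err = try_online_node(nid);	/* allocates pgdat if needed */
	return err;
}
#endif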
123027356f54SToshi Kani static int check_hotplug_memory_range(u64 start, u64 size)
123127356f54SToshi Kani {
1232c8e861a5SFabian Frederick 	u64 start_pfn = PFN_DOWN(start);
123327356f54SToshi Kani 	u64 nr_pages = size >> PAGE_SHIFT;
123427356f54SToshi Kani 
123527356f54SToshi Kani 	/* Memory range must be aligned to the memory section size */
123627356f54SToshi Kani 	if ((start_pfn & ~PAGE_SECTION_MASK) ||
123727356f54SToshi Kani 	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
123827356f54SToshi Kani 		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
123927356f54SToshi Kani 				(unsigned long long)start,
124027356f54SToshi Kani 				(unsigned long long)size);
124127356f54SToshi Kani 		return -EINVAL;
124227356f54SToshi Kani 	}
124327356f54SToshi Kani 
124427356f54SToshi Kani 	return 0;
124527356f54SToshi Kani }
124627356f54SToshi Kani 
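/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * Hotplugged ranges must be aligned to the memory section size, which is
 * what check_hotplug_memory_range() enforces. The hypothetical helper below
 * rounds an arbitrary range outward to that alignment.
 */
#if 0	/* example sketch, never compiled */
static void example_section_align(u64 start, u64 size, u64 *astart, u64 *asize)
{
	u64 section_bytes = (u64)PAGES_PER_SECTION << PAGE_SHIFT;
	u64 end = roundup(start + size, section_bytes);

	*astart = rounddown(start, section_bytes);
	*asize = end - *astart;
}
#endif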
124763264400SWang Nan /*
124863264400SWang Nan  * If the movable zone has already been set up, newly added memory must be checked.
124963264400SWang Nan  * If its address is above the movable zone, it should be added as movable.
125063264400SWang Nan  * Without this check, the movable zone may overlap with another zone.
125163264400SWang Nan  */
125263264400SWang Nan static int should_add_memory_movable(int nid, u64 start, u64 size)
125363264400SWang Nan {
125463264400SWang Nan 	unsigned long start_pfn = start >> PAGE_SHIFT;
125563264400SWang Nan 	pg_data_t *pgdat = NODE_DATA(nid);
125663264400SWang Nan 	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
125763264400SWang Nan 
125863264400SWang Nan 	if (zone_is_empty(movable_zone))
125963264400SWang Nan 		return 0;
126063264400SWang Nan 
126163264400SWang Nan 	if (movable_zone->zone_start_pfn <= start_pfn)
126263264400SWang Nan 		return 1;
126363264400SWang Nan 
126463264400SWang Nan 	return 0;
126563264400SWang Nan }
126663264400SWang Nan 
1267033fbae9SDan Williams int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
1268033fbae9SDan Williams 		bool for_device)
126963264400SWang Nan {
1270033fbae9SDan Williams #ifdef CONFIG_ZONE_DEVICE
1271033fbae9SDan Williams 	if (for_device)
1272033fbae9SDan Williams 		return ZONE_DEVICE;
1273033fbae9SDan Williams #endif
127463264400SWang Nan 	if (should_add_memory_movable(nid, start, size))
127563264400SWang Nan 		return ZONE_MOVABLE;
127663264400SWang Nan 
127763264400SWang Nan 	return zone_default;
127863264400SWang Nan }
127963264400SWang Nan 
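/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * Architecture code typically feeds the result of zone_for_memory() into its
 * arch_add_memory() implementation. The helper below is hypothetical and only
 * shows how the returned zone index maps back to a struct zone.
 */
#if 0	/* example sketch, never compiled */
static struct zone *example_pick_zone(int nid, u64 start, u64 size,
				      bool for_device)
{
	int zid = zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);

	return &NODE_DATA(nid)->node_zones[zid];
}
#endif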
128031bc3858SVitaly Kuznetsov static int online_memory_block(struct memory_block *mem, void *arg)
128131bc3858SVitaly Kuznetsov {
128231bc3858SVitaly Kuznetsov 	return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
128331bc3858SVitaly Kuznetsov }
128431bc3858SVitaly Kuznetsov 
128531168481SAl Viro /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
128631bc3858SVitaly Kuznetsov int __ref add_memory_resource(int nid, struct resource *res, bool online)
1287bc02af93SYasunori Goto {
128862cedb9fSDavid Vrabel 	u64 start, size;
12899af3c2deSYasunori Goto 	pg_data_t *pgdat = NULL;
1290a1e565aaSTang Chen 	bool new_pgdat;
1291a1e565aaSTang Chen 	bool new_node;
1292bc02af93SYasunori Goto 	int ret;
1293bc02af93SYasunori Goto 
129462cedb9fSDavid Vrabel 	start = res->start;
129562cedb9fSDavid Vrabel 	size = resource_size(res);
129662cedb9fSDavid Vrabel 
129727356f54SToshi Kani 	ret = check_hotplug_memory_range(start, size);
129827356f54SToshi Kani 	if (ret)
129927356f54SToshi Kani 		return ret;
130027356f54SToshi Kani 
1301a1e565aaSTang Chen 	{	/* Stupid hack to suppress address-never-null warning */
1302a1e565aaSTang Chen 		void *p = NODE_DATA(nid);
1303a1e565aaSTang Chen 		new_pgdat = !p;
1304a1e565aaSTang Chen 	}
1305ac13c462SNathan Zimmer 
1306bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1307ac13c462SNathan Zimmer 
13087f36e3e5STang Chen 	/*
13097f36e3e5STang Chen 	 * Add new range to memblock so that when hotadd_new_pgdat() is called
13107f36e3e5STang Chen 	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
13117f36e3e5STang Chen 	 * this new range and calculate total pages correctly.  The range will
13127f36e3e5STang Chen 	 * be removed at hot-remove time.
13137f36e3e5STang Chen 	 */
13147f36e3e5STang Chen 	memblock_add_node(start, size, nid);
13157f36e3e5STang Chen 
1316a1e565aaSTang Chen 	new_node = !node_online(nid);
1317a1e565aaSTang Chen 	if (new_node) {
13189af3c2deSYasunori Goto 		pgdat = hotadd_new_pgdat(nid, start);
13196ad696d2SAndi Kleen 		ret = -ENOMEM;
13209af3c2deSYasunori Goto 		if (!pgdat)
132141b9e2d7SWen Congyang 			goto error;
13229af3c2deSYasunori Goto 	}
13239af3c2deSYasunori Goto 
1324bc02af93SYasunori Goto 	/* call arch's memory hotadd */
1325033fbae9SDan Williams 	ret = arch_add_memory(nid, start, size, false);
1326bc02af93SYasunori Goto 
13279af3c2deSYasunori Goto 	if (ret < 0)
13289af3c2deSYasunori Goto 		goto error;
13299af3c2deSYasunori Goto 
13300fc44159SYasunori Goto 	/* We online the node here; we can't roll back from this point. */
13319af3c2deSYasunori Goto 	node_set_online(nid);
13329af3c2deSYasunori Goto 
1333a1e565aaSTang Chen 	if (new_node) {
13340fc44159SYasunori Goto 		ret = register_one_node(nid);
13350fc44159SYasunori Goto 		/*
13360fc44159SYasunori Goto 		 * If the sysfs file of the new node can't be created, CPUs
13370fc44159SYasunori Goto 		 * on the node can't be hot-added. There is no way to roll
13380fc44159SYasunori Goto 		 * back now, so catch the failure with BUG_ON(), reluctantly.
13390fc44159SYasunori Goto 		 */
13400fc44159SYasunori Goto 		BUG_ON(ret);
13410fc44159SYasunori Goto 	}
13420fc44159SYasunori Goto 
1343d96ae530Sakpm@linux-foundation.org 	/* create new memmap entry */
1344d96ae530Sakpm@linux-foundation.org 	firmware_map_add_hotplug(start, start + size, "System RAM");
1345d96ae530Sakpm@linux-foundation.org 
134631bc3858SVitaly Kuznetsov 	/* online pages if requested */
134731bc3858SVitaly Kuznetsov 	if (online)
134831bc3858SVitaly Kuznetsov 		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
134931bc3858SVitaly Kuznetsov 				  NULL, online_memory_block);
135031bc3858SVitaly Kuznetsov 
13516ad696d2SAndi Kleen 	goto out;
13526ad696d2SAndi Kleen 
13539af3c2deSYasunori Goto error:
13549af3c2deSYasunori Goto 	/* rollback pgdat allocation and others */
13559af3c2deSYasunori Goto 	if (new_pgdat)
13569af3c2deSYasunori Goto 		rollback_node_hotadd(nid, pgdat);
13577f36e3e5STang Chen 	memblock_remove(start, size);
13589af3c2deSYasunori Goto 
13596ad696d2SAndi Kleen out:
1360bfc8c901SVladimir Davydov 	mem_hotplug_done();
1361bc02af93SYasunori Goto 	return ret;
1362bc02af93SYasunori Goto }
136362cedb9fSDavid Vrabel EXPORT_SYMBOL_GPL(add_memory_resource);
136462cedb9fSDavid Vrabel 
136562cedb9fSDavid Vrabel int __ref add_memory(int nid, u64 start, u64 size)
136662cedb9fSDavid Vrabel {
136762cedb9fSDavid Vrabel 	struct resource *res;
136862cedb9fSDavid Vrabel 	int ret;
136962cedb9fSDavid Vrabel 
137062cedb9fSDavid Vrabel 	res = register_memory_resource(start, size);
13716f754ba4SVitaly Kuznetsov 	if (IS_ERR(res))
13726f754ba4SVitaly Kuznetsov 		return PTR_ERR(res);
137362cedb9fSDavid Vrabel 
137431bc3858SVitaly Kuznetsov 	ret = add_memory_resource(nid, res, memhp_auto_online);
137562cedb9fSDavid Vrabel 	if (ret < 0)
137662cedb9fSDavid Vrabel 		release_memory_resource(res);
137762cedb9fSDavid Vrabel 	return ret;
137862cedb9fSDavid Vrabel }
1379bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory);
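/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * A memory-hotplug provider (for example an ACPI or balloon driver) is
 * expected to pass add_memory() a section-aligned range on the right node.
 * The helper name below is hypothetical.
 */
#if 0	/* example sketch, never compiled */
static int example_probe_memory(int nid, u64 start, u64 size)
{
	/* size must be section aligned, see check_hotplug_memory_range() */
	return add_memory(nid, start, size);
}
#endif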
13800c0e6195SKAMEZAWA Hiroyuki 
13810c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE
13820c0e6195SKAMEZAWA Hiroyuki /*
13835c755e9fSBadari Pulavarty  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
13845c755e9fSBadari Pulavarty  * set and the size of the free page is given by page_order(). Using this,
13855c755e9fSBadari Pulavarty  * the function determines if the pageblock contains only free pages.
13865c755e9fSBadari Pulavarty  * Due to buddy constraints, a free page at least the size of a pageblock will
13875c755e9fSBadari Pulavarty  * be located at the start of the pageblock.
13885c755e9fSBadari Pulavarty  */
13895c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page)
13905c755e9fSBadari Pulavarty {
13915c755e9fSBadari Pulavarty 	return PageBuddy(page) && page_order(page) >= pageblock_order;
13925c755e9fSBadari Pulavarty }
13935c755e9fSBadari Pulavarty 
13945c755e9fSBadari Pulavarty /* Return the start of the next active pageblock after a given page */
13955c755e9fSBadari Pulavarty static struct page *next_active_pageblock(struct page *page)
13965c755e9fSBadari Pulavarty {
13975c755e9fSBadari Pulavarty 	/* Ensure the starting page is pageblock-aligned */
13985c755e9fSBadari Pulavarty 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
13995c755e9fSBadari Pulavarty 
14005c755e9fSBadari Pulavarty 	/* If the entire pageblock is free, move to the end of free page */
14010dcc48c1SKAMEZAWA Hiroyuki 	if (pageblock_free(page)) {
14020dcc48c1SKAMEZAWA Hiroyuki 		int order;
14030dcc48c1SKAMEZAWA Hiroyuki 		/* Be careful: we don't hold any locks, so page_order() can change. */
14040dcc48c1SKAMEZAWA Hiroyuki 		order = page_order(page);
14050dcc48c1SKAMEZAWA Hiroyuki 		if ((order < MAX_ORDER) && (order >= pageblock_order))
14060dcc48c1SKAMEZAWA Hiroyuki 			return page + (1 << order);
14070dcc48c1SKAMEZAWA Hiroyuki 	}
14085c755e9fSBadari Pulavarty 
14090dcc48c1SKAMEZAWA Hiroyuki 	return page + pageblock_nr_pages;
14105c755e9fSBadari Pulavarty }
14115c755e9fSBadari Pulavarty 
14125c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable. */
14135c755e9fSBadari Pulavarty int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
14145c755e9fSBadari Pulavarty {
14155c755e9fSBadari Pulavarty 	struct page *page = pfn_to_page(start_pfn);
14165c755e9fSBadari Pulavarty 	struct page *end_page = page + nr_pages;
14175c755e9fSBadari Pulavarty 
14185c755e9fSBadari Pulavarty 	/* Check the starting page of each pageblock within the range */
14195c755e9fSBadari Pulavarty 	for (; page < end_page; page = next_active_pageblock(page)) {
142049ac8255SKAMEZAWA Hiroyuki 		if (!is_pageblock_removable_nolock(page))
14215c755e9fSBadari Pulavarty 			return 0;
142249ac8255SKAMEZAWA Hiroyuki 		cond_resched();
14235c755e9fSBadari Pulavarty 	}
14245c755e9fSBadari Pulavarty 
14255c755e9fSBadari Pulavarty 	/* All pageblocks in the memory block are likely to be hot-removable */
14265c755e9fSBadari Pulavarty 	return 1;
14275c755e9fSBadari Pulavarty }
14285c755e9fSBadari Pulavarty 
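/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * The sysfs "removable" attribute of a memory block is the typical consumer
 * of this helper. The wrapper below is hypothetical and only shows the
 * calling convention for one memory block's pfn range.
 */
#if 0	/* example sketch, never compiled */
static bool example_block_removable(unsigned long start_pfn,
				    unsigned long nr_pages)
{
	if (!pfn_valid(start_pfn))
		return false;
	return is_mem_section_removable(start_pfn, nr_pages) != 0;
}
#endif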
14295c755e9fSBadari Pulavarty /*
14300c0e6195SKAMEZAWA Hiroyuki  * Confirm that all pages in the range [start, end) belong to the same zone.
14310c0e6195SKAMEZAWA Hiroyuki  */
1432ed2f2400SZhang Zhen int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
14330c0e6195SKAMEZAWA Hiroyuki {
14345f0f2887SAndrew Banman 	unsigned long pfn, sec_end_pfn;
14350c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone = NULL;
14360c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
14370c0e6195SKAMEZAWA Hiroyuki 	int i;
14385f0f2887SAndrew Banman 	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
14390c0e6195SKAMEZAWA Hiroyuki 	     pfn < end_pfn;
14405f0f2887SAndrew Banman 	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
14415f0f2887SAndrew Banman 		/* Make sure the memory section is present first */
14425f0f2887SAndrew Banman 		if (!present_section_nr(pfn_to_section_nr(pfn)))
14435f0f2887SAndrew Banman 			continue;
14445f0f2887SAndrew Banman 		for (; pfn < sec_end_pfn && pfn < end_pfn;
14450c0e6195SKAMEZAWA Hiroyuki 		     pfn += MAX_ORDER_NR_PAGES) {
14460c0e6195SKAMEZAWA Hiroyuki 			i = 0;
14470c0e6195SKAMEZAWA Hiroyuki 			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
14485f0f2887SAndrew Banman 			while ((i < MAX_ORDER_NR_PAGES) &&
14495f0f2887SAndrew Banman 				!pfn_valid_within(pfn + i))
14500c0e6195SKAMEZAWA Hiroyuki 				i++;
14510c0e6195SKAMEZAWA Hiroyuki 			if (i == MAX_ORDER_NR_PAGES)
14520c0e6195SKAMEZAWA Hiroyuki 				continue;
14530c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn + i);
14540c0e6195SKAMEZAWA Hiroyuki 			if (zone && page_zone(page) != zone)
14550c0e6195SKAMEZAWA Hiroyuki 				return 0;
14560c0e6195SKAMEZAWA Hiroyuki 			zone = page_zone(page);
14570c0e6195SKAMEZAWA Hiroyuki 		}
14585f0f2887SAndrew Banman 	}
14590c0e6195SKAMEZAWA Hiroyuki 	return 1;
14600c0e6195SKAMEZAWA Hiroyuki }
14610c0e6195SKAMEZAWA Hiroyuki 
14620c0e6195SKAMEZAWA Hiroyuki /*
1463c8721bbbSNaoya Horiguchi  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
1464c8721bbbSNaoya Horiguchi  * and hugepages). We scan by pfn because it is much easier than walking
1465c8721bbbSNaoya Horiguchi  * the linked lists. This function returns the pfn of the first movable
1466c8721bbbSNaoya Horiguchi  * page found, otherwise 0.
14670c0e6195SKAMEZAWA Hiroyuki  */
1468c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
14690c0e6195SKAMEZAWA Hiroyuki {
14700c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
14710c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
14720c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start; pfn < end; pfn++) {
14730c0e6195SKAMEZAWA Hiroyuki 		if (pfn_valid(pfn)) {
14740c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn);
14750c0e6195SKAMEZAWA Hiroyuki 			if (PageLRU(page))
14760c0e6195SKAMEZAWA Hiroyuki 				return pfn;
1477c8721bbbSNaoya Horiguchi 			if (PageHuge(page)) {
14787e1f049eSNaoya Horiguchi 				if (page_huge_active(page))
1479c8721bbbSNaoya Horiguchi 					return pfn;
1480c8721bbbSNaoya Horiguchi 				else
1481c8721bbbSNaoya Horiguchi 					pfn = round_up(pfn + 1,
1482c8721bbbSNaoya Horiguchi 						1 << compound_order(page)) - 1;
1483c8721bbbSNaoya Horiguchi 			}
14840c0e6195SKAMEZAWA Hiroyuki 		}
14850c0e6195SKAMEZAWA Hiroyuki 	}
14860c0e6195SKAMEZAWA Hiroyuki 	return 0;
14870c0e6195SKAMEZAWA Hiroyuki }
14880c0e6195SKAMEZAWA Hiroyuki 
14890c0e6195SKAMEZAWA Hiroyuki #define NR_OFFLINE_AT_ONCE_PAGES	(256)
14900c0e6195SKAMEZAWA Hiroyuki static int
14910c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
14920c0e6195SKAMEZAWA Hiroyuki {
14930c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
14940c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
14950c0e6195SKAMEZAWA Hiroyuki 	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
14960c0e6195SKAMEZAWA Hiroyuki 	int not_managed = 0;
14970c0e6195SKAMEZAWA Hiroyuki 	int ret = 0;
14980c0e6195SKAMEZAWA Hiroyuki 	LIST_HEAD(source);
14990c0e6195SKAMEZAWA Hiroyuki 
15000c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
15010c0e6195SKAMEZAWA Hiroyuki 		if (!pfn_valid(pfn))
15020c0e6195SKAMEZAWA Hiroyuki 			continue;
15030c0e6195SKAMEZAWA Hiroyuki 		page = pfn_to_page(pfn);
1504c8721bbbSNaoya Horiguchi 
1505c8721bbbSNaoya Horiguchi 		if (PageHuge(page)) {
1506c8721bbbSNaoya Horiguchi 			struct page *head = compound_head(page);
1507c8721bbbSNaoya Horiguchi 			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1508c8721bbbSNaoya Horiguchi 			if (compound_order(head) > PFN_SECTION_SHIFT) {
1509c8721bbbSNaoya Horiguchi 				ret = -EBUSY;
1510c8721bbbSNaoya Horiguchi 				break;
1511c8721bbbSNaoya Horiguchi 			}
1512c8721bbbSNaoya Horiguchi 			if (isolate_huge_page(page, &source))
1513c8721bbbSNaoya Horiguchi 				move_pages -= 1 << compound_order(head);
1514c8721bbbSNaoya Horiguchi 			continue;
1515c8721bbbSNaoya Horiguchi 		}
1516c8721bbbSNaoya Horiguchi 
1517700c2a46SKonstantin Khlebnikov 		if (!get_page_unless_zero(page))
15180c0e6195SKAMEZAWA Hiroyuki 			continue;
15190c0e6195SKAMEZAWA Hiroyuki 		/*
15200c0e6195SKAMEZAWA Hiroyuki 		 * We can skip free pages. And we can only deal with pages on
15210c0e6195SKAMEZAWA Hiroyuki 		 * LRU.
15220c0e6195SKAMEZAWA Hiroyuki 		 */
152362695a84SNick Piggin 		ret = isolate_lru_page(page);
15240c0e6195SKAMEZAWA Hiroyuki 		if (!ret) { /* Success */
1525700c2a46SKonstantin Khlebnikov 			put_page(page);
152662695a84SNick Piggin 			list_add_tail(&page->lru, &source);
15270c0e6195SKAMEZAWA Hiroyuki 			move_pages--;
15286d9c285aSKOSAKI Motohiro 			inc_zone_page_state(page, NR_ISOLATED_ANON +
15296d9c285aSKOSAKI Motohiro 					    page_is_file_cache(page));
15306d9c285aSKOSAKI Motohiro 
15310c0e6195SKAMEZAWA Hiroyuki 		} else {
15320c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_DEBUG_VM
1533*e33e33b4SChen Yucong 			pr_alert("removing pfn %lx from LRU failed\n", pfn);
1534f0b791a3SDave Hansen 			dump_page(page, "failed to remove from LRU");
15350c0e6195SKAMEZAWA Hiroyuki #endif
1536700c2a46SKonstantin Khlebnikov 			put_page(page);
153725985edcSLucas De Marchi 			/* Because we don't hold the big zone->lock, we should
1538809c4449SBob Liu 			   check this again here. */
1539809c4449SBob Liu 			if (page_count(page)) {
1540809c4449SBob Liu 				not_managed++;
1541f3ab2636SBob Liu 				ret = -EBUSY;
1542809c4449SBob Liu 				break;
1543809c4449SBob Liu 			}
15440c0e6195SKAMEZAWA Hiroyuki 		}
15450c0e6195SKAMEZAWA Hiroyuki 	}
1546f3ab2636SBob Liu 	if (!list_empty(&source)) {
15470c0e6195SKAMEZAWA Hiroyuki 		if (not_managed) {
1548c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
15490c0e6195SKAMEZAWA Hiroyuki 			goto out;
15500c0e6195SKAMEZAWA Hiroyuki 		}
155174c08f98SMinchan Kim 
155274c08f98SMinchan Kim 		/*
155374c08f98SMinchan Kim 		 * alloc_migrate_target() should be improved!
155474c08f98SMinchan Kim 		 * migrate_pages() returns the number of failed pages.
155574c08f98SMinchan Kim 		 */
155668711a74SDavid Rientjes 		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
15579c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1558cf608ac1SMinchan Kim 		if (ret)
1559c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
1560f3ab2636SBob Liu 	}
15610c0e6195SKAMEZAWA Hiroyuki out:
15620c0e6195SKAMEZAWA Hiroyuki 	return ret;
15630c0e6195SKAMEZAWA Hiroyuki }
15640c0e6195SKAMEZAWA Hiroyuki 
15650c0e6195SKAMEZAWA Hiroyuki /*
15660c0e6195SKAMEZAWA Hiroyuki  * remove from free_area[] and mark all as Reserved.
15670c0e6195SKAMEZAWA Hiroyuki  */
15680c0e6195SKAMEZAWA Hiroyuki static int
15690c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
15700c0e6195SKAMEZAWA Hiroyuki 			void *data)
15710c0e6195SKAMEZAWA Hiroyuki {
15720c0e6195SKAMEZAWA Hiroyuki 	__offline_isolated_pages(start, start + nr_pages);
15730c0e6195SKAMEZAWA Hiroyuki 	return 0;
15740c0e6195SKAMEZAWA Hiroyuki }
15750c0e6195SKAMEZAWA Hiroyuki 
15760c0e6195SKAMEZAWA Hiroyuki static void
15770c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
15780c0e6195SKAMEZAWA Hiroyuki {
1579908eedc6SKAMEZAWA Hiroyuki 	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
15800c0e6195SKAMEZAWA Hiroyuki 				offline_isolated_pages_cb);
15810c0e6195SKAMEZAWA Hiroyuki }
15820c0e6195SKAMEZAWA Hiroyuki 
15830c0e6195SKAMEZAWA Hiroyuki /*
15840c0e6195SKAMEZAWA Hiroyuki  * Check that all pages in the range, recorded as a memory resource, are isolated.
15850c0e6195SKAMEZAWA Hiroyuki  */
15860c0e6195SKAMEZAWA Hiroyuki static int
15870c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
15880c0e6195SKAMEZAWA Hiroyuki 			void *data)
15890c0e6195SKAMEZAWA Hiroyuki {
15900c0e6195SKAMEZAWA Hiroyuki 	int ret;
15910c0e6195SKAMEZAWA Hiroyuki 	long offlined = *(long *)data;
1592b023f468SWen Congyang 	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
15930c0e6195SKAMEZAWA Hiroyuki 	offlined = nr_pages;
15940c0e6195SKAMEZAWA Hiroyuki 	if (!ret)
15950c0e6195SKAMEZAWA Hiroyuki 		*(long *)data += offlined;
15960c0e6195SKAMEZAWA Hiroyuki 	return ret;
15970c0e6195SKAMEZAWA Hiroyuki }
15980c0e6195SKAMEZAWA Hiroyuki 
15990c0e6195SKAMEZAWA Hiroyuki static long
16000c0e6195SKAMEZAWA Hiroyuki check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
16010c0e6195SKAMEZAWA Hiroyuki {
16020c0e6195SKAMEZAWA Hiroyuki 	long offlined = 0;
16030c0e6195SKAMEZAWA Hiroyuki 	int ret;
16040c0e6195SKAMEZAWA Hiroyuki 
1605908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
16060c0e6195SKAMEZAWA Hiroyuki 			check_pages_isolated_cb);
16070c0e6195SKAMEZAWA Hiroyuki 	if (ret < 0)
16080c0e6195SKAMEZAWA Hiroyuki 		offlined = (long)ret;
16090c0e6195SKAMEZAWA Hiroyuki 	return offlined;
16100c0e6195SKAMEZAWA Hiroyuki }
16110c0e6195SKAMEZAWA Hiroyuki 
161209285af7SLai Jiangshan #ifdef CONFIG_MOVABLE_NODE
161379a4dcefSTang Chen /*
161479a4dcefSTang Chen  * With CONFIG_MOVABLE_NODE, we permit offlining a node that has no
161579a4dcefSTang Chen  * normal memory.
161679a4dcefSTang Chen  */
161709285af7SLai Jiangshan static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
161809285af7SLai Jiangshan {
161909285af7SLai Jiangshan 	return true;
162009285af7SLai Jiangshan }
162179a4dcefSTang Chen #else /* CONFIG_MOVABLE_NODE */
162274d42d8fSLai Jiangshan /* ensure the node has NORMAL memory if it is still online */
162374d42d8fSLai Jiangshan static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
162474d42d8fSLai Jiangshan {
162574d42d8fSLai Jiangshan 	struct pglist_data *pgdat = zone->zone_pgdat;
162674d42d8fSLai Jiangshan 	unsigned long present_pages = 0;
162774d42d8fSLai Jiangshan 	enum zone_type zt;
162874d42d8fSLai Jiangshan 
162974d42d8fSLai Jiangshan 	for (zt = 0; zt <= ZONE_NORMAL; zt++)
163074d42d8fSLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
163174d42d8fSLai Jiangshan 
163274d42d8fSLai Jiangshan 	if (present_pages > nr_pages)
163374d42d8fSLai Jiangshan 		return true;
163474d42d8fSLai Jiangshan 
163574d42d8fSLai Jiangshan 	present_pages = 0;
163674d42d8fSLai Jiangshan 	for (; zt <= ZONE_MOVABLE; zt++)
163774d42d8fSLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
163874d42d8fSLai Jiangshan 
163974d42d8fSLai Jiangshan 	/*
164074d42d8fSLai Jiangshan 	 * we can't offline the last normal memory until all
164174d42d8fSLai Jiangshan 	 * higher memory is offlined.
164274d42d8fSLai Jiangshan 	 */
164374d42d8fSLai Jiangshan 	return present_pages == 0;
164474d42d8fSLai Jiangshan }
164579a4dcefSTang Chen #endif /* CONFIG_MOVABLE_NODE */
164674d42d8fSLai Jiangshan 
1647c5320926STang Chen static int __init cmdline_parse_movable_node(char *p)
1648c5320926STang Chen {
1649c5320926STang Chen #ifdef CONFIG_MOVABLE_NODE
1650c5320926STang Chen 	/*
1651c5320926STang Chen 	 * Memory used by the kernel cannot be hot-removed because Linux
1652c5320926STang Chen 	 * cannot migrate the kernel pages. When memory hotplug is
1653c5320926STang Chen 	 * enabled, we should prevent memblock from allocating memory
1654c5320926STang Chen 	 * for the kernel.
1655c5320926STang Chen 	 *
1656c5320926STang Chen 	 * ACPI SRAT records all hotpluggable memory ranges. But before
1657c5320926STang Chen 	 * SRAT is parsed, we don't know about them.
1658c5320926STang Chen 	 *
1659c5320926STang Chen 	 * The kernel image is loaded into memory very early. We cannot
1660c5320926STang Chen 	 * prevent this anyway. So on NUMA systems, we mark any node the
1661c5320926STang Chen 	 * kernel resides in as un-hotpluggable.
1662c5320926STang Chen 	 *
1663c5320926STang Chen 	 * Since on modern servers one node can have tens of gigabytes of
1664c5320926STang Chen 	 * memory, we can assume the memory around the kernel image is
1665c5320926STang Chen 	 * also un-hotpluggable. So before SRAT is parsed, just allocate
1666c5320926STang Chen 	 * memory near the kernel image to do our best to keep the kernel
1667c5320926STang Chen 	 * away from hotpluggable memory.
1668c5320926STang Chen 	 */
1669c5320926STang Chen 	memblock_set_bottom_up(true);
167055ac590cSTang Chen 	movable_node_enabled = true;
1671c5320926STang Chen #else
1672c5320926STang Chen 	pr_warn("movable_node option not supported\n");
1673c5320926STang Chen #endif
1674c5320926STang Chen 	return 0;
1675c5320926STang Chen }
1676c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node);
1677c5320926STang Chen 
1678d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */
1679d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages,
1680d9713679SLai Jiangshan 		struct zone *zone, struct memory_notify *arg)
1681d9713679SLai Jiangshan {
1682d9713679SLai Jiangshan 	struct pglist_data *pgdat = zone->zone_pgdat;
1683d9713679SLai Jiangshan 	unsigned long present_pages = 0;
1684d9713679SLai Jiangshan 	enum zone_type zt, zone_last = ZONE_NORMAL;
1685d9713679SLai Jiangshan 
1686d9713679SLai Jiangshan 	/*
16876715ddf9SLai Jiangshan 	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
16886715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
16896715ddf9SLai Jiangshan 	 * set zone_last to ZONE_NORMAL.
1690d9713679SLai Jiangshan 	 *
16916715ddf9SLai Jiangshan 	 * If we don't have HIGHMEM nor movable node,
16926715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
16936715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
1694d9713679SLai Jiangshan 	 */
16956715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
1696d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
1697d9713679SLai Jiangshan 
1698d9713679SLai Jiangshan 	/*
1699d9713679SLai Jiangshan 	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
1700d9713679SLai Jiangshan 	 * If the memory to be offline is in a zone of 0...zone_last,
1701d9713679SLai Jiangshan 	 * and it is the last present memory, 0...zone_last will
1702d9713679SLai Jiangshan 	 * become empty after offline, thus we can determine we will
1703d9713679SLai Jiangshan 	 * need to clear the node from node_states[N_NORMAL_MEMORY].
1704d9713679SLai Jiangshan 	 */
1705d9713679SLai Jiangshan 	for (zt = 0; zt <= zone_last; zt++)
1706d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1707d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1708d9713679SLai Jiangshan 		arg->status_change_nid_normal = zone_to_nid(zone);
1709d9713679SLai Jiangshan 	else
1710d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
1711d9713679SLai Jiangshan 
17126715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
17136715ddf9SLai Jiangshan 	/*
17146715ddf9SLai Jiangshan 	 * If we have movable node, node_states[N_HIGH_MEMORY]
17156715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
17166715ddf9SLai Jiangshan 	 * set zone_last to ZONE_HIGHMEM.
17176715ddf9SLai Jiangshan 	 *
17186715ddf9SLai Jiangshan 	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
17196715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
17206715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
17216715ddf9SLai Jiangshan 	 */
17226715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
17236715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
17246715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
17256715ddf9SLai Jiangshan 
17266715ddf9SLai Jiangshan 	for (; zt <= zone_last; zt++)
17276715ddf9SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
17286715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
17296715ddf9SLai Jiangshan 		arg->status_change_nid_high = zone_to_nid(zone);
17306715ddf9SLai Jiangshan 	else
17316715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
17326715ddf9SLai Jiangshan #else
17336715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
17346715ddf9SLai Jiangshan #endif
17356715ddf9SLai Jiangshan 
1736d9713679SLai Jiangshan 	/*
1737d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1738d9713679SLai Jiangshan 	 */
1739d9713679SLai Jiangshan 	zone_last = ZONE_MOVABLE;
1740d9713679SLai Jiangshan 
1741d9713679SLai Jiangshan 	/*
1742d9713679SLai Jiangshan 	 * check whether node_states[N_HIGH_MEMORY] will be changed.
1743d9713679SLai Jiangshan 	 * If we try to offline the last present @nr_pages from the node,
1744d9713679SLai Jiangshan 	 * we can determine we will need to clear the node from
1745d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY].
1746d9713679SLai Jiangshan 	 */
1747d9713679SLai Jiangshan 	for (; zt <= zone_last; zt++)
1748d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1749d9713679SLai Jiangshan 	if (nr_pages >= present_pages)
1750d9713679SLai Jiangshan 		arg->status_change_nid = zone_to_nid(zone);
1751d9713679SLai Jiangshan 	else
1752d9713679SLai Jiangshan 		arg->status_change_nid = -1;
1753d9713679SLai Jiangshan }
1754d9713679SLai Jiangshan 
1755d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1756d9713679SLai Jiangshan {
1757d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
1758d9713679SLai Jiangshan 		node_clear_state(node, N_NORMAL_MEMORY);
1759d9713679SLai Jiangshan 
17606715ddf9SLai Jiangshan 	if ((N_MEMORY != N_NORMAL_MEMORY) &&
17616715ddf9SLai Jiangshan 	    (arg->status_change_nid_high >= 0))
1762d9713679SLai Jiangshan 		node_clear_state(node, N_HIGH_MEMORY);
17636715ddf9SLai Jiangshan 
17646715ddf9SLai Jiangshan 	if ((N_MEMORY != N_HIGH_MEMORY) &&
17656715ddf9SLai Jiangshan 	    (arg->status_change_nid >= 0))
17666715ddf9SLai Jiangshan 		node_clear_state(node, N_MEMORY);
1767d9713679SLai Jiangshan }
1768d9713679SLai Jiangshan 
1769a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn,
17700c0e6195SKAMEZAWA Hiroyuki 		  unsigned long end_pfn, unsigned long timeout)
17710c0e6195SKAMEZAWA Hiroyuki {
17720c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn, nr_pages, expire;
17730c0e6195SKAMEZAWA Hiroyuki 	long offlined_pages;
17747b78d335SYasunori Goto 	int ret, drain, retry_max, node;
1775d702909fSCody P Schafer 	unsigned long flags;
17760c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone;
17777b78d335SYasunori Goto 	struct memory_notify arg;
17780c0e6195SKAMEZAWA Hiroyuki 
17790c0e6195SKAMEZAWA Hiroyuki 	/* at least, alignment against pageblock is necessary */
17800c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
17810c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
17820c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
17830c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
17840c0e6195SKAMEZAWA Hiroyuki 	/* This makes hotplug much easier... and readable.
17850c0e6195SKAMEZAWA Hiroyuki 	   We assume this for now. */
17860c0e6195SKAMEZAWA Hiroyuki 	if (!test_pages_in_a_zone(start_pfn, end_pfn))
17870c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
17887b78d335SYasunori Goto 
17897b78d335SYasunori Goto 	zone = page_zone(pfn_to_page(start_pfn));
17907b78d335SYasunori Goto 	node = zone_to_nid(zone);
17917b78d335SYasunori Goto 	nr_pages = end_pfn - start_pfn;
17927b78d335SYasunori Goto 
179374d42d8fSLai Jiangshan 	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
179430467e0bSDavid Rientjes 		return -EINVAL;
179574d42d8fSLai Jiangshan 
17960c0e6195SKAMEZAWA Hiroyuki 	/* set above range as isolated */
1797b023f468SWen Congyang 	ret = start_isolate_page_range(start_pfn, end_pfn,
1798b023f468SWen Congyang 				       MIGRATE_MOVABLE, true);
17990c0e6195SKAMEZAWA Hiroyuki 	if (ret)
180030467e0bSDavid Rientjes 		return ret;
18017b78d335SYasunori Goto 
18027b78d335SYasunori Goto 	arg.start_pfn = start_pfn;
18037b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
1804d9713679SLai Jiangshan 	node_states_check_changes_offline(nr_pages, zone, &arg);
18057b78d335SYasunori Goto 
18067b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
18077b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
18087b78d335SYasunori Goto 	if (ret)
18097b78d335SYasunori Goto 		goto failed_removal;
18107b78d335SYasunori Goto 
18110c0e6195SKAMEZAWA Hiroyuki 	pfn = start_pfn;
18120c0e6195SKAMEZAWA Hiroyuki 	expire = jiffies + timeout;
18130c0e6195SKAMEZAWA Hiroyuki 	drain = 0;
18140c0e6195SKAMEZAWA Hiroyuki 	retry_max = 5;
18150c0e6195SKAMEZAWA Hiroyuki repeat:
18160c0e6195SKAMEZAWA Hiroyuki 	/* start memory hot removal */
18170c0e6195SKAMEZAWA Hiroyuki 	ret = -EAGAIN;
18180c0e6195SKAMEZAWA Hiroyuki 	if (time_after(jiffies, expire))
18190c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
18200c0e6195SKAMEZAWA Hiroyuki 	ret = -EINTR;
18210c0e6195SKAMEZAWA Hiroyuki 	if (signal_pending(current))
18220c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
18230c0e6195SKAMEZAWA Hiroyuki 	ret = 0;
18240c0e6195SKAMEZAWA Hiroyuki 	if (drain) {
18250c0e6195SKAMEZAWA Hiroyuki 		lru_add_drain_all();
18260c0e6195SKAMEZAWA Hiroyuki 		cond_resched();
1827c0554329SVlastimil Babka 		drain_all_pages(zone);
18280c0e6195SKAMEZAWA Hiroyuki 	}
18290c0e6195SKAMEZAWA Hiroyuki 
1830c8721bbbSNaoya Horiguchi 	pfn = scan_movable_pages(start_pfn, end_pfn);
1831c8721bbbSNaoya Horiguchi 	if (pfn) { /* We have movable pages */
18320c0e6195SKAMEZAWA Hiroyuki 		ret = do_migrate_range(pfn, end_pfn);
18330c0e6195SKAMEZAWA Hiroyuki 		if (!ret) {
18340c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
18350c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
18360c0e6195SKAMEZAWA Hiroyuki 		} else {
18370c0e6195SKAMEZAWA Hiroyuki 			if (ret < 0)
18380c0e6195SKAMEZAWA Hiroyuki 				if (--retry_max == 0)
18390c0e6195SKAMEZAWA Hiroyuki 					goto failed_removal;
18400c0e6195SKAMEZAWA Hiroyuki 			yield();
18410c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
18420c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
18430c0e6195SKAMEZAWA Hiroyuki 		}
18440c0e6195SKAMEZAWA Hiroyuki 	}
1845b3834be5SAdam Buchbinder 	/* drain all zones' lru pagevecs; this is asynchronous... */
18460c0e6195SKAMEZAWA Hiroyuki 	lru_add_drain_all();
18470c0e6195SKAMEZAWA Hiroyuki 	yield();
1848b3834be5SAdam Buchbinder 	/* drain pcp pages, this is synchronous. */
1849c0554329SVlastimil Babka 	drain_all_pages(zone);
1850c8721bbbSNaoya Horiguchi 	/*
1851c8721bbbSNaoya Horiguchi 	 * dissolve free hugepages in the memory block before doing offlining
1852c8721bbbSNaoya Horiguchi 	 * actually in order to make hugetlbfs's object counting consistent.
1853c8721bbbSNaoya Horiguchi 	 */
1854c8721bbbSNaoya Horiguchi 	dissolve_free_huge_pages(start_pfn, end_pfn);
18550c0e6195SKAMEZAWA Hiroyuki 	/* check again */
18560c0e6195SKAMEZAWA Hiroyuki 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
18570c0e6195SKAMEZAWA Hiroyuki 	if (offlined_pages < 0) {
18580c0e6195SKAMEZAWA Hiroyuki 		ret = -EBUSY;
18590c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
18600c0e6195SKAMEZAWA Hiroyuki 	}
1861*e33e33b4SChen Yucong 	pr_info("Offlined Pages %ld\n", offlined_pages);
1862b3834be5SAdam Buchbinder 	/* Ok, all of our target pages are isolated.
18630c0e6195SKAMEZAWA Hiroyuki 	   We cannot roll back at this point. */
18640c0e6195SKAMEZAWA Hiroyuki 	offline_isolated_pages(start_pfn, end_pfn);
1865dbc0e4ceSKAMEZAWA Hiroyuki 	/* reset pagetype flags and make the migrate type MOVABLE */
18660815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
18670c0e6195SKAMEZAWA Hiroyuki 	/* removal success */
18683dcc0571SJiang Liu 	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
18690c0e6195SKAMEZAWA Hiroyuki 	zone->present_pages -= offlined_pages;
1870d702909fSCody P Schafer 
1871d702909fSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
18720c0e6195SKAMEZAWA Hiroyuki 	zone->zone_pgdat->node_present_pages -= offlined_pages;
1873d702909fSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
18747b78d335SYasunori Goto 
18751b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
18761b79acc9SKOSAKI Motohiro 
18771e8537baSXishi Qiu 	if (!populated_zone(zone)) {
1878340175b7SJiang Liu 		zone_pcp_reset(zone);
18791e8537baSXishi Qiu 		mutex_lock(&zonelists_mutex);
18801e8537baSXishi Qiu 		build_all_zonelists(NULL, NULL);
18811e8537baSXishi Qiu 		mutex_unlock(&zonelists_mutex);
18821e8537baSXishi Qiu 	} else
18831e8537baSXishi Qiu 		zone_pcp_update(zone);
1884340175b7SJiang Liu 
1885d9713679SLai Jiangshan 	node_states_clear_node(node, &arg);
1886698b1b30SVlastimil Babka 	if (arg.status_change_nid >= 0) {
18878fe23e05SDavid Rientjes 		kswapd_stop(node);
1888698b1b30SVlastimil Babka 		kcompactd_stop(node);
1889698b1b30SVlastimil Babka 	}
1890bce7394aSMinchan Kim 
18910c0e6195SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
18920c0e6195SKAMEZAWA Hiroyuki 	writeback_set_ratelimit();
18937b78d335SYasunori Goto 
18947b78d335SYasunori Goto 	memory_notify(MEM_OFFLINE, &arg);
18950c0e6195SKAMEZAWA Hiroyuki 	return 0;
18960c0e6195SKAMEZAWA Hiroyuki 
18970c0e6195SKAMEZAWA Hiroyuki failed_removal:
1898*e33e33b4SChen Yucong 	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1899a62e2f4fSBjorn Helgaas 		 (unsigned long long) start_pfn << PAGE_SHIFT,
1900a62e2f4fSBjorn Helgaas 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
19017b78d335SYasunori Goto 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
19020c0e6195SKAMEZAWA Hiroyuki 	/* pushback to free area */
19030815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
19040c0e6195SKAMEZAWA Hiroyuki 	return ret;
19050c0e6195SKAMEZAWA Hiroyuki }
190671088785SBadari Pulavarty 
190730467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
1908a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1909a16cee10SWen Congyang {
1910a16cee10SWen Congyang 	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
1911a16cee10SWen Congyang }
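/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * Like online_pages(), offline_pages() must be wrapped in
 * mem_hotplug_begin()/mem_hotplug_done(). The hypothetical helper below
 * offlines one block's worth of pages.
 */
#if 0	/* example sketch, never compiled */
static int example_offline_block(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	int ret;

	mem_hotplug_begin();
	ret = offline_pages(start_pfn, nr_pages);
	mem_hotplug_done();
	return ret;
}
#endif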
1912e2ff3940SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
1913a16cee10SWen Congyang 
1914bbc76be6SWen Congyang /**
1915bbc76be6SWen Congyang  * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1916bbc76be6SWen Congyang  * @start_pfn: start pfn of the memory range
1917e05c4bbfSToshi Kani  * @end_pfn: end pfn of the memory range
1918bbc76be6SWen Congyang  * @arg: argument passed to func
1919bbc76be6SWen Congyang  * @func: callback for each memory section walked
1920bbc76be6SWen Congyang  *
1921bbc76be6SWen Congyang  * This function walks through all present mem sections in range
1922bbc76be6SWen Congyang  * [start_pfn, end_pfn) and calls func on each mem section.
1923bbc76be6SWen Congyang  *
1924bbc76be6SWen Congyang  * Returns the return value of func.
1925bbc76be6SWen Congyang  */
1926e2ff3940SRafael J. Wysocki int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
1927bbc76be6SWen Congyang 		void *arg, int (*func)(struct memory_block *, void *))
192871088785SBadari Pulavarty {
1929e90bdb7fSWen Congyang 	struct memory_block *mem = NULL;
1930e90bdb7fSWen Congyang 	struct mem_section *section;
1931e90bdb7fSWen Congyang 	unsigned long pfn, section_nr;
1932e90bdb7fSWen Congyang 	int ret;
193371088785SBadari Pulavarty 
1934e90bdb7fSWen Congyang 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1935e90bdb7fSWen Congyang 		section_nr = pfn_to_section_nr(pfn);
1936e90bdb7fSWen Congyang 		if (!present_section_nr(section_nr))
1937e90bdb7fSWen Congyang 			continue;
1938e90bdb7fSWen Congyang 
1939e90bdb7fSWen Congyang 		section = __nr_to_section(section_nr);
1940e90bdb7fSWen Congyang 		/* same memblock? */
1941e90bdb7fSWen Congyang 		if (mem)
1942e90bdb7fSWen Congyang 			if ((section_nr >= mem->start_section_nr) &&
1943e90bdb7fSWen Congyang 			    (section_nr <= mem->end_section_nr))
1944e90bdb7fSWen Congyang 				continue;
1945e90bdb7fSWen Congyang 
1946e90bdb7fSWen Congyang 		mem = find_memory_block_hinted(section, mem);
1947e90bdb7fSWen Congyang 		if (!mem)
1948e90bdb7fSWen Congyang 			continue;
1949e90bdb7fSWen Congyang 
1950bbc76be6SWen Congyang 		ret = func(mem, arg);
1951e90bdb7fSWen Congyang 		if (ret) {
1952e90bdb7fSWen Congyang 			kobject_put(&mem->dev.kobj);
1953e90bdb7fSWen Congyang 			return ret;
1954e90bdb7fSWen Congyang 		}
1955e90bdb7fSWen Congyang 	}
1956e90bdb7fSWen Congyang 
1957e90bdb7fSWen Congyang 	if (mem)
1958e90bdb7fSWen Congyang 		kobject_put(&mem->dev.kobj);
1959e90bdb7fSWen Congyang 
1960bbc76be6SWen Congyang 	return 0;
1961bbc76be6SWen Congyang }
1962bbc76be6SWen Congyang 
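/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * walk_memory_range() takes a per-memory-block callback; remove_memory()
 * below uses the same pattern with check_memblock_offlined_cb(). The
 * counter names here are hypothetical.
 */
#if 0	/* example sketch, never compiled */
static int example_count_block(struct memory_block *mem, void *arg)
{
	(*(int *)arg)++;
	return 0;		/* a non-zero return would abort the walk */
}

static int example_count_blocks(u64 start, u64 size)
{
	int nr = 0;

	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
			  &nr, example_count_block);
	return nr;
}
#endif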
1963e2ff3940SRafael J. Wysocki #ifdef CONFIG_MEMORY_HOTREMOVE
1964d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1965bbc76be6SWen Congyang {
1966bbc76be6SWen Congyang 	int ret = !is_memblock_offlined(mem);
1967bbc76be6SWen Congyang 
1968349daa0fSRandy Dunlap 	if (unlikely(ret)) {
1969349daa0fSRandy Dunlap 		phys_addr_t beginpa, endpa;
1970349daa0fSRandy Dunlap 
1971349daa0fSRandy Dunlap 		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1972349daa0fSRandy Dunlap 		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1973bbc76be6SWen Congyang 		pr_warn("removing memory fails, because memory "
1974349daa0fSRandy Dunlap 			"[%pa-%pa] is onlined\n",
1975349daa0fSRandy Dunlap 			&beginpa, &endpa);
1976349daa0fSRandy Dunlap 	}
1977bbc76be6SWen Congyang 
1978bbc76be6SWen Congyang 	return ret;
1979bbc76be6SWen Congyang }
1980bbc76be6SWen Congyang 
19810f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat)
198260a5a19eSTang Chen {
198360a5a19eSTang Chen 	int cpu;
198460a5a19eSTang Chen 
198560a5a19eSTang Chen 	for_each_present_cpu(cpu) {
198660a5a19eSTang Chen 		if (cpu_to_node(cpu) == pgdat->node_id)
198760a5a19eSTang Chen 			/*
198860a5a19eSTang Chen 			 * the cpu on this node isn't removed, and we can't
198960a5a19eSTang Chen 			 * offline this node.
199060a5a19eSTang Chen 			 */
199160a5a19eSTang Chen 			return -EBUSY;
199260a5a19eSTang Chen 	}
199360a5a19eSTang Chen 
199460a5a19eSTang Chen 	return 0;
199560a5a19eSTang Chen }
199660a5a19eSTang Chen 
19970f1cfe9dSToshi Kani static void unmap_cpu_on_node(pg_data_t *pgdat)
1998e13fe869SWen Congyang {
1999e13fe869SWen Congyang #ifdef CONFIG_ACPI_NUMA
2000e13fe869SWen Congyang 	int cpu;
2001e13fe869SWen Congyang 
2002e13fe869SWen Congyang 	for_each_possible_cpu(cpu)
2003e13fe869SWen Congyang 		if (cpu_to_node(cpu) == pgdat->node_id)
2004e13fe869SWen Congyang 			numa_clear_node(cpu);
2005e13fe869SWen Congyang #endif
2006e13fe869SWen Congyang }
2007e13fe869SWen Congyang 
20080f1cfe9dSToshi Kani static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
2009e13fe869SWen Congyang {
20100f1cfe9dSToshi Kani 	int ret;
2011e13fe869SWen Congyang 
20120f1cfe9dSToshi Kani 	ret = check_cpu_on_node(pgdat);
2013e13fe869SWen Congyang 	if (ret)
2014e13fe869SWen Congyang 		return ret;
2015e13fe869SWen Congyang 
2016e13fe869SWen Congyang 	/*
2017e13fe869SWen Congyang 	 * the node will be offlined when we get here, so we can clear
2018e13fe869SWen Congyang 	 * its cpu_to_node() mappings now.
2019e13fe869SWen Congyang 	 */
2020e13fe869SWen Congyang 
20210f1cfe9dSToshi Kani 	unmap_cpu_on_node(pgdat);
2022e13fe869SWen Congyang 	return 0;
2023e13fe869SWen Congyang }
2024e13fe869SWen Congyang 
20250f1cfe9dSToshi Kani /**
20260f1cfe9dSToshi Kani  * try_offline_node
20270f1cfe9dSToshi Kani  *
20280f1cfe9dSToshi Kani  * Offline a node if all memory sections and cpus of the node are removed.
20290f1cfe9dSToshi Kani  *
20300f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
20310f1cfe9dSToshi Kani  * and online/offline operations before this call.
20320f1cfe9dSToshi Kani  */
203390b30cdcSWen Congyang void try_offline_node(int nid)
203460a5a19eSTang Chen {
2035d822b86aSWen Congyang 	pg_data_t *pgdat = NODE_DATA(nid);
2036d822b86aSWen Congyang 	unsigned long start_pfn = pgdat->node_start_pfn;
2037d822b86aSWen Congyang 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
203860a5a19eSTang Chen 	unsigned long pfn;
2039d822b86aSWen Congyang 	int i;
204060a5a19eSTang Chen 
204160a5a19eSTang Chen 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
204260a5a19eSTang Chen 		unsigned long section_nr = pfn_to_section_nr(pfn);
204360a5a19eSTang Chen 
204460a5a19eSTang Chen 		if (!present_section_nr(section_nr))
204560a5a19eSTang Chen 			continue;
204660a5a19eSTang Chen 
204760a5a19eSTang Chen 		if (pfn_to_nid(pfn) != nid)
204860a5a19eSTang Chen 			continue;
204960a5a19eSTang Chen 
205060a5a19eSTang Chen 		/*
205160a5a19eSTang Chen 		 * some memory sections of this node are not removed, and we
205260a5a19eSTang Chen 		 * can't offline node now.
205360a5a19eSTang Chen 		 */
205460a5a19eSTang Chen 		return;
205560a5a19eSTang Chen 	}
205660a5a19eSTang Chen 
20570f1cfe9dSToshi Kani 	if (check_and_unmap_cpu_on_node(pgdat))
205860a5a19eSTang Chen 		return;
205960a5a19eSTang Chen 
206060a5a19eSTang Chen 	/*
206160a5a19eSTang Chen 	 * all memory/cpu of this node are removed, we can offline this
206260a5a19eSTang Chen 	 * node now.
206360a5a19eSTang Chen 	 */
206460a5a19eSTang Chen 	node_set_offline(nid);
206560a5a19eSTang Chen 	unregister_one_node(nid);
2066d822b86aSWen Congyang 
2067d822b86aSWen Congyang 	/* free waittable in each zone */
2068d822b86aSWen Congyang 	for (i = 0; i < MAX_NR_ZONES; i++) {
2069d822b86aSWen Congyang 		struct zone *zone = pgdat->node_zones + i;
2070d822b86aSWen Congyang 
2071ca4b3f30SJianguo Wu 		/*
2072ca4b3f30SJianguo Wu 		 * wait_table may be allocated from boot memory,
2073ca4b3f30SJianguo Wu 		 * only free it here if it was allocated with vmalloc.
2074ca4b3f30SJianguo Wu 		 */
207585bd8399SGu Zheng 		if (is_vmalloc_addr(zone->wait_table)) {
2076d822b86aSWen Congyang 			vfree(zone->wait_table);
207785bd8399SGu Zheng 			zone->wait_table = NULL;
207885bd8399SGu Zheng 		}
2079d822b86aSWen Congyang 	}
208060a5a19eSTang Chen }
208190b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
208260a5a19eSTang Chen 
20830f1cfe9dSToshi Kani /**
20840f1cfe9dSToshi Kani  * remove_memory
20850f1cfe9dSToshi Kani  *
20860f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
20870f1cfe9dSToshi Kani  * and online/offline operations before this call, as required by
20880f1cfe9dSToshi Kani  * try_offline_node().
20890f1cfe9dSToshi Kani  */
2090242831ebSRafael J. Wysocki void __ref remove_memory(int nid, u64 start, u64 size)
2091bbc76be6SWen Congyang {
2092242831ebSRafael J. Wysocki 	int ret;
2093993c1aadSWen Congyang 
209427356f54SToshi Kani 	BUG_ON(check_hotplug_memory_range(start, size));
209527356f54SToshi Kani 
2096bfc8c901SVladimir Davydov 	mem_hotplug_begin();
20976677e3eaSYasuaki Ishimatsu 
20986677e3eaSYasuaki Ishimatsu 	/*
2099242831ebSRafael J. Wysocki 	 * All memory blocks must be offlined before removing memory.  Check
2100242831ebSRafael J. Wysocki 	 * whether all memory blocks in question are offline and trigger a BUG()
2101242831ebSRafael J. Wysocki 	 * if this is not the case.
21026677e3eaSYasuaki Ishimatsu 	 */
2103242831ebSRafael J. Wysocki 	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
2104d6de9d53SXishi Qiu 				check_memblock_offlined_cb);
2105bfc8c901SVladimir Davydov 	if (ret)
2106242831ebSRafael J. Wysocki 		BUG();
21076677e3eaSYasuaki Ishimatsu 
210846c66c4bSYasuaki Ishimatsu 	/* remove memmap entry */
210946c66c4bSYasuaki Ishimatsu 	firmware_map_remove(start, start + size, "System RAM");
2110f9126ab9SXishi Qiu 	memblock_free(start, size);
2111f9126ab9SXishi Qiu 	memblock_remove(start, size);
211246c66c4bSYasuaki Ishimatsu 
211324d335caSWen Congyang 	arch_remove_memory(start, size);
211424d335caSWen Congyang 
211560a5a19eSTang Chen 	try_offline_node(nid);
211660a5a19eSTang Chen 
2117bfc8c901SVladimir Davydov 	mem_hotplug_done();
211871088785SBadari Pulavarty }
211971088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
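/*
 * Editor's note: an illustrative sketch only, not part of the original file.
 * remove_memory() requires the caller to hold the device hotplug lock and to
 * have offlined every memory block in the range first, per the kernel-doc
 * above. The hypothetical helper below shows that ordering.
 */
#if 0	/* example sketch, never compiled */
static void example_remove_memory(int nid, u64 start, u64 size)
{
	lock_device_hotplug();
	/* all memory blocks in [start, start + size) must be offline here */
	remove_memory(nid, start, size);
	unlock_device_hotplug();
}
#endif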
2120aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
2121