xref: /openbmc/linux/mm/memory_hotplug.c (revision c246a213f5bad687c6c2cea27d7265eaf8f6f5d7)
13947be19SDave Hansen /*
23947be19SDave Hansen  *  linux/mm/memory_hotplug.c
33947be19SDave Hansen  *
43947be19SDave Hansen  *  Copyright (C)
53947be19SDave Hansen  */
63947be19SDave Hansen 
73947be19SDave Hansen #include <linux/stddef.h>
83947be19SDave Hansen #include <linux/mm.h>
9174cd4b1SIngo Molnar #include <linux/sched/signal.h>
103947be19SDave Hansen #include <linux/swap.h>
113947be19SDave Hansen #include <linux/interrupt.h>
123947be19SDave Hansen #include <linux/pagemap.h>
133947be19SDave Hansen #include <linux/compiler.h>
14b95f1b31SPaul Gortmaker #include <linux/export.h>
153947be19SDave Hansen #include <linux/pagevec.h>
162d1d43f6SChandra Seetharaman #include <linux/writeback.h>
173947be19SDave Hansen #include <linux/slab.h>
183947be19SDave Hansen #include <linux/sysctl.h>
193947be19SDave Hansen #include <linux/cpu.h>
203947be19SDave Hansen #include <linux/memory.h>
214b94ffdcSDan Williams #include <linux/memremap.h>
223947be19SDave Hansen #include <linux/memory_hotplug.h>
233947be19SDave Hansen #include <linux/highmem.h>
243947be19SDave Hansen #include <linux/vmalloc.h>
250a547039SKAMEZAWA Hiroyuki #include <linux/ioport.h>
260c0e6195SKAMEZAWA Hiroyuki #include <linux/delay.h>
270c0e6195SKAMEZAWA Hiroyuki #include <linux/migrate.h>
280c0e6195SKAMEZAWA Hiroyuki #include <linux/page-isolation.h>
2971088785SBadari Pulavarty #include <linux/pfn.h>
306ad696d2SAndi Kleen #include <linux/suspend.h>
316d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
32d96ae530Sakpm@linux-foundation.org #include <linux/firmware-map.h>
3360a5a19eSTang Chen #include <linux/stop_machine.h>
34c8721bbbSNaoya Horiguchi #include <linux/hugetlb.h>
35c5320926STang Chen #include <linux/memblock.h>
36f784a3f1STang Chen #include <linux/bootmem.h>
37698b1b30SVlastimil Babka #include <linux/compaction.h>
383947be19SDave Hansen 
393947be19SDave Hansen #include <asm/tlbflush.h>
403947be19SDave Hansen 
411e5ad9a3SAdrian Bunk #include "internal.h"
421e5ad9a3SAdrian Bunk 
439d0ad8caSDaniel Kiper /*
449d0ad8caSDaniel Kiper  * online_page_callback contains a pointer to the current page onlining
459d0ad8caSDaniel Kiper  * function. Initially it is generic_online_page(). If required, it can be
469d0ad8caSDaniel Kiper  * changed by calling set_online_page_callback() to register a callback and
479d0ad8caSDaniel Kiper  * restore_online_page_callback() to restore the generic callback.
489d0ad8caSDaniel Kiper  */
499d0ad8caSDaniel Kiper 
509d0ad8caSDaniel Kiper static void generic_online_page(struct page *page);
519d0ad8caSDaniel Kiper 
529d0ad8caSDaniel Kiper static online_page_callback_t online_page_callback = generic_online_page;
53bfc8c901SVladimir Davydov static DEFINE_MUTEX(online_page_callback_lock);
549d0ad8caSDaniel Kiper 
55bfc8c901SVladimir Davydov /* The same as the cpu_hotplug lock, but for memory hotplug. */
56bfc8c901SVladimir Davydov static struct {
57bfc8c901SVladimir Davydov 	struct task_struct *active_writer;
58bfc8c901SVladimir Davydov 	struct mutex lock; /* Synchronizes accesses to refcount. */
59bfc8c901SVladimir Davydov 	/*
60bfc8c901SVladimir Davydov 	 * Also blocks new readers during
61bfc8c901SVladimir Davydov 	 * an ongoing mem hotplug operation.
62bfc8c901SVladimir Davydov 	 */
63bfc8c901SVladimir Davydov 	int refcount;
6420d6c96bSKOSAKI Motohiro 
65bfc8c901SVladimir Davydov #ifdef CONFIG_DEBUG_LOCK_ALLOC
66bfc8c901SVladimir Davydov 	struct lockdep_map dep_map;
67bfc8c901SVladimir Davydov #endif
68bfc8c901SVladimir Davydov } mem_hotplug = {
69bfc8c901SVladimir Davydov 	.active_writer = NULL,
70bfc8c901SVladimir Davydov 	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
71bfc8c901SVladimir Davydov 	.refcount = 0,
72bfc8c901SVladimir Davydov #ifdef CONFIG_DEBUG_LOCK_ALLOC
73bfc8c901SVladimir Davydov 	.dep_map = {.name = "mem_hotplug.lock" },
74bfc8c901SVladimir Davydov #endif
75bfc8c901SVladimir Davydov };
76bfc8c901SVladimir Davydov 
77bfc8c901SVladimir Davydov /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
78bfc8c901SVladimir Davydov #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
79bfc8c901SVladimir Davydov #define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
80bfc8c901SVladimir Davydov #define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)
81bfc8c901SVladimir Davydov 
828604d9e5SVitaly Kuznetsov #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
8331bc3858SVitaly Kuznetsov bool memhp_auto_online;
848604d9e5SVitaly Kuznetsov #else
858604d9e5SVitaly Kuznetsov bool memhp_auto_online = true;
868604d9e5SVitaly Kuznetsov #endif
8731bc3858SVitaly Kuznetsov EXPORT_SYMBOL_GPL(memhp_auto_online);
8831bc3858SVitaly Kuznetsov 
8986dd995dSVitaly Kuznetsov static int __init setup_memhp_default_state(char *str)
9086dd995dSVitaly Kuznetsov {
9186dd995dSVitaly Kuznetsov 	if (!strcmp(str, "online"))
9286dd995dSVitaly Kuznetsov 		memhp_auto_online = true;
9386dd995dSVitaly Kuznetsov 	else if (!strcmp(str, "offline"))
9486dd995dSVitaly Kuznetsov 		memhp_auto_online = false;
9586dd995dSVitaly Kuznetsov 
9686dd995dSVitaly Kuznetsov 	return 1;
9786dd995dSVitaly Kuznetsov }
9886dd995dSVitaly Kuznetsov __setup("memhp_default_state=", setup_memhp_default_state);
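
/*
 * Example usage (illustrative only, not part of the code flow): the default
 * can be selected at build time via CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE and
 * overridden on the kernel command line, e.g.
 *
 *	memhp_default_state=online	-> auto-online hot-added memory
 *	memhp_default_state=offline	-> leave hot-added memory offline
 */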
9986dd995dSVitaly Kuznetsov 
100bfc8c901SVladimir Davydov void get_online_mems(void)
10120d6c96bSKOSAKI Motohiro {
102bfc8c901SVladimir Davydov 	might_sleep();
103bfc8c901SVladimir Davydov 	if (mem_hotplug.active_writer == current)
104bfc8c901SVladimir Davydov 		return;
105bfc8c901SVladimir Davydov 	memhp_lock_acquire_read();
106bfc8c901SVladimir Davydov 	mutex_lock(&mem_hotplug.lock);
107bfc8c901SVladimir Davydov 	mem_hotplug.refcount++;
108bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
109bfc8c901SVladimir Davydov 
11020d6c96bSKOSAKI Motohiro }
11120d6c96bSKOSAKI Motohiro 
112bfc8c901SVladimir Davydov void put_online_mems(void)
11320d6c96bSKOSAKI Motohiro {
114bfc8c901SVladimir Davydov 	if (mem_hotplug.active_writer == current)
115bfc8c901SVladimir Davydov 		return;
116bfc8c901SVladimir Davydov 	mutex_lock(&mem_hotplug.lock);
117bfc8c901SVladimir Davydov 
118bfc8c901SVladimir Davydov 	if (WARN_ON(!mem_hotplug.refcount))
119bfc8c901SVladimir Davydov 		mem_hotplug.refcount++; /* try to fix things up */
120bfc8c901SVladimir Davydov 
121bfc8c901SVladimir Davydov 	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
122bfc8c901SVladimir Davydov 		wake_up_process(mem_hotplug.active_writer);
123bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
124bfc8c901SVladimir Davydov 	memhp_lock_release();
125bfc8c901SVladimir Davydov 
12620d6c96bSKOSAKI Motohiro }
12720d6c96bSKOSAKI Motohiro 
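/*
 * Illustrative sketch (hypothetical helper, not used anywhere): a typical
 * reader-side user of the hotplug lock. get_online_mems()/put_online_mems()
 * bracket a walk over pfns so that no memory section can be added or removed
 * while we are looking at it.
 */
static unsigned long __maybe_unused example_count_valid_pfns(unsigned long start_pfn,
							     unsigned long nr_pages)
{
	unsigned long pfn, count = 0;

	get_online_mems();		/* block concurrent hot-add/remove */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		if (pfn_valid(pfn))
			count++;
	put_online_mems();

	return count;
}
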
12855adc1d0SHeiko Carstens /* Serializes write accesses to mem_hotplug.active_writer. */
12955adc1d0SHeiko Carstens static DEFINE_MUTEX(memory_add_remove_lock);
13055adc1d0SHeiko Carstens 
13130467e0bSDavid Rientjes void mem_hotplug_begin(void)
132bfc8c901SVladimir Davydov {
13355adc1d0SHeiko Carstens 	mutex_lock(&memory_add_remove_lock);
1343fc21924SDan Williams 
135bfc8c901SVladimir Davydov 	mem_hotplug.active_writer = current;
136bfc8c901SVladimir Davydov 
137bfc8c901SVladimir Davydov 	memhp_lock_acquire();
138bfc8c901SVladimir Davydov 	for (;;) {
139bfc8c901SVladimir Davydov 		mutex_lock(&mem_hotplug.lock);
140bfc8c901SVladimir Davydov 		if (likely(!mem_hotplug.refcount))
141bfc8c901SVladimir Davydov 			break;
142bfc8c901SVladimir Davydov 		__set_current_state(TASK_UNINTERRUPTIBLE);
143bfc8c901SVladimir Davydov 		mutex_unlock(&mem_hotplug.lock);
144bfc8c901SVladimir Davydov 		schedule();
145bfc8c901SVladimir Davydov 	}
146bfc8c901SVladimir Davydov }
147bfc8c901SVladimir Davydov 
14830467e0bSDavid Rientjes void mem_hotplug_done(void)
149bfc8c901SVladimir Davydov {
150bfc8c901SVladimir Davydov 	mem_hotplug.active_writer = NULL;
151bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
152bfc8c901SVladimir Davydov 	memhp_lock_release();
15355adc1d0SHeiko Carstens 	mutex_unlock(&memory_add_remove_lock);
154bfc8c901SVladimir Davydov }
15520d6c96bSKOSAKI Motohiro 
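/*
 * Illustrative sketch (hypothetical helper): the writer-side pattern used by
 * the add_memory()/offline_pages() paths. mem_hotplug_begin() takes
 * memory_add_remove_lock and then waits until every get_online_mems()
 * reader has dropped out before the hotplug operation proceeds.
 */
static int __maybe_unused example_hotplug_writer(void)
{
	int ret = 0;

	mem_hotplug_begin();	/* exclude all get_online_mems() readers */
	/*
	 * Zone/node spans and memory sections would be modified here,
	 * e.g. via __add_pages() or __remove_pages().
	 */
	mem_hotplug_done();

	return ret;
}
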
15645e0b78bSKeith Mannthey /* add this memory range to the iomem resource tree */
15745e0b78bSKeith Mannthey static struct resource *register_memory_resource(u64 start, u64 size)
15845e0b78bSKeith Mannthey {
15945e0b78bSKeith Mannthey 	struct resource *res;
16045e0b78bSKeith Mannthey 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1616f754ba4SVitaly Kuznetsov 	if (!res)
1626f754ba4SVitaly Kuznetsov 		return ERR_PTR(-ENOMEM);
16345e0b78bSKeith Mannthey 
16445e0b78bSKeith Mannthey 	res->name = "System RAM";
16545e0b78bSKeith Mannthey 	res->start = start;
16645e0b78bSKeith Mannthey 	res->end = start + size - 1;
167782b8664SToshi Kani 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
16845e0b78bSKeith Mannthey 	if (request_resource(&iomem_resource, res) < 0) {
1694996eed8SToshi Kani 		pr_debug("System RAM resource %pR cannot be added\n", res);
17045e0b78bSKeith Mannthey 		kfree(res);
1716f754ba4SVitaly Kuznetsov 		return ERR_PTR(-EEXIST);
17245e0b78bSKeith Mannthey 	}
17345e0b78bSKeith Mannthey 	return res;
17445e0b78bSKeith Mannthey }
17545e0b78bSKeith Mannthey 
17645e0b78bSKeith Mannthey static void release_memory_resource(struct resource *res)
17745e0b78bSKeith Mannthey {
17845e0b78bSKeith Mannthey 	if (!res)
17945e0b78bSKeith Mannthey 		return;
18045e0b78bSKeith Mannthey 	release_resource(res);
18145e0b78bSKeith Mannthey 	kfree(res);
18245e0b78bSKeith Mannthey 	return;
18345e0b78bSKeith Mannthey }
18445e0b78bSKeith Mannthey 
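/*
 * Illustrative sketch (hypothetical helper): the intended pairing of
 * register_memory_resource() and release_memory_resource(). Callers are
 * expected to check the ERR_PTR-encoded return value before using the
 * resource and to release it if the hot-add fails later on.
 */
static int __maybe_unused example_claim_system_ram(u64 start, u64 size)
{
	struct resource *res;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* ... hot-add the range; on failure undo the reservation ... */

	release_memory_resource(res);
	return 0;
}
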
18553947027SKeith Mannthey #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
18646723bfaSYasuaki Ishimatsu void get_page_bootmem(unsigned long info,  struct page *page,
1875f24ce5fSAndrea Arcangeli 		      unsigned long type)
18804753278SYasunori Goto {
189ddffe98dSYasuaki Ishimatsu 	page->freelist = (void *)type;
19004753278SYasunori Goto 	SetPagePrivate(page);
19104753278SYasunori Goto 	set_page_private(page, info);
192fe896d18SJoonsoo Kim 	page_ref_inc(page);
19304753278SYasunori Goto }
19404753278SYasunori Goto 
195170a5a7eSJiang Liu void put_page_bootmem(struct page *page)
19604753278SYasunori Goto {
1975f24ce5fSAndrea Arcangeli 	unsigned long type;
19804753278SYasunori Goto 
199ddffe98dSYasuaki Ishimatsu 	type = (unsigned long) page->freelist;
2005f24ce5fSAndrea Arcangeli 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
2015f24ce5fSAndrea Arcangeli 	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
20204753278SYasunori Goto 
203fe896d18SJoonsoo Kim 	if (page_ref_dec_return(page) == 1) {
204ddffe98dSYasuaki Ishimatsu 		page->freelist = NULL;
20504753278SYasunori Goto 		ClearPagePrivate(page);
20604753278SYasunori Goto 		set_page_private(page, 0);
2075f24ce5fSAndrea Arcangeli 		INIT_LIST_HEAD(&page->lru);
208170a5a7eSJiang Liu 		free_reserved_page(page);
20904753278SYasunori Goto 	}
21004753278SYasunori Goto }
21104753278SYasunori Goto 
21246723bfaSYasuaki Ishimatsu #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
21346723bfaSYasuaki Ishimatsu #ifndef CONFIG_SPARSEMEM_VMEMMAP
214d92bc318SAdrian Bunk static void register_page_bootmem_info_section(unsigned long start_pfn)
21504753278SYasunori Goto {
21604753278SYasunori Goto 	unsigned long *usemap, mapsize, section_nr, i;
21704753278SYasunori Goto 	struct mem_section *ms;
21804753278SYasunori Goto 	struct page *page, *memmap;
21904753278SYasunori Goto 
22004753278SYasunori Goto 	section_nr = pfn_to_section_nr(start_pfn);
22104753278SYasunori Goto 	ms = __nr_to_section(section_nr);
22204753278SYasunori Goto 
22304753278SYasunori Goto 	/* Get section's memmap address */
22404753278SYasunori Goto 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
22504753278SYasunori Goto 
22604753278SYasunori Goto 	/*
22704753278SYasunori Goto 	 * Get page for the memmap's phys address
22804753278SYasunori Goto 	 * XXX: need more consideration for sparse_vmemmap...
22904753278SYasunori Goto 	 */
23004753278SYasunori Goto 	page = virt_to_page(memmap);
23104753278SYasunori Goto 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
23204753278SYasunori Goto 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
23304753278SYasunori Goto 
23404753278SYasunori Goto 	/* remember memmap's page */
23504753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
23604753278SYasunori Goto 		get_page_bootmem(section_nr, page, SECTION_INFO);
23704753278SYasunori Goto 
23804753278SYasunori Goto 	usemap = __nr_to_section(section_nr)->pageblock_flags;
23904753278SYasunori Goto 	page = virt_to_page(usemap);
24004753278SYasunori Goto 
24104753278SYasunori Goto 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
24204753278SYasunori Goto 
24304753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
244af370fb8SYasunori Goto 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
24504753278SYasunori Goto 
24604753278SYasunori Goto }
24746723bfaSYasuaki Ishimatsu #else /* CONFIG_SPARSEMEM_VMEMMAP */
24846723bfaSYasuaki Ishimatsu static void register_page_bootmem_info_section(unsigned long start_pfn)
24946723bfaSYasuaki Ishimatsu {
25046723bfaSYasuaki Ishimatsu 	unsigned long *usemap, mapsize, section_nr, i;
25146723bfaSYasuaki Ishimatsu 	struct mem_section *ms;
25246723bfaSYasuaki Ishimatsu 	struct page *page, *memmap;
25346723bfaSYasuaki Ishimatsu 
25446723bfaSYasuaki Ishimatsu 	if (!pfn_valid(start_pfn))
25546723bfaSYasuaki Ishimatsu 		return;
25646723bfaSYasuaki Ishimatsu 
25746723bfaSYasuaki Ishimatsu 	section_nr = pfn_to_section_nr(start_pfn);
25846723bfaSYasuaki Ishimatsu 	ms = __nr_to_section(section_nr);
25946723bfaSYasuaki Ishimatsu 
26046723bfaSYasuaki Ishimatsu 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
26146723bfaSYasuaki Ishimatsu 
26246723bfaSYasuaki Ishimatsu 	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
26346723bfaSYasuaki Ishimatsu 
26446723bfaSYasuaki Ishimatsu 	usemap = __nr_to_section(section_nr)->pageblock_flags;
26546723bfaSYasuaki Ishimatsu 	page = virt_to_page(usemap);
26646723bfaSYasuaki Ishimatsu 
26746723bfaSYasuaki Ishimatsu 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
26846723bfaSYasuaki Ishimatsu 
26946723bfaSYasuaki Ishimatsu 	for (i = 0; i < mapsize; i++, page++)
27046723bfaSYasuaki Ishimatsu 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
27146723bfaSYasuaki Ishimatsu }
27246723bfaSYasuaki Ishimatsu #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
27304753278SYasunori Goto 
2747ded384aSLinus Torvalds void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
27504753278SYasunori Goto {
27604753278SYasunori Goto 	unsigned long i, pfn, end_pfn, nr_pages;
27704753278SYasunori Goto 	int node = pgdat->node_id;
27804753278SYasunori Goto 	struct page *page;
27904753278SYasunori Goto 
28004753278SYasunori Goto 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
28104753278SYasunori Goto 	page = virt_to_page(pgdat);
28204753278SYasunori Goto 
28304753278SYasunori Goto 	for (i = 0; i < nr_pages; i++, page++)
28404753278SYasunori Goto 		get_page_bootmem(node, page, NODE_INFO);
28504753278SYasunori Goto 
28604753278SYasunori Goto 	pfn = pgdat->node_start_pfn;
287c1f19495SCody P Schafer 	end_pfn = pgdat_end_pfn(pgdat);
28804753278SYasunori Goto 
2897e9f5eb0STang Chen 	/* register section info */
290f14851afSqiuxishi 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
291f14851afSqiuxishi 		/*
292f14851afSqiuxishi 		 * Some platforms can assign the same pfn to multiple nodes - on
293f14851afSqiuxishi 		 * node0 as well as nodeN.  To avoid registering a pfn against
294f14851afSqiuxishi 		 * multiple nodes we check that this pfn does not already
2957e9f5eb0STang Chen 		 * reside in some other node.
296f14851afSqiuxishi 		 */
297f65e91dfSYang Shi 		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
29804753278SYasunori Goto 			register_page_bootmem_info_section(pfn);
299f14851afSqiuxishi 	}
30004753278SYasunori Goto }
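
/*
 * Illustrative sketch (hypothetical helper): architecture boot code is
 * expected to call register_page_bootmem_info_node() once per online node,
 * roughly like this (x86 does so from its register_page_bootmem_info()).
 */
static void __init __maybe_unused example_register_all_bootmem_info(void)
{
	int nid;

	for_each_online_node(nid)
		register_page_bootmem_info_node(NODE_DATA(nid));
}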
30146723bfaSYasuaki Ishimatsu #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
30204753278SYasunori Goto 
303f2765404SFabian Frederick static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
30476cdd58eSHeiko Carstens 				     unsigned long end_pfn)
30576cdd58eSHeiko Carstens {
30676cdd58eSHeiko Carstens 	unsigned long old_zone_end_pfn;
30776cdd58eSHeiko Carstens 
30876cdd58eSHeiko Carstens 	zone_span_writelock(zone);
30976cdd58eSHeiko Carstens 
310c33bc315SXishi Qiu 	old_zone_end_pfn = zone_end_pfn(zone);
3118080fc03SXishi Qiu 	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
31276cdd58eSHeiko Carstens 		zone->zone_start_pfn = start_pfn;
31376cdd58eSHeiko Carstens 
31476cdd58eSHeiko Carstens 	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
31576cdd58eSHeiko Carstens 				zone->zone_start_pfn;
31676cdd58eSHeiko Carstens 
31776cdd58eSHeiko Carstens 	zone_span_writeunlock(zone);
31876cdd58eSHeiko Carstens }
31976cdd58eSHeiko Carstens 
320511c2abaSLai Jiangshan static void resize_zone(struct zone *zone, unsigned long start_pfn,
321511c2abaSLai Jiangshan 		unsigned long end_pfn)
322511c2abaSLai Jiangshan {
323511c2abaSLai Jiangshan 	zone_span_writelock(zone);
324511c2abaSLai Jiangshan 
325e455a9b9SLai Jiangshan 	if (end_pfn - start_pfn) {
326511c2abaSLai Jiangshan 		zone->zone_start_pfn = start_pfn;
327511c2abaSLai Jiangshan 		zone->spanned_pages = end_pfn - start_pfn;
328e455a9b9SLai Jiangshan 	} else {
329e455a9b9SLai Jiangshan 		/*
330e455a9b9SLai Jiangshan 		 * make it consistent with free_area_init_core():
331e455a9b9SLai Jiangshan 		 * if spanned_pages == 0, then keep zone_start_pfn == 0
332e455a9b9SLai Jiangshan 		 */
333e455a9b9SLai Jiangshan 		zone->zone_start_pfn = 0;
334e455a9b9SLai Jiangshan 		zone->spanned_pages = 0;
335e455a9b9SLai Jiangshan 	}
336511c2abaSLai Jiangshan 
337511c2abaSLai Jiangshan 	zone_span_writeunlock(zone);
338511c2abaSLai Jiangshan }
339511c2abaSLai Jiangshan 
340511c2abaSLai Jiangshan static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
341511c2abaSLai Jiangshan 		unsigned long end_pfn)
342511c2abaSLai Jiangshan {
343511c2abaSLai Jiangshan 	enum zone_type zid = zone_idx(zone);
344511c2abaSLai Jiangshan 	int nid = zone->zone_pgdat->node_id;
345511c2abaSLai Jiangshan 	unsigned long pfn;
346511c2abaSLai Jiangshan 
347511c2abaSLai Jiangshan 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
348511c2abaSLai Jiangshan 		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
349511c2abaSLai Jiangshan }
350511c2abaSLai Jiangshan 
351dc0bbf3bSMichal Hocko static void __ref ensure_zone_is_initialized(struct zone *zone,
352f6bbb78eSCody P Schafer 			unsigned long start_pfn, unsigned long num_pages)
353f6bbb78eSCody P Schafer {
354f6bbb78eSCody P Schafer 	if (!zone_is_initialized(zone))
355dc0bbf3bSMichal Hocko 		init_currently_empty_zone(zone, start_pfn, num_pages);
356f6bbb78eSCody P Schafer }
357f6bbb78eSCody P Schafer 
358e455a9b9SLai Jiangshan static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
359511c2abaSLai Jiangshan 		unsigned long start_pfn, unsigned long end_pfn)
360511c2abaSLai Jiangshan {
361511c2abaSLai Jiangshan 	unsigned long flags;
362e455a9b9SLai Jiangshan 	unsigned long z1_start_pfn;
363e455a9b9SLai Jiangshan 
364dc0bbf3bSMichal Hocko 	ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
365511c2abaSLai Jiangshan 
366511c2abaSLai Jiangshan 	pgdat_resize_lock(z1->zone_pgdat, &flags);
367511c2abaSLai Jiangshan 
368511c2abaSLai Jiangshan 	/* can't move pfns which are higher than @z2 */
369108bcc96SCody P Schafer 	if (end_pfn > zone_end_pfn(z2))
370511c2abaSLai Jiangshan 		goto out_fail;
371834405c3SJiang Liu 	/* the part being moved out must be at the leftmost of @z2 */
372511c2abaSLai Jiangshan 	if (start_pfn > z2->zone_start_pfn)
373511c2abaSLai Jiangshan 		goto out_fail;
374511c2abaSLai Jiangshan 	/* the range must be included in or overlap @z2 */
375511c2abaSLai Jiangshan 	if (end_pfn <= z2->zone_start_pfn)
376511c2abaSLai Jiangshan 		goto out_fail;
377511c2abaSLai Jiangshan 
378e455a9b9SLai Jiangshan 	/* use start_pfn for z1's start_pfn if z1 is empty */
3798080fc03SXishi Qiu 	if (!zone_is_empty(z1))
380e455a9b9SLai Jiangshan 		z1_start_pfn = z1->zone_start_pfn;
381e455a9b9SLai Jiangshan 	else
382e455a9b9SLai Jiangshan 		z1_start_pfn = start_pfn;
383e455a9b9SLai Jiangshan 
384e455a9b9SLai Jiangshan 	resize_zone(z1, z1_start_pfn, end_pfn);
385108bcc96SCody P Schafer 	resize_zone(z2, end_pfn, zone_end_pfn(z2));
386511c2abaSLai Jiangshan 
387511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
388511c2abaSLai Jiangshan 
389511c2abaSLai Jiangshan 	fix_zone_id(z1, start_pfn, end_pfn);
390511c2abaSLai Jiangshan 
391511c2abaSLai Jiangshan 	return 0;
392511c2abaSLai Jiangshan out_fail:
393511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
394511c2abaSLai Jiangshan 	return -1;
395511c2abaSLai Jiangshan }
396511c2abaSLai Jiangshan 
397e455a9b9SLai Jiangshan static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
398511c2abaSLai Jiangshan 		unsigned long start_pfn, unsigned long end_pfn)
399511c2abaSLai Jiangshan {
400511c2abaSLai Jiangshan 	unsigned long flags;
401e455a9b9SLai Jiangshan 	unsigned long z2_end_pfn;
402e455a9b9SLai Jiangshan 
403dc0bbf3bSMichal Hocko 	ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
404511c2abaSLai Jiangshan 
405511c2abaSLai Jiangshan 	pgdat_resize_lock(z1->zone_pgdat, &flags);
406511c2abaSLai Jiangshan 
407511c2abaSLai Jiangshan 	/* can't move pfns which are lower than @z1 */
408511c2abaSLai Jiangshan 	if (z1->zone_start_pfn > start_pfn)
409511c2abaSLai Jiangshan 		goto out_fail;
410511c2abaSLai Jiangshan 	/* the part being moved out must be at the rightmost of @z1 */
411108bcc96SCody P Schafer 	if (zone_end_pfn(z1) >  end_pfn)
412511c2abaSLai Jiangshan 		goto out_fail;
413511c2abaSLai Jiangshan 	/* the range must be included in or overlap @z1 */
414108bcc96SCody P Schafer 	if (start_pfn >= zone_end_pfn(z1))
415511c2abaSLai Jiangshan 		goto out_fail;
416511c2abaSLai Jiangshan 
417e455a9b9SLai Jiangshan 	/* use end_pfn for z2's end_pfn if z2 is empty */
4188080fc03SXishi Qiu 	if (!zone_is_empty(z2))
419108bcc96SCody P Schafer 		z2_end_pfn = zone_end_pfn(z2);
420e455a9b9SLai Jiangshan 	else
421e455a9b9SLai Jiangshan 		z2_end_pfn = end_pfn;
422e455a9b9SLai Jiangshan 
423511c2abaSLai Jiangshan 	resize_zone(z1, z1->zone_start_pfn, start_pfn);
424e455a9b9SLai Jiangshan 	resize_zone(z2, start_pfn, z2_end_pfn);
425511c2abaSLai Jiangshan 
426511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
427511c2abaSLai Jiangshan 
428511c2abaSLai Jiangshan 	fix_zone_id(z2, start_pfn, end_pfn);
429511c2abaSLai Jiangshan 
430511c2abaSLai Jiangshan 	return 0;
431511c2abaSLai Jiangshan out_fail:
432511c2abaSLai Jiangshan 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
433511c2abaSLai Jiangshan 	return -1;
434511c2abaSLai Jiangshan }
435511c2abaSLai Jiangshan 
436f2765404SFabian Frederick static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
43776cdd58eSHeiko Carstens 				      unsigned long end_pfn)
43876cdd58eSHeiko Carstens {
43983285c72SXishi Qiu 	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
44076cdd58eSHeiko Carstens 
441712cd386STang Chen 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
44276cdd58eSHeiko Carstens 		pgdat->node_start_pfn = start_pfn;
44376cdd58eSHeiko Carstens 
44476cdd58eSHeiko Carstens 	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
44576cdd58eSHeiko Carstens 					pgdat->node_start_pfn;
44676cdd58eSHeiko Carstens }
44776cdd58eSHeiko Carstens 
44831168481SAl Viro static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
4493947be19SDave Hansen {
4503947be19SDave Hansen 	struct pglist_data *pgdat = zone->zone_pgdat;
4513947be19SDave Hansen 	int nr_pages = PAGES_PER_SECTION;
4523947be19SDave Hansen 	int nid = pgdat->node_id;
4533947be19SDave Hansen 	int zone_type;
454e298ff75SMel Gorman 	unsigned long flags, pfn;
45576cdd58eSHeiko Carstens 
45664dd1b29SCody P Schafer 	zone_type = zone - pgdat->node_zones;
457dc0bbf3bSMichal Hocko 	ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
45864dd1b29SCody P Schafer 
45976cdd58eSHeiko Carstens 	pgdat_resize_lock(zone->zone_pgdat, &flags);
46076cdd58eSHeiko Carstens 	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
46176cdd58eSHeiko Carstens 	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
46276cdd58eSHeiko Carstens 			phys_start_pfn + nr_pages);
46376cdd58eSHeiko Carstens 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
464a2f3aa02SDave Hansen 	memmap_init_zone(nr_pages, nid, zone_type,
465a2f3aa02SDave Hansen 			 phys_start_pfn, MEMMAP_HOTPLUG);
466e298ff75SMel Gorman 
467e298ff75SMel Gorman 	/* online_pages_range() is called later and expects pages to be reserved */
468e298ff75SMel Gorman 	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
469e298ff75SMel Gorman 		if (!pfn_valid(pfn))
470e298ff75SMel Gorman 			continue;
471e298ff75SMel Gorman 
472e298ff75SMel Gorman 		SetPageReserved(pfn_to_page(pfn));
473e298ff75SMel Gorman 	}
474718127ccSYasunori Goto 	return 0;
4753947be19SDave Hansen }
4763947be19SDave Hansen 
477f1dd2cd1SMichal Hocko static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
478f1dd2cd1SMichal Hocko 		bool want_memblock)
4793947be19SDave Hansen {
4803947be19SDave Hansen 	int ret;
481f1dd2cd1SMichal Hocko 	int i;
4823947be19SDave Hansen 
483ebd15302SKAMEZAWA Hiroyuki 	if (pfn_valid(phys_start_pfn))
484ebd15302SKAMEZAWA Hiroyuki 		return -EEXIST;
485ebd15302SKAMEZAWA Hiroyuki 
486f1dd2cd1SMichal Hocko 	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
4873947be19SDave Hansen 	if (ret < 0)
4883947be19SDave Hansen 		return ret;
4893947be19SDave Hansen 
490f1dd2cd1SMichal Hocko 	/*
491f1dd2cd1SMichal Hocko 	 * Make all the pages reserved so that nobody will stumble over a half
492f1dd2cd1SMichal Hocko 	 * initialized state.
493f1dd2cd1SMichal Hocko 	 * FIXME: We also have to associate it with a node because pfn_to_nid
494f1dd2cd1SMichal Hocko 	 * relies on the page having the proper node.
495f1dd2cd1SMichal Hocko 	 */
496f1dd2cd1SMichal Hocko 	for (i = 0; i < PAGES_PER_SECTION; i++) {
497f1dd2cd1SMichal Hocko 		unsigned long pfn = phys_start_pfn + i;
498f1dd2cd1SMichal Hocko 		struct page *page;
499f1dd2cd1SMichal Hocko 		if (!pfn_valid(pfn))
500f1dd2cd1SMichal Hocko 			continue;
501718127ccSYasunori Goto 
502f1dd2cd1SMichal Hocko 		page = pfn_to_page(pfn);
503f1dd2cd1SMichal Hocko 		set_page_node(page, nid);
504f1dd2cd1SMichal Hocko 		SetPageReserved(page);
505f1dd2cd1SMichal Hocko 	}
506718127ccSYasunori Goto 
5071b862aecSMichal Hocko 	if (!want_memblock)
5081b862aecSMichal Hocko 		return 0;
5091b862aecSMichal Hocko 
510c04fc586SGary Hade 	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
5113947be19SDave Hansen }
5123947be19SDave Hansen 
5134edd7cefSDavid Rientjes /*
5144edd7cefSDavid Rientjes  * Reasonably generic function for adding memory.  It is
5154edd7cefSDavid Rientjes  * expected that archs that support memory hotplug will
5164edd7cefSDavid Rientjes  * call this function after deciding the zone to which to
5174edd7cefSDavid Rientjes  * add the new pages.
5184edd7cefSDavid Rientjes  */
519f1dd2cd1SMichal Hocko int __ref __add_pages(int nid, unsigned long phys_start_pfn,
5201b862aecSMichal Hocko 			unsigned long nr_pages, bool want_memblock)
5214edd7cefSDavid Rientjes {
5224edd7cefSDavid Rientjes 	unsigned long i;
5234edd7cefSDavid Rientjes 	int err = 0;
5244edd7cefSDavid Rientjes 	int start_sec, end_sec;
5254b94ffdcSDan Williams 	struct vmem_altmap *altmap;
5264b94ffdcSDan Williams 
5274edd7cefSDavid Rientjes 	/* when initializing the mem_map, align the hot-added range to sections */
5284edd7cefSDavid Rientjes 	start_sec = pfn_to_section_nr(phys_start_pfn);
5294edd7cefSDavid Rientjes 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
5304edd7cefSDavid Rientjes 
5314b94ffdcSDan Williams 	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
5324b94ffdcSDan Williams 	if (altmap) {
5334b94ffdcSDan Williams 		/*
5344b94ffdcSDan Williams 		 * Validate altmap is within bounds of the total request
5354b94ffdcSDan Williams 		 */
5364b94ffdcSDan Williams 		if (altmap->base_pfn != phys_start_pfn
5374b94ffdcSDan Williams 				|| vmem_altmap_offset(altmap) > nr_pages) {
5384b94ffdcSDan Williams 			pr_warn_once("memory add fail, invalid altmap\n");
5397cf91a98SJoonsoo Kim 			err = -EINVAL;
5407cf91a98SJoonsoo Kim 			goto out;
5414b94ffdcSDan Williams 		}
5424b94ffdcSDan Williams 		altmap->alloc = 0;
5434b94ffdcSDan Williams 	}
5444b94ffdcSDan Williams 
5454edd7cefSDavid Rientjes 	for (i = start_sec; i <= end_sec; i++) {
546f1dd2cd1SMichal Hocko 		err = __add_section(nid, section_nr_to_pfn(i), want_memblock);
5474edd7cefSDavid Rientjes 
5484edd7cefSDavid Rientjes 		/*
5494edd7cefSDavid Rientjes 		 * EEXIST is finally dealt with by the ioresource collision
5504edd7cefSDavid Rientjes 		 * check; see add_memory() => register_memory_resource().
5514edd7cefSDavid Rientjes 		 * A warning will be printed if there is a collision.
5524edd7cefSDavid Rientjes 		 */
5534edd7cefSDavid Rientjes 		if (err && (err != -EEXIST))
5544edd7cefSDavid Rientjes 			break;
5554edd7cefSDavid Rientjes 		err = 0;
5564edd7cefSDavid Rientjes 	}
557c435a390SZhu Guihua 	vmemmap_populate_print_last();
5587cf91a98SJoonsoo Kim out:
5594edd7cefSDavid Rientjes 	return err;
5604edd7cefSDavid Rientjes }
5614edd7cefSDavid Rientjes EXPORT_SYMBOL_GPL(__add_pages);
5624edd7cefSDavid Rientjes 
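/*
 * Illustrative sketch (hypothetical helper, arch details omitted): roughly
 * how an architecture's arch_add_memory() feeds a physical range into
 * __add_pages() after extending its direct mapping. The function name and
 * the absence of page-table setup are simplifications.
 */
static int __maybe_unused example_arch_add_memory(int nid, u64 start, u64 size,
						  bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* an arch would create or extend its linear mapping for the range here */

	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}
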
5634edd7cefSDavid Rientjes #ifdef CONFIG_MEMORY_HOTREMOVE
564815121d2SYasuaki Ishimatsu /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
565815121d2SYasuaki Ishimatsu static int find_smallest_section_pfn(int nid, struct zone *zone,
566815121d2SYasuaki Ishimatsu 				     unsigned long start_pfn,
567815121d2SYasuaki Ishimatsu 				     unsigned long end_pfn)
568815121d2SYasuaki Ishimatsu {
569815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
570815121d2SYasuaki Ishimatsu 
571815121d2SYasuaki Ishimatsu 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
572815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(start_pfn);
573815121d2SYasuaki Ishimatsu 
574815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
575815121d2SYasuaki Ishimatsu 			continue;
576815121d2SYasuaki Ishimatsu 
577815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(start_pfn) != nid))
578815121d2SYasuaki Ishimatsu 			continue;
579815121d2SYasuaki Ishimatsu 
580815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
581815121d2SYasuaki Ishimatsu 			continue;
582815121d2SYasuaki Ishimatsu 
583815121d2SYasuaki Ishimatsu 		return start_pfn;
584815121d2SYasuaki Ishimatsu 	}
585815121d2SYasuaki Ishimatsu 
586815121d2SYasuaki Ishimatsu 	return 0;
587815121d2SYasuaki Ishimatsu }
588815121d2SYasuaki Ishimatsu 
589815121d2SYasuaki Ishimatsu /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
590815121d2SYasuaki Ishimatsu static int find_biggest_section_pfn(int nid, struct zone *zone,
591815121d2SYasuaki Ishimatsu 				    unsigned long start_pfn,
592815121d2SYasuaki Ishimatsu 				    unsigned long end_pfn)
593815121d2SYasuaki Ishimatsu {
594815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
595815121d2SYasuaki Ishimatsu 	unsigned long pfn;
596815121d2SYasuaki Ishimatsu 
597815121d2SYasuaki Ishimatsu 	/* pfn starts at the last pfn of the range and steps back one section at a time. */
598815121d2SYasuaki Ishimatsu 	pfn = end_pfn - 1;
599815121d2SYasuaki Ishimatsu 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
600815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
601815121d2SYasuaki Ishimatsu 
602815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
603815121d2SYasuaki Ishimatsu 			continue;
604815121d2SYasuaki Ishimatsu 
605815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(pfn) != nid))
606815121d2SYasuaki Ishimatsu 			continue;
607815121d2SYasuaki Ishimatsu 
608815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(pfn)))
609815121d2SYasuaki Ishimatsu 			continue;
610815121d2SYasuaki Ishimatsu 
611815121d2SYasuaki Ishimatsu 		return pfn;
612815121d2SYasuaki Ishimatsu 	}
613815121d2SYasuaki Ishimatsu 
614815121d2SYasuaki Ishimatsu 	return 0;
615815121d2SYasuaki Ishimatsu }
616815121d2SYasuaki Ishimatsu 
617815121d2SYasuaki Ishimatsu static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
618815121d2SYasuaki Ishimatsu 			     unsigned long end_pfn)
619815121d2SYasuaki Ishimatsu {
620815121d2SYasuaki Ishimatsu 	unsigned long zone_start_pfn = zone->zone_start_pfn;
621c33bc315SXishi Qiu 	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
622c33bc315SXishi Qiu 	unsigned long zone_end_pfn = z;
623815121d2SYasuaki Ishimatsu 	unsigned long pfn;
624815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
625815121d2SYasuaki Ishimatsu 	int nid = zone_to_nid(zone);
626815121d2SYasuaki Ishimatsu 
627815121d2SYasuaki Ishimatsu 	zone_span_writelock(zone);
628815121d2SYasuaki Ishimatsu 	if (zone_start_pfn == start_pfn) {
629815121d2SYasuaki Ishimatsu 		/*
630815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the zone, we need to
631815121d2SYasuaki Ishimatsu 		 * shrink zone->zone_start_pfn and zone->spanned_pages.
632815121d2SYasuaki Ishimatsu 		 * In this case, we find the second smallest valid mem_section
633815121d2SYasuaki Ishimatsu 		 * and shrink the zone to start there.
634815121d2SYasuaki Ishimatsu 		 */
635815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
636815121d2SYasuaki Ishimatsu 						zone_end_pfn);
637815121d2SYasuaki Ishimatsu 		if (pfn) {
638815121d2SYasuaki Ishimatsu 			zone->zone_start_pfn = pfn;
639815121d2SYasuaki Ishimatsu 			zone->spanned_pages = zone_end_pfn - pfn;
640815121d2SYasuaki Ishimatsu 		}
641815121d2SYasuaki Ishimatsu 	} else if (zone_end_pfn == end_pfn) {
642815121d2SYasuaki Ishimatsu 		/*
643815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the zone, we need to
644815121d2SYasuaki Ishimatsu 		 * shrink zone->spanned_pages.
645815121d2SYasuaki Ishimatsu 		 * In this case, we find the second biggest valid mem_section and
646815121d2SYasuaki Ishimatsu 		 * shrink the zone to end there.
647815121d2SYasuaki Ishimatsu 		 */
648815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
649815121d2SYasuaki Ishimatsu 					       start_pfn);
650815121d2SYasuaki Ishimatsu 		if (pfn)
651815121d2SYasuaki Ishimatsu 			zone->spanned_pages = pfn - zone_start_pfn + 1;
652815121d2SYasuaki Ishimatsu 	}
653815121d2SYasuaki Ishimatsu 
654815121d2SYasuaki Ishimatsu 	/*
655815121d2SYasuaki Ishimatsu 	 * If the section is neither the biggest nor the smallest mem_section
656815121d2SYasuaki Ishimatsu 	 * in the zone, it only creates a hole in the zone, so we need not
657815121d2SYasuaki Ishimatsu 	 * change the zone's span. The zone may now contain nothing but holes,
658815121d2SYasuaki Ishimatsu 	 * though, so check whether it still has any valid section.
659815121d2SYasuaki Ishimatsu 	 */
660815121d2SYasuaki Ishimatsu 	pfn = zone_start_pfn;
661815121d2SYasuaki Ishimatsu 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
662815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
663815121d2SYasuaki Ishimatsu 
664815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
665815121d2SYasuaki Ishimatsu 			continue;
666815121d2SYasuaki Ishimatsu 
667815121d2SYasuaki Ishimatsu 		if (page_zone(pfn_to_page(pfn)) != zone)
668815121d2SYasuaki Ishimatsu 			continue;
669815121d2SYasuaki Ishimatsu 
670815121d2SYasuaki Ishimatsu 		 /* If this is the section being removed, skip it */
671815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
672815121d2SYasuaki Ishimatsu 			continue;
673815121d2SYasuaki Ishimatsu 
674815121d2SYasuaki Ishimatsu 		/* If we find a valid section, we have nothing to do */
675815121d2SYasuaki Ishimatsu 		zone_span_writeunlock(zone);
676815121d2SYasuaki Ishimatsu 		return;
677815121d2SYasuaki Ishimatsu 	}
678815121d2SYasuaki Ishimatsu 
679815121d2SYasuaki Ishimatsu 	/* The zone has no valid section */
680815121d2SYasuaki Ishimatsu 	zone->zone_start_pfn = 0;
681815121d2SYasuaki Ishimatsu 	zone->spanned_pages = 0;
682815121d2SYasuaki Ishimatsu 	zone_span_writeunlock(zone);
683815121d2SYasuaki Ishimatsu }
684815121d2SYasuaki Ishimatsu 
685815121d2SYasuaki Ishimatsu static void shrink_pgdat_span(struct pglist_data *pgdat,
686815121d2SYasuaki Ishimatsu 			      unsigned long start_pfn, unsigned long end_pfn)
687815121d2SYasuaki Ishimatsu {
688815121d2SYasuaki Ishimatsu 	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
68983285c72SXishi Qiu 	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
69083285c72SXishi Qiu 	unsigned long pgdat_end_pfn = p;
691815121d2SYasuaki Ishimatsu 	unsigned long pfn;
692815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
693815121d2SYasuaki Ishimatsu 	int nid = pgdat->node_id;
694815121d2SYasuaki Ishimatsu 
695815121d2SYasuaki Ishimatsu 	if (pgdat_start_pfn == start_pfn) {
696815121d2SYasuaki Ishimatsu 		/*
697815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the pgdat, we need to
698815121d2SYasuaki Ishimatsu 		 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
699815121d2SYasuaki Ishimatsu 		 * In this case, we find the second smallest valid mem_section
700815121d2SYasuaki Ishimatsu 		 * and shrink the pgdat to start there.
701815121d2SYasuaki Ishimatsu 		 */
702815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
703815121d2SYasuaki Ishimatsu 						pgdat_end_pfn);
704815121d2SYasuaki Ishimatsu 		if (pfn) {
705815121d2SYasuaki Ishimatsu 			pgdat->node_start_pfn = pfn;
706815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
707815121d2SYasuaki Ishimatsu 		}
708815121d2SYasuaki Ishimatsu 	} else if (pgdat_end_pfn == end_pfn) {
709815121d2SYasuaki Ishimatsu 		/*
710815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the pgdat, we need to
711815121d2SYasuaki Ishimatsu 		 * shrink pgdat->node_spanned_pages.
712815121d2SYasuaki Ishimatsu 		 * In this case, we find the second biggest valid mem_section and
713815121d2SYasuaki Ishimatsu 		 * shrink the pgdat to end there.
714815121d2SYasuaki Ishimatsu 		 */
715815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
716815121d2SYasuaki Ishimatsu 					       start_pfn);
717815121d2SYasuaki Ishimatsu 		if (pfn)
718815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
719815121d2SYasuaki Ishimatsu 	}
720815121d2SYasuaki Ishimatsu 
721815121d2SYasuaki Ishimatsu 	/*
722815121d2SYasuaki Ishimatsu 	 * If the section is neither the biggest nor the smallest mem_section
723815121d2SYasuaki Ishimatsu 	 * in the pgdat, it only creates a hole in the pgdat, so we need not
724815121d2SYasuaki Ishimatsu 	 * change the pgdat's span.
725815121d2SYasuaki Ishimatsu 	 * The pgdat may now contain nothing but holes, though, so check
726815121d2SYasuaki Ishimatsu 	 * whether it still has any valid section.
727815121d2SYasuaki Ishimatsu 	 */
728815121d2SYasuaki Ishimatsu 	pfn = pgdat_start_pfn;
729815121d2SYasuaki Ishimatsu 	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
730815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
731815121d2SYasuaki Ishimatsu 
732815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
733815121d2SYasuaki Ishimatsu 			continue;
734815121d2SYasuaki Ishimatsu 
735815121d2SYasuaki Ishimatsu 		if (pfn_to_nid(pfn) != nid)
736815121d2SYasuaki Ishimatsu 			continue;
737815121d2SYasuaki Ishimatsu 
738815121d2SYasuaki Ishimatsu 		 /* If this is the section being removed, skip it */
739815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
740815121d2SYasuaki Ishimatsu 			continue;
741815121d2SYasuaki Ishimatsu 
742815121d2SYasuaki Ishimatsu 		/* If we find a valid section, we have nothing to do */
743815121d2SYasuaki Ishimatsu 		return;
744815121d2SYasuaki Ishimatsu 	}
745815121d2SYasuaki Ishimatsu 
746815121d2SYasuaki Ishimatsu 	/* The pgdat has no valid section */
747815121d2SYasuaki Ishimatsu 	pgdat->node_start_pfn = 0;
748815121d2SYasuaki Ishimatsu 	pgdat->node_spanned_pages = 0;
749815121d2SYasuaki Ishimatsu }
750815121d2SYasuaki Ishimatsu 
751815121d2SYasuaki Ishimatsu static void __remove_zone(struct zone *zone, unsigned long start_pfn)
752815121d2SYasuaki Ishimatsu {
753815121d2SYasuaki Ishimatsu 	struct pglist_data *pgdat = zone->zone_pgdat;
754815121d2SYasuaki Ishimatsu 	int nr_pages = PAGES_PER_SECTION;
755815121d2SYasuaki Ishimatsu 	int zone_type;
756815121d2SYasuaki Ishimatsu 	unsigned long flags;
757815121d2SYasuaki Ishimatsu 
758815121d2SYasuaki Ishimatsu 	zone_type = zone - pgdat->node_zones;
759815121d2SYasuaki Ishimatsu 
760815121d2SYasuaki Ishimatsu 	pgdat_resize_lock(zone->zone_pgdat, &flags);
761815121d2SYasuaki Ishimatsu 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
762815121d2SYasuaki Ishimatsu 	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
763815121d2SYasuaki Ishimatsu 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
764815121d2SYasuaki Ishimatsu }
765815121d2SYasuaki Ishimatsu 
7664b94ffdcSDan Williams static int __remove_section(struct zone *zone, struct mem_section *ms,
7674b94ffdcSDan Williams 		unsigned long map_offset)
768ea01ea93SBadari Pulavarty {
769815121d2SYasuaki Ishimatsu 	unsigned long start_pfn;
770815121d2SYasuaki Ishimatsu 	int scn_nr;
771ea01ea93SBadari Pulavarty 	int ret = -EINVAL;
772ea01ea93SBadari Pulavarty 
773ea01ea93SBadari Pulavarty 	if (!valid_section(ms))
774ea01ea93SBadari Pulavarty 		return ret;
775ea01ea93SBadari Pulavarty 
776ea01ea93SBadari Pulavarty 	ret = unregister_memory_section(ms);
777ea01ea93SBadari Pulavarty 	if (ret)
778ea01ea93SBadari Pulavarty 		return ret;
779ea01ea93SBadari Pulavarty 
780815121d2SYasuaki Ishimatsu 	scn_nr = __section_nr(ms);
781815121d2SYasuaki Ishimatsu 	start_pfn = section_nr_to_pfn(scn_nr);
782815121d2SYasuaki Ishimatsu 	__remove_zone(zone, start_pfn);
783815121d2SYasuaki Ishimatsu 
7844b94ffdcSDan Williams 	sparse_remove_one_section(zone, ms, map_offset);
785ea01ea93SBadari Pulavarty 	return 0;
786ea01ea93SBadari Pulavarty }
787ea01ea93SBadari Pulavarty 
788ea01ea93SBadari Pulavarty /**
789ea01ea93SBadari Pulavarty  * __remove_pages() - remove sections of pages from a zone
790ea01ea93SBadari Pulavarty  * @zone: zone from which pages need to be removed
791ea01ea93SBadari Pulavarty  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
792ea01ea93SBadari Pulavarty  * @nr_pages: number of pages to remove (must be multiple of section size)
793ea01ea93SBadari Pulavarty  *
794ea01ea93SBadari Pulavarty  * Generic helper function to remove section mappings and sysfs entries
795ea01ea93SBadari Pulavarty  * for the section of the memory we are removing. Caller needs to make
796ea01ea93SBadari Pulavarty  * sure that pages are marked reserved and zones are adjusted properly by
797ea01ea93SBadari Pulavarty  * calling offline_pages().
798ea01ea93SBadari Pulavarty  */
799ea01ea93SBadari Pulavarty int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
800ea01ea93SBadari Pulavarty 		 unsigned long nr_pages)
801ea01ea93SBadari Pulavarty {
802fe74ebb1SToshi Kani 	unsigned long i;
8034b94ffdcSDan Williams 	unsigned long map_offset = 0;
8044b94ffdcSDan Williams 	int sections_to_remove, ret = 0;
8054b94ffdcSDan Williams 
8064b94ffdcSDan Williams 	/* In the ZONE_DEVICE case the device driver owns the memory region */
8074b94ffdcSDan Williams 	if (is_dev_zone(zone)) {
8084b94ffdcSDan Williams 		struct page *page = pfn_to_page(phys_start_pfn);
8094b94ffdcSDan Williams 		struct vmem_altmap *altmap;
8104b94ffdcSDan Williams 
8114b94ffdcSDan Williams 		altmap = to_vmem_altmap((unsigned long) page);
8124b94ffdcSDan Williams 		if (altmap)
8134b94ffdcSDan Williams 			map_offset = vmem_altmap_offset(altmap);
8144b94ffdcSDan Williams 	} else {
815fe74ebb1SToshi Kani 		resource_size_t start, size;
8164b94ffdcSDan Williams 
8174b94ffdcSDan Williams 		start = phys_start_pfn << PAGE_SHIFT;
8184b94ffdcSDan Williams 		size = nr_pages * PAGE_SIZE;
8194b94ffdcSDan Williams 
8204b94ffdcSDan Williams 		ret = release_mem_region_adjustable(&iomem_resource, start,
8214b94ffdcSDan Williams 					size);
8224b94ffdcSDan Williams 		if (ret) {
8234b94ffdcSDan Williams 			resource_size_t endres = start + size - 1;
8244b94ffdcSDan Williams 
8254b94ffdcSDan Williams 			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
8264b94ffdcSDan Williams 					&start, &endres, ret);
8274b94ffdcSDan Williams 		}
8284b94ffdcSDan Williams 	}
829ea01ea93SBadari Pulavarty 
8307cf91a98SJoonsoo Kim 	clear_zone_contiguous(zone);
8317cf91a98SJoonsoo Kim 
832ea01ea93SBadari Pulavarty 	/*
833ea01ea93SBadari Pulavarty 	 * We can only remove entire sections
834ea01ea93SBadari Pulavarty 	 */
835ea01ea93SBadari Pulavarty 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
836ea01ea93SBadari Pulavarty 	BUG_ON(nr_pages % PAGES_PER_SECTION);
837ea01ea93SBadari Pulavarty 
838ea01ea93SBadari Pulavarty 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
839ea01ea93SBadari Pulavarty 	for (i = 0; i < sections_to_remove; i++) {
840ea01ea93SBadari Pulavarty 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
8414b94ffdcSDan Williams 
8424b94ffdcSDan Williams 		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
8434b94ffdcSDan Williams 		map_offset = 0;
844ea01ea93SBadari Pulavarty 		if (ret)
845ea01ea93SBadari Pulavarty 			break;
846ea01ea93SBadari Pulavarty 	}
8477cf91a98SJoonsoo Kim 
8487cf91a98SJoonsoo Kim 	set_zone_contiguous(zone);
8497cf91a98SJoonsoo Kim 
850ea01ea93SBadari Pulavarty 	return ret;
851ea01ea93SBadari Pulavarty }
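
/*
 * Illustrative sketch (hypothetical helper, arch details omitted): an
 * architecture's arch_remove_memory() typically resolves the zone from the
 * first pfn and hands the section-aligned range to __remove_pages(), after
 * tearing down its direct mapping.
 */
static int __maybe_unused example_arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone = page_zone(pfn_to_page(start_pfn));

	return __remove_pages(zone, start_pfn, nr_pages);
}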
8524edd7cefSDavid Rientjes #endif /* CONFIG_MEMORY_HOTREMOVE */
853ea01ea93SBadari Pulavarty 
8549d0ad8caSDaniel Kiper int set_online_page_callback(online_page_callback_t callback)
8559d0ad8caSDaniel Kiper {
8569d0ad8caSDaniel Kiper 	int rc = -EINVAL;
8579d0ad8caSDaniel Kiper 
858bfc8c901SVladimir Davydov 	get_online_mems();
859bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
8609d0ad8caSDaniel Kiper 
8619d0ad8caSDaniel Kiper 	if (online_page_callback == generic_online_page) {
8629d0ad8caSDaniel Kiper 		online_page_callback = callback;
8639d0ad8caSDaniel Kiper 		rc = 0;
8649d0ad8caSDaniel Kiper 	}
8659d0ad8caSDaniel Kiper 
866bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
867bfc8c901SVladimir Davydov 	put_online_mems();
8689d0ad8caSDaniel Kiper 
8699d0ad8caSDaniel Kiper 	return rc;
8709d0ad8caSDaniel Kiper }
8719d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(set_online_page_callback);
8729d0ad8caSDaniel Kiper 
8739d0ad8caSDaniel Kiper int restore_online_page_callback(online_page_callback_t callback)
8749d0ad8caSDaniel Kiper {
8759d0ad8caSDaniel Kiper 	int rc = -EINVAL;
8769d0ad8caSDaniel Kiper 
877bfc8c901SVladimir Davydov 	get_online_mems();
878bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
8799d0ad8caSDaniel Kiper 
8809d0ad8caSDaniel Kiper 	if (online_page_callback == callback) {
8819d0ad8caSDaniel Kiper 		online_page_callback = generic_online_page;
8829d0ad8caSDaniel Kiper 		rc = 0;
8839d0ad8caSDaniel Kiper 	}
8849d0ad8caSDaniel Kiper 
885bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
886bfc8c901SVladimir Davydov 	put_online_mems();
8879d0ad8caSDaniel Kiper 
8889d0ad8caSDaniel Kiper 	return rc;
8899d0ad8caSDaniel Kiper }
8909d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(restore_online_page_callback);
8919d0ad8caSDaniel Kiper 
8929d0ad8caSDaniel Kiper void __online_page_set_limits(struct page *page)
893180c06efSJeremy Fitzhardinge {
8949d0ad8caSDaniel Kiper }
8959d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_set_limits);
8969d0ad8caSDaniel Kiper 
8979d0ad8caSDaniel Kiper void __online_page_increment_counters(struct page *page)
8989d0ad8caSDaniel Kiper {
8993dcc0571SJiang Liu 	adjust_managed_page_count(page, 1);
9009d0ad8caSDaniel Kiper }
9019d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_increment_counters);
902180c06efSJeremy Fitzhardinge 
9039d0ad8caSDaniel Kiper void __online_page_free(struct page *page)
9049d0ad8caSDaniel Kiper {
9053dcc0571SJiang Liu 	__free_reserved_page(page);
906180c06efSJeremy Fitzhardinge }
9079d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_free);
9089d0ad8caSDaniel Kiper 
9099d0ad8caSDaniel Kiper static void generic_online_page(struct page *page)
9109d0ad8caSDaniel Kiper {
9119d0ad8caSDaniel Kiper 	__online_page_set_limits(page);
9129d0ad8caSDaniel Kiper 	__online_page_increment_counters(page);
9139d0ad8caSDaniel Kiper 	__online_page_free(page);
9149d0ad8caSDaniel Kiper }
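
/*
 * Illustrative sketch (hypothetical driver code): a ballooning driver can
 * divert newly onlined pages to itself instead of releasing them to the
 * page allocator by overriding the onlining callback. The example_* names
 * are made up; real users include the Xen and Hyper-V balloon drivers.
 */
static void example_online_page(struct page *page)
{
	__online_page_set_limits(page);
	/* keep the page for the driver instead of freeing it to the allocator */
}

static int __maybe_unused example_balloon_init(void)
{
	int rc = set_online_page_callback(&example_online_page);

	if (rc)
		pr_warn("online_page callback is already overridden\n");
	return rc;
}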
915180c06efSJeremy Fitzhardinge 
91675884fb1SKAMEZAWA Hiroyuki static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
91775884fb1SKAMEZAWA Hiroyuki 			void *arg)
9183947be19SDave Hansen {
9193947be19SDave Hansen 	unsigned long i;
92075884fb1SKAMEZAWA Hiroyuki 	unsigned long onlined_pages = *(unsigned long *)arg;
92175884fb1SKAMEZAWA Hiroyuki 	struct page *page;
9222d070eabSMichal Hocko 
92375884fb1SKAMEZAWA Hiroyuki 	if (PageReserved(pfn_to_page(start_pfn)))
92475884fb1SKAMEZAWA Hiroyuki 		for (i = 0; i < nr_pages; i++) {
92575884fb1SKAMEZAWA Hiroyuki 			page = pfn_to_page(start_pfn + i);
9269d0ad8caSDaniel Kiper 			(*online_page_callback)(page);
92775884fb1SKAMEZAWA Hiroyuki 			onlined_pages++;
92875884fb1SKAMEZAWA Hiroyuki 		}
9292d070eabSMichal Hocko 
9302d070eabSMichal Hocko 	online_mem_sections(start_pfn, start_pfn + nr_pages);
9312d070eabSMichal Hocko 
93275884fb1SKAMEZAWA Hiroyuki 	*(unsigned long *)arg = onlined_pages;
93375884fb1SKAMEZAWA Hiroyuki 	return 0;
93475884fb1SKAMEZAWA Hiroyuki }
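
/*
 * Illustrative sketch (hypothetical helper, error handling omitted):
 * online_pages() drives the callback above through walk_system_ram_range(),
 * which invokes it once per contiguous piece of System RAM in the requested
 * range, roughly like this.
 */
static unsigned long __maybe_unused example_online_range(unsigned long pfn,
							 unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;

	walk_system_ram_range(pfn, nr_pages, &onlined_pages,
			      online_pages_range);
	return onlined_pages;
}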
93575884fb1SKAMEZAWA Hiroyuki 
93609285af7SLai Jiangshan #ifdef CONFIG_MOVABLE_NODE
93779a4dcefSTang Chen /*
93879a4dcefSTang Chen  * When CONFIG_MOVABLE_NODE is set, we permit onlining a node which doesn't have
93979a4dcefSTang Chen  * normal memory.
94079a4dcefSTang Chen  */
941c8f95657SMichal Hocko static bool can_online_high_movable(int nid)
94209285af7SLai Jiangshan {
94309285af7SLai Jiangshan 	return true;
94409285af7SLai Jiangshan }
94579a4dcefSTang Chen #else /* CONFIG_MOVABLE_NODE */
94674d42d8fSLai Jiangshan /* ensure every online node has NORMAL memory */
947c8f95657SMichal Hocko static bool can_online_high_movable(int nid)
94874d42d8fSLai Jiangshan {
949c8f95657SMichal Hocko 	return node_state(nid, N_NORMAL_MEMORY);
95074d42d8fSLai Jiangshan }
95179a4dcefSTang Chen #endif /* CONFIG_MOVABLE_NODE */
95274d42d8fSLai Jiangshan 
953d9713679SLai Jiangshan /* check which state of node_states will be changed when online memory */
954d9713679SLai Jiangshan static void node_states_check_changes_online(unsigned long nr_pages,
955d9713679SLai Jiangshan 	struct zone *zone, struct memory_notify *arg)
956d9713679SLai Jiangshan {
957d9713679SLai Jiangshan 	int nid = zone_to_nid(zone);
958d9713679SLai Jiangshan 	enum zone_type zone_last = ZONE_NORMAL;
959d9713679SLai Jiangshan 
960d9713679SLai Jiangshan 	/*
9616715ddf9SLai Jiangshan 	 * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
9626715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
9636715ddf9SLai Jiangshan 	 * set zone_last to ZONE_NORMAL.
964d9713679SLai Jiangshan 	 *
9656715ddf9SLai Jiangshan 	 * If we have neither HIGHMEM nor a movable node,
9666715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
9676715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
968d9713679SLai Jiangshan 	 */
9696715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
970d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
971d9713679SLai Jiangshan 
972d9713679SLai Jiangshan 	/*
973d9713679SLai Jiangshan 	 * if the memory to be onlined is in a zone of 0...zone_last, and
974d9713679SLai Jiangshan 	 * the node's zones 0...zone_last have no memory before onlining, we
975d9713679SLai Jiangshan 	 * will need to set the node in node_states[N_NORMAL_MEMORY] after
976d9713679SLai Jiangshan 	 * the memory is onlined.
977d9713679SLai Jiangshan 	 */
978d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
979d9713679SLai Jiangshan 		arg->status_change_nid_normal = nid;
980d9713679SLai Jiangshan 	else
981d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
982d9713679SLai Jiangshan 
9836715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
9846715ddf9SLai Jiangshan 	/*
9856715ddf9SLai Jiangshan 	 * If we have a movable node, node_states[N_HIGH_MEMORY]
9866715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
9876715ddf9SLai Jiangshan 	 * set zone_last to ZONE_HIGHMEM.
9886715ddf9SLai Jiangshan 	 *
9896715ddf9SLai Jiangshan 	 * If we don't have a movable node, node_states[N_NORMAL_MEMORY]
9906715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
9916715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
9926715ddf9SLai Jiangshan 	 */
9936715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
9946715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
9956715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
9966715ddf9SLai Jiangshan 
9976715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
9986715ddf9SLai Jiangshan 		arg->status_change_nid_high = nid;
9996715ddf9SLai Jiangshan 	else
10006715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
10016715ddf9SLai Jiangshan #else
10026715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
10036715ddf9SLai Jiangshan #endif
10046715ddf9SLai Jiangshan 
1005d9713679SLai Jiangshan 	/*
1006d9713679SLai Jiangshan 	 * if the node doesn't have memory before onlining, we will need to
10076715ddf9SLai Jiangshan 	 * set the node in node_states[N_MEMORY] after the memory
1008d9713679SLai Jiangshan 	 * is onlined.
1009d9713679SLai Jiangshan 	 */
10106715ddf9SLai Jiangshan 	if (!node_state(nid, N_MEMORY))
1011d9713679SLai Jiangshan 		arg->status_change_nid = nid;
1012d9713679SLai Jiangshan 	else
1013d9713679SLai Jiangshan 		arg->status_change_nid = -1;
1014d9713679SLai Jiangshan }
1015d9713679SLai Jiangshan 
1016d9713679SLai Jiangshan static void node_states_set_node(int node, struct memory_notify *arg)
1017d9713679SLai Jiangshan {
1018d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
1019d9713679SLai Jiangshan 		node_set_state(node, N_NORMAL_MEMORY);
1020d9713679SLai Jiangshan 
10216715ddf9SLai Jiangshan 	if (arg->status_change_nid_high >= 0)
1022d9713679SLai Jiangshan 		node_set_state(node, N_HIGH_MEMORY);
10236715ddf9SLai Jiangshan 
10246715ddf9SLai Jiangshan 	node_set_state(node, N_MEMORY);
1025d9713679SLai Jiangshan }
1026d9713679SLai Jiangshan 
1027f1dd2cd1SMichal Hocko bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
1028df429ac0SReza Arbab {
1029f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
1030f1dd2cd1SMichal Hocko 	struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
1031*c246a213SMichal Hocko 	struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);
1032df429ac0SReza Arbab 
1033f1dd2cd1SMichal Hocko 	/*
1034f1dd2cd1SMichal Hocko 	 * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
1035f1dd2cd1SMichal Hocko 	 * physically before ZONE_MOVABLE. All we need is that they do not
1036f1dd2cd1SMichal Hocko 	 * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE
1037f1dd2cd1SMichal Hocko 	 * though so let's stick with it for simplicity for now.
1038f1dd2cd1SMichal Hocko 	 * TODO make sure we do not overlap with ZONE_DEVICE
1039f1dd2cd1SMichal Hocko 	 */
1040f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_KERNEL) {
1041f1dd2cd1SMichal Hocko 		if (zone_is_empty(movable_zone))
10428a1f780eSYasuaki Ishimatsu 			return true;
1043f1dd2cd1SMichal Hocko 		return movable_zone->zone_start_pfn >= pfn + nr_pages;
1044f1dd2cd1SMichal Hocko 	} else if (online_type == MMOP_ONLINE_MOVABLE) {
1045*c246a213SMichal Hocko 		return zone_end_pfn(default_zone) <= pfn;
1046f1dd2cd1SMichal Hocko 	}
1047f1dd2cd1SMichal Hocko 
1048f1dd2cd1SMichal Hocko 	/* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
1049f1dd2cd1SMichal Hocko 	return online_type == MMOP_ONLINE_KEEP;
1050f1dd2cd1SMichal Hocko }
1051f1dd2cd1SMichal Hocko 
1052f1dd2cd1SMichal Hocko static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
1053f1dd2cd1SMichal Hocko 		unsigned long nr_pages)
1054f1dd2cd1SMichal Hocko {
1055f1dd2cd1SMichal Hocko 	unsigned long old_end_pfn = zone_end_pfn(zone);
1056f1dd2cd1SMichal Hocko 
1057f1dd2cd1SMichal Hocko 	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
1058f1dd2cd1SMichal Hocko 		zone->zone_start_pfn = start_pfn;
1059f1dd2cd1SMichal Hocko 
1060f1dd2cd1SMichal Hocko 	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
1061f1dd2cd1SMichal Hocko }
1062f1dd2cd1SMichal Hocko 
1063f1dd2cd1SMichal Hocko static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
1064f1dd2cd1SMichal Hocko                                      unsigned long nr_pages)
1065f1dd2cd1SMichal Hocko {
1066f1dd2cd1SMichal Hocko 	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
1067f1dd2cd1SMichal Hocko 
1068f1dd2cd1SMichal Hocko 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
1069f1dd2cd1SMichal Hocko 		pgdat->node_start_pfn = start_pfn;
1070f1dd2cd1SMichal Hocko 
1071f1dd2cd1SMichal Hocko 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
1072f1dd2cd1SMichal Hocko }
1073f1dd2cd1SMichal Hocko 
1074f1dd2cd1SMichal Hocko void move_pfn_range_to_zone(struct zone *zone,
1075f1dd2cd1SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
1076f1dd2cd1SMichal Hocko {
1077f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = zone->zone_pgdat;
1078f1dd2cd1SMichal Hocko 	int nid = pgdat->node_id;
1079f1dd2cd1SMichal Hocko 	unsigned long flags;
1080f1dd2cd1SMichal Hocko 
1081f1dd2cd1SMichal Hocko 	if (zone_is_empty(zone))
1082f1dd2cd1SMichal Hocko 		init_currently_empty_zone(zone, start_pfn, nr_pages);
1083f1dd2cd1SMichal Hocko 
1084f1dd2cd1SMichal Hocko 	clear_zone_contiguous(zone);
1085f1dd2cd1SMichal Hocko 
1086f1dd2cd1SMichal Hocko 	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
1087f1dd2cd1SMichal Hocko 	pgdat_resize_lock(pgdat, &flags);
1088f1dd2cd1SMichal Hocko 	zone_span_writelock(zone);
1089f1dd2cd1SMichal Hocko 	resize_zone_range(zone, start_pfn, nr_pages);
1090f1dd2cd1SMichal Hocko 	zone_span_writeunlock(zone);
1091f1dd2cd1SMichal Hocko 	resize_pgdat_range(pgdat, start_pfn, nr_pages);
1092f1dd2cd1SMichal Hocko 	pgdat_resize_unlock(pgdat, &flags);
1093f1dd2cd1SMichal Hocko 
1094f1dd2cd1SMichal Hocko 	/*
1095f1dd2cd1SMichal Hocko 	 * TODO now we have a visible range of pages which are not associated
1096f1dd2cd1SMichal Hocko 	 * with their zone properly. Not nice but set_pfnblock_flags_mask
1097f1dd2cd1SMichal Hocko 	 * expects the zone to span the pfn range. All the pages in the range
1098f1dd2cd1SMichal Hocko 	 * are reserved, so nobody should be touching them and we should be safe.
1099f1dd2cd1SMichal Hocko 	 */
1100f1dd2cd1SMichal Hocko 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
1101f1dd2cd1SMichal Hocko 
1102f1dd2cd1SMichal Hocko 	set_zone_contiguous(zone);
1103f1dd2cd1SMichal Hocko }
1104f1dd2cd1SMichal Hocko 
1105f1dd2cd1SMichal Hocko /*
1106*c246a213SMichal Hocko  * Returns a default kernel memory zone for the given pfn range.
1107*c246a213SMichal Hocko  * If no kernel zone covers this pfn range it will automatically go
1108*c246a213SMichal Hocko  * to ZONE_NORMAL.
1109*c246a213SMichal Hocko  */
1110*c246a213SMichal Hocko struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
1111*c246a213SMichal Hocko 		unsigned long nr_pages)
1112*c246a213SMichal Hocko {
1113*c246a213SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
1114*c246a213SMichal Hocko 	int zid;
1115*c246a213SMichal Hocko 
1116*c246a213SMichal Hocko 	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
1117*c246a213SMichal Hocko 		struct zone *zone = &pgdat->node_zones[zid];
1118*c246a213SMichal Hocko 
1119*c246a213SMichal Hocko 		if (zone_intersects(zone, start_pfn, nr_pages))
1120*c246a213SMichal Hocko 			return zone;
1121*c246a213SMichal Hocko 	}
1122*c246a213SMichal Hocko 
1123*c246a213SMichal Hocko 	return &pgdat->node_zones[ZONE_NORMAL];
1124*c246a213SMichal Hocko }
1125*c246a213SMichal Hocko 
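/*
 * Editor's illustration (hypothetical values): on a node whose ZONE_DMA32
 * spans pfns [0x1000, 0x100000) and whose ZONE_NORMAL spans
 * [0x100000, 0x440000), default_zone_for_pfn(nid, 0x80000, 0x8000) returns
 * the DMA32 zone because the range intersects it, while a range starting at
 * pfn 0x500000, covered by no kernel zone, falls back to ZONE_NORMAL.
 */
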
1126*c246a213SMichal Hocko /*
1127f1dd2cd1SMichal Hocko  * Associates the given pfn range with the given node and the zone appropriate
1128f1dd2cd1SMichal Hocko  * for the given online type.
1129f1dd2cd1SMichal Hocko  */
1130f1dd2cd1SMichal Hocko static struct zone * __meminit move_pfn_range(int online_type, int nid,
1131f1dd2cd1SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
1132f1dd2cd1SMichal Hocko {
1133f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
1134*c246a213SMichal Hocko 	struct zone *zone = default_zone_for_pfn(nid, start_pfn, nr_pages);
1135f1dd2cd1SMichal Hocko 
1136f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_KEEP) {
1137f1dd2cd1SMichal Hocko 		struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
1138f1dd2cd1SMichal Hocko 		/*
1139a69578a1SMichal Hocko 		 * MMOP_ONLINE_KEEP defaults to MMOP_ONLINE_KERNEL but uses the
1140a69578a1SMichal Hocko 		 * movable zone if that is not possible (e.g. we are within
1141a69578a1SMichal Hocko 		 * or past the existing movable zone)
1142f1dd2cd1SMichal Hocko 		 */
1143a69578a1SMichal Hocko 		if (!allow_online_pfn_range(nid, start_pfn, nr_pages,
1144a69578a1SMichal Hocko 					MMOP_ONLINE_KERNEL))
1145f1dd2cd1SMichal Hocko 			zone = movable_zone;
1146f1dd2cd1SMichal Hocko 	} else if (online_type == MMOP_ONLINE_MOVABLE) {
1147f1dd2cd1SMichal Hocko 		zone = &pgdat->node_zones[ZONE_MOVABLE];
1148f1dd2cd1SMichal Hocko 	}
1149f1dd2cd1SMichal Hocko 
1150f1dd2cd1SMichal Hocko 	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
1151f1dd2cd1SMichal Hocko 	return zone;
1152df429ac0SReza Arbab }
115375884fb1SKAMEZAWA Hiroyuki 
115430467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
1155511c2abaSLai Jiangshan int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
115675884fb1SKAMEZAWA Hiroyuki {
1157aa47228aSCody P Schafer 	unsigned long flags;
11583947be19SDave Hansen 	unsigned long onlined_pages = 0;
11593947be19SDave Hansen 	struct zone *zone;
11606811378eSYasunori Goto 	int need_zonelists_rebuild = 0;
11617b78d335SYasunori Goto 	int nid;
11627b78d335SYasunori Goto 	int ret;
11637b78d335SYasunori Goto 	struct memory_notify arg;
11643947be19SDave Hansen 
1165f1dd2cd1SMichal Hocko 	nid = pfn_to_nid(pfn);
1166f1dd2cd1SMichal Hocko 	if (!allow_online_pfn_range(nid, pfn, nr_pages, online_type))
116730467e0bSDavid Rientjes 		return -EINVAL;
116874d42d8fSLai Jiangshan 
1169f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_MOVABLE && !can_online_high_movable(nid))
11708a1f780eSYasuaki Ishimatsu 		return -EINVAL;
1171e51e6c8fSReza Arbab 
1172f1dd2cd1SMichal Hocko 	/* associate pfn range with the zone */
1173f1dd2cd1SMichal Hocko 	zone = move_pfn_range(online_type, nid, pfn, nr_pages);
1174511c2abaSLai Jiangshan 
11757b78d335SYasunori Goto 	arg.start_pfn = pfn;
11767b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
1177d9713679SLai Jiangshan 	node_states_check_changes_online(nr_pages, zone, &arg);
11787b78d335SYasunori Goto 
11797b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
11807b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
1181e33e33b4SChen Yucong 	if (ret)
1182e33e33b4SChen Yucong 		goto failed_addition;
1183e33e33b4SChen Yucong 
11843947be19SDave Hansen 	/*
11856811378eSYasunori Goto 	 * If this zone is not populated, then it is not in the zonelist.
11866811378eSYasunori Goto 	 * This means the page allocator ignores this zone.
11876811378eSYasunori Goto 	 * So, zonelist must be updated after online.
11886811378eSYasunori Goto 	 */
11894eaf3f64SHaicheng Li 	mutex_lock(&zonelists_mutex);
11906dcd73d7SWen Congyang 	if (!populated_zone(zone)) {
11916811378eSYasunori Goto 		need_zonelists_rebuild = 1;
11926dcd73d7SWen Congyang 		build_all_zonelists(NULL, zone);
11936dcd73d7SWen Congyang 	}
11946811378eSYasunori Goto 
1195908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
119675884fb1SKAMEZAWA Hiroyuki 		online_pages_range);
1197fd8a4221SGeoff Levand 	if (ret) {
11986dcd73d7SWen Congyang 		if (need_zonelists_rebuild)
11996dcd73d7SWen Congyang 			zone_pcp_reset(zone);
12004eaf3f64SHaicheng Li 		mutex_unlock(&zonelists_mutex);
1201e33e33b4SChen Yucong 		goto failed_addition;
1202fd8a4221SGeoff Levand 	}
1203fd8a4221SGeoff Levand 
12043947be19SDave Hansen 	zone->present_pages += onlined_pages;
1205aa47228aSCody P Schafer 
1206aa47228aSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
1207f2937be5SYasunori Goto 	zone->zone_pgdat->node_present_pages += onlined_pages;
1208aa47228aSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
1209aa47228aSCody P Schafer 
121008dff7b7SJiang Liu 	if (onlined_pages) {
1211e888ca35SVlastimil Babka 		node_states_set_node(nid, &arg);
12121f522509SHaicheng Li 		if (need_zonelists_rebuild)
12136dcd73d7SWen Congyang 			build_all_zonelists(NULL, NULL);
12141f522509SHaicheng Li 		else
1215112067f0SShaohua Li 			zone_pcp_update(zone);
121608dff7b7SJiang Liu 	}
12171f522509SHaicheng Li 
12184eaf3f64SHaicheng Li 	mutex_unlock(&zonelists_mutex);
12191b79acc9SKOSAKI Motohiro 
12201b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
12211b79acc9SKOSAKI Motohiro 
1222698b1b30SVlastimil Babka 	if (onlined_pages) {
1223e888ca35SVlastimil Babka 		kswapd_run(nid);
1224698b1b30SVlastimil Babka 		kcompactd_run(nid);
1225698b1b30SVlastimil Babka 	}
122661b13993SDave Hansen 
12275a4d4361SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
12282f7f24ecSKent Liu 
12292d1d43f6SChandra Seetharaman 	writeback_set_ratelimit();
12307b78d335SYasunori Goto 
12317b78d335SYasunori Goto 	if (onlined_pages)
12327b78d335SYasunori Goto 		memory_notify(MEM_ONLINE, &arg);
123330467e0bSDavid Rientjes 	return 0;
1234e33e33b4SChen Yucong 
1235e33e33b4SChen Yucong failed_addition:
1236e33e33b4SChen Yucong 	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1237e33e33b4SChen Yucong 		 (unsigned long long) pfn << PAGE_SHIFT,
1238e33e33b4SChen Yucong 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1239e33e33b4SChen Yucong 	memory_notify(MEM_CANCEL_ONLINE, &arg);
1240e33e33b4SChen Yucong 	return ret;
12413947be19SDave Hansen }
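
/*
 * Editor's sketch of the expected calling convention (not a caller that
 * exists in this file): online_pages() must run under mem_hotplug_begin(),
 * and in practice is reached from the memory sysfs/device layer with a
 * section-aligned range.  The helper name and range below are hypothetical.
 */
static int __maybe_unused example_online_one_section(unsigned long start_pfn)
{
	int ret;

	mem_hotplug_begin();
	ret = online_pages(start_pfn, PAGES_PER_SECTION, MMOP_ONLINE_KEEP);
	mem_hotplug_done();

	return ret;
}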
124253947027SKeith Mannthey #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1243bc02af93SYasunori Goto 
12440bd85420STang Chen static void reset_node_present_pages(pg_data_t *pgdat)
12450bd85420STang Chen {
12460bd85420STang Chen 	struct zone *z;
12470bd85420STang Chen 
12480bd85420STang Chen 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
12490bd85420STang Chen 		z->present_pages = 0;
12500bd85420STang Chen 
12510bd85420STang Chen 	pgdat->node_present_pages = 0;
12520bd85420STang Chen }
12530bd85420STang Chen 
1254e1319331SHidetoshi Seto /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1255e1319331SHidetoshi Seto static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
12569af3c2deSYasunori Goto {
12579af3c2deSYasunori Goto 	struct pglist_data *pgdat;
12589af3c2deSYasunori Goto 	unsigned long zones_size[MAX_NR_ZONES] = {0};
12599af3c2deSYasunori Goto 	unsigned long zholes_size[MAX_NR_ZONES] = {0};
1260c8e861a5SFabian Frederick 	unsigned long start_pfn = PFN_DOWN(start);
12619af3c2deSYasunori Goto 
1262a1e565aaSTang Chen 	pgdat = NODE_DATA(nid);
1263a1e565aaSTang Chen 	if (!pgdat) {
12649af3c2deSYasunori Goto 		pgdat = arch_alloc_nodedata(nid);
12659af3c2deSYasunori Goto 		if (!pgdat)
12669af3c2deSYasunori Goto 			return NULL;
12679af3c2deSYasunori Goto 
12689af3c2deSYasunori Goto 		arch_refresh_nodedata(nid, pgdat);
1269b0dc3a34SGu Zheng 	} else {
1270e716f2ebSMel Gorman 		/*
1271e716f2ebSMel Gorman 		 * Reset the nr_zones, order and classzone_idx before reuse.
1272e716f2ebSMel Gorman 		 * Note that kswapd will init kswapd_classzone_idx properly
1273e716f2ebSMel Gorman 		 * when it starts in the near future.
1274e716f2ebSMel Gorman 		 */
1275b0dc3a34SGu Zheng 		pgdat->nr_zones = 0;
127638087d9bSMel Gorman 		pgdat->kswapd_order = 0;
127738087d9bSMel Gorman 		pgdat->kswapd_classzone_idx = 0;
1278a1e565aaSTang Chen 	}
12799af3c2deSYasunori Goto 
12809af3c2deSYasunori Goto 	/* we can use NODE_DATA(nid) from here */
12819af3c2deSYasunori Goto 
12829af3c2deSYasunori Goto 	/* init node's zones as empty zones; we don't have any present pages. */
12839109fb7bSJohannes Weiner 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
12845830169fSReza Arbab 	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
12859af3c2deSYasunori Goto 
1286959ecc48SKAMEZAWA Hiroyuki 	/*
1287959ecc48SKAMEZAWA Hiroyuki 	 * The node we allocated has no zone fallback lists. To avoid
1288959ecc48SKAMEZAWA Hiroyuki 	 * accessing a not-initialized zonelist, build one here.
1289959ecc48SKAMEZAWA Hiroyuki 	 */
1290f957db4fSDavid Rientjes 	mutex_lock(&zonelists_mutex);
12919adb62a5SJiang Liu 	build_all_zonelists(pgdat, NULL);
1292f957db4fSDavid Rientjes 	mutex_unlock(&zonelists_mutex);
1293959ecc48SKAMEZAWA Hiroyuki 
1294f784a3f1STang Chen 	/*
1295f784a3f1STang Chen 	 * zone->managed_pages is set to an approximate value in
1296f784a3f1STang Chen 	 * free_area_init_core(), which would cause
1297f784a3f1STang Chen 	 * /sys/device/system/node/nodeX/meminfo to report wrong data.
1298f784a3f1STang Chen 	 * So reset it to 0 before any memory is onlined.
1299f784a3f1STang Chen 	 */
1300f784a3f1STang Chen 	reset_node_managed_pages(pgdat);
1301f784a3f1STang Chen 
13020bd85420STang Chen 	/*
13030bd85420STang Chen 	 * When memory is hot-added, all the memory is in offline state. So
13040bd85420STang Chen 	 * clear all zones' present_pages because they will be updated in
13050bd85420STang Chen 	 * online_pages() and offline_pages().
13060bd85420STang Chen 	 */
13070bd85420STang Chen 	reset_node_present_pages(pgdat);
13080bd85420STang Chen 
13099af3c2deSYasunori Goto 	return pgdat;
13109af3c2deSYasunori Goto }
13119af3c2deSYasunori Goto 
13129af3c2deSYasunori Goto static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
13139af3c2deSYasunori Goto {
13149af3c2deSYasunori Goto 	arch_refresh_nodedata(nid, NULL);
13155830169fSReza Arbab 	free_percpu(pgdat->per_cpu_nodestats);
13169af3c2deSYasunori Goto 	arch_free_nodedata(pgdat);
13179af3c2deSYasunori Goto 	return;
13189af3c2deSYasunori Goto }
13199af3c2deSYasunori Goto 
13200a547039SKAMEZAWA Hiroyuki 
132101b0f197SToshi Kani /**
132201b0f197SToshi Kani  * try_online_node - online a node if offlined
132301b0f197SToshi Kani  *
1324cf23422bSminskey guo  * called by cpu_up() to online a node without onlined memory.
1325cf23422bSminskey guo  */
132601b0f197SToshi Kani int try_online_node(int nid)
1327cf23422bSminskey guo {
1328cf23422bSminskey guo 	pg_data_t	*pgdat;
1329cf23422bSminskey guo 	int	ret;
1330cf23422bSminskey guo 
133101b0f197SToshi Kani 	if (node_online(nid))
133201b0f197SToshi Kani 		return 0;
133301b0f197SToshi Kani 
1334bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1335cf23422bSminskey guo 	pgdat = hotadd_new_pgdat(nid, 0);
13367553e8f2SDavid Rientjes 	if (!pgdat) {
133701b0f197SToshi Kani 		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1338cf23422bSminskey guo 		ret = -ENOMEM;
1339cf23422bSminskey guo 		goto out;
1340cf23422bSminskey guo 	}
1341cf23422bSminskey guo 	node_set_online(nid);
1342cf23422bSminskey guo 	ret = register_one_node(nid);
1343cf23422bSminskey guo 	BUG_ON(ret);
1344cf23422bSminskey guo 
134501b0f197SToshi Kani 	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
134601b0f197SToshi Kani 		mutex_lock(&zonelists_mutex);
134701b0f197SToshi Kani 		build_all_zonelists(NULL, NULL);
134801b0f197SToshi Kani 		mutex_unlock(&zonelists_mutex);
134901b0f197SToshi Kani 	}
135001b0f197SToshi Kani 
1351cf23422bSminskey guo out:
1352bfc8c901SVladimir Davydov 	mem_hotplug_done();
1353cf23422bSminskey guo 	return ret;
1354cf23422bSminskey guo }
1355cf23422bSminskey guo 
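/*
 * Editor's sketch (hypothetical caller): the cpu hotplug path uses this to
 * make sure the node backing a CPU has a pgdat and is marked online before
 * the CPU itself is brought up.  cpu_to_node() from <linux/topology.h> is
 * assumed to be available here.
 */
static int __maybe_unused example_prepare_node_for_cpu(unsigned int cpu)
{
	return try_online_node(cpu_to_node(cpu));
}
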
135627356f54SToshi Kani static int check_hotplug_memory_range(u64 start, u64 size)
135727356f54SToshi Kani {
1358c8e861a5SFabian Frederick 	u64 start_pfn = PFN_DOWN(start);
135927356f54SToshi Kani 	u64 nr_pages = size >> PAGE_SHIFT;
136027356f54SToshi Kani 
136127356f54SToshi Kani 	/* Memory range must be aligned with section */
136227356f54SToshi Kani 	if ((start_pfn & ~PAGE_SECTION_MASK) ||
136327356f54SToshi Kani 	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
136427356f54SToshi Kani 		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
136527356f54SToshi Kani 				(unsigned long long)start,
136627356f54SToshi Kani 				(unsigned long long)size);
136727356f54SToshi Kani 		return -EINVAL;
136827356f54SToshi Kani 	}
136927356f54SToshi Kani 
137027356f54SToshi Kani 	return 0;
137127356f54SToshi Kani }
137227356f54SToshi Kani 
137363264400SWang Nan /*
137463264400SWang Nan  * If the movable zone has already been set up, newly added memory should be checked.
137563264400SWang Nan  * If its address is higher than the movable zone, it should be added as movable.
137663264400SWang Nan  * Without this check, the movable zone may overlap with another zone.
137763264400SWang Nan  */
137863264400SWang Nan static int should_add_memory_movable(int nid, u64 start, u64 size)
137963264400SWang Nan {
138063264400SWang Nan 	unsigned long start_pfn = start >> PAGE_SHIFT;
138163264400SWang Nan 	pg_data_t *pgdat = NODE_DATA(nid);
138263264400SWang Nan 	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
138363264400SWang Nan 
138463264400SWang Nan 	if (zone_is_empty(movable_zone))
138563264400SWang Nan 		return 0;
138663264400SWang Nan 
138763264400SWang Nan 	if (movable_zone->zone_start_pfn <= start_pfn)
138863264400SWang Nan 		return 1;
138963264400SWang Nan 
139063264400SWang Nan 	return 0;
139163264400SWang Nan }
139263264400SWang Nan 
1393033fbae9SDan Williams int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
1394033fbae9SDan Williams 		bool for_device)
139563264400SWang Nan {
1396033fbae9SDan Williams #ifdef CONFIG_ZONE_DEVICE
1397033fbae9SDan Williams 	if (for_device)
1398033fbae9SDan Williams 		return ZONE_DEVICE;
1399033fbae9SDan Williams #endif
140063264400SWang Nan 	if (should_add_memory_movable(nid, start, size))
140163264400SWang Nan 		return ZONE_MOVABLE;
140263264400SWang Nan 
140363264400SWang Nan 	return zone_default;
140463264400SWang Nan }
140563264400SWang Nan 
140631bc3858SVitaly Kuznetsov static int online_memory_block(struct memory_block *mem, void *arg)
140731bc3858SVitaly Kuznetsov {
1408dc18d706SNathan Fontenot 	return device_online(&mem->dev);
140931bc3858SVitaly Kuznetsov }
141031bc3858SVitaly Kuznetsov 
141131168481SAl Viro /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
141231bc3858SVitaly Kuznetsov int __ref add_memory_resource(int nid, struct resource *res, bool online)
1413bc02af93SYasunori Goto {
141462cedb9fSDavid Vrabel 	u64 start, size;
14159af3c2deSYasunori Goto 	pg_data_t *pgdat = NULL;
1416a1e565aaSTang Chen 	bool new_pgdat;
1417a1e565aaSTang Chen 	bool new_node;
1418bc02af93SYasunori Goto 	int ret;
1419bc02af93SYasunori Goto 
142062cedb9fSDavid Vrabel 	start = res->start;
142162cedb9fSDavid Vrabel 	size = resource_size(res);
142262cedb9fSDavid Vrabel 
142327356f54SToshi Kani 	ret = check_hotplug_memory_range(start, size);
142427356f54SToshi Kani 	if (ret)
142527356f54SToshi Kani 		return ret;
142627356f54SToshi Kani 
1427a1e565aaSTang Chen 	{	/* Stupid hack to suppress address-never-null warning */
1428a1e565aaSTang Chen 		void *p = NODE_DATA(nid);
1429a1e565aaSTang Chen 		new_pgdat = !p;
1430a1e565aaSTang Chen 	}
1431ac13c462SNathan Zimmer 
1432bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1433ac13c462SNathan Zimmer 
14347f36e3e5STang Chen 	/*
14357f36e3e5STang Chen 	 * Add new range to memblock so that when hotadd_new_pgdat() is called
14367f36e3e5STang Chen 	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
14377f36e3e5STang Chen 	 * this new range and calculate total pages correctly.  The range will
14387f36e3e5STang Chen 	 * be removed at hot-remove time.
14397f36e3e5STang Chen 	 */
14407f36e3e5STang Chen 	memblock_add_node(start, size, nid);
14417f36e3e5STang Chen 
1442a1e565aaSTang Chen 	new_node = !node_online(nid);
1443a1e565aaSTang Chen 	if (new_node) {
14449af3c2deSYasunori Goto 		pgdat = hotadd_new_pgdat(nid, start);
14456ad696d2SAndi Kleen 		ret = -ENOMEM;
14469af3c2deSYasunori Goto 		if (!pgdat)
144741b9e2d7SWen Congyang 			goto error;
14489af3c2deSYasunori Goto 	}
14499af3c2deSYasunori Goto 
1450bc02af93SYasunori Goto 	/* call arch's memory hotadd */
1451033fbae9SDan Williams 	ret = arch_add_memory(nid, start, size, false);
1452bc02af93SYasunori Goto 
14539af3c2deSYasunori Goto 	if (ret < 0)
14549af3c2deSYasunori Goto 		goto error;
14559af3c2deSYasunori Goto 
14560fc44159SYasunori Goto 	/* we online node here. we can't roll back from here. */
14579af3c2deSYasunori Goto 	node_set_online(nid);
14589af3c2deSYasunori Goto 
1459a1e565aaSTang Chen 	if (new_node) {
14609037a993SMichal Hocko 		unsigned long start_pfn = start >> PAGE_SHIFT;
14619037a993SMichal Hocko 		unsigned long nr_pages = size >> PAGE_SHIFT;
14629037a993SMichal Hocko 
14639037a993SMichal Hocko 		ret = __register_one_node(nid);
14649037a993SMichal Hocko 		if (ret)
14659037a993SMichal Hocko 			goto register_fail;
14669037a993SMichal Hocko 
14679037a993SMichal Hocko 		/*
14689037a993SMichal Hocko 		 * link memory sections under this node. This is already
14699037a993SMichal Hocko 		 * done when creating memory sections in register_new_memory()
14709037a993SMichal Hocko 		 * but that depends on the node being registered, so offline
14719037a993SMichal Hocko 		 * nodes have to go through register_node().
14729037a993SMichal Hocko 		 * TODO clean up this mess.
14739037a993SMichal Hocko 		 */
14749037a993SMichal Hocko 		ret = link_mem_sections(nid, start_pfn, nr_pages);
14759037a993SMichal Hocko register_fail:
14760fc44159SYasunori Goto 		/*
14770fc44159SYasunori Goto 		 * If the sysfs file of the new node can't be created, cpus on the
14780fc44159SYasunori Goto 		 * node can't be hot-added. There is no way to roll back now,
14790fc44159SYasunori Goto 		 * so check with BUG_ON() to catch it reluctantly.
14800fc44159SYasunori Goto 		 */
14810fc44159SYasunori Goto 		BUG_ON(ret);
14820fc44159SYasunori Goto 	}
14830fc44159SYasunori Goto 
1484d96ae530Sakpm@linux-foundation.org 	/* create new memmap entry */
1485d96ae530Sakpm@linux-foundation.org 	firmware_map_add_hotplug(start, start + size, "System RAM");
1486d96ae530Sakpm@linux-foundation.org 
148731bc3858SVitaly Kuznetsov 	/* online pages if requested */
148831bc3858SVitaly Kuznetsov 	if (online)
148931bc3858SVitaly Kuznetsov 		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
149031bc3858SVitaly Kuznetsov 				  NULL, online_memory_block);
149131bc3858SVitaly Kuznetsov 
14926ad696d2SAndi Kleen 	goto out;
14936ad696d2SAndi Kleen 
14949af3c2deSYasunori Goto error:
14959af3c2deSYasunori Goto 	/* rollback pgdat allocation and others */
14969af3c2deSYasunori Goto 	if (new_pgdat)
14979af3c2deSYasunori Goto 		rollback_node_hotadd(nid, pgdat);
14987f36e3e5STang Chen 	memblock_remove(start, size);
14999af3c2deSYasunori Goto 
15006ad696d2SAndi Kleen out:
1501bfc8c901SVladimir Davydov 	mem_hotplug_done();
1502bc02af93SYasunori Goto 	return ret;
1503bc02af93SYasunori Goto }
150462cedb9fSDavid Vrabel EXPORT_SYMBOL_GPL(add_memory_resource);
150562cedb9fSDavid Vrabel 
150662cedb9fSDavid Vrabel int __ref add_memory(int nid, u64 start, u64 size)
150762cedb9fSDavid Vrabel {
150862cedb9fSDavid Vrabel 	struct resource *res;
150962cedb9fSDavid Vrabel 	int ret;
151062cedb9fSDavid Vrabel 
151162cedb9fSDavid Vrabel 	res = register_memory_resource(start, size);
15126f754ba4SVitaly Kuznetsov 	if (IS_ERR(res))
15136f754ba4SVitaly Kuznetsov 		return PTR_ERR(res);
151462cedb9fSDavid Vrabel 
151531bc3858SVitaly Kuznetsov 	ret = add_memory_resource(nid, res, memhp_auto_online);
151662cedb9fSDavid Vrabel 	if (ret < 0)
151762cedb9fSDavid Vrabel 		release_memory_resource(res);
151862cedb9fSDavid Vrabel 	return ret;
151962cedb9fSDavid Vrabel }
1520bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory);
15210c0e6195SKAMEZAWA Hiroyuki 
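/*
 * Editor's sketch of a (hypothetical) hot-add caller such as an ACPI or
 * balloon driver.  The range must be section-aligned, and whether the new
 * blocks are onlined immediately follows memhp_auto_online (see
 * add_memory_resource() above).
 */
static int __maybe_unused example_hotadd_range(int nid, u64 start, u64 size)
{
	int ret;

	ret = add_memory(nid, start, size);
	if (ret)
		pr_err("hot-add of [mem %#010llx-%#010llx] failed: %d\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size - 1), ret);
	return ret;
}
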
15220c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE
15230c0e6195SKAMEZAWA Hiroyuki /*
15245c755e9fSBadari Pulavarty  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
15255c755e9fSBadari Pulavarty  * set and the size of the free page is given by page_order(). Using this,
15265c755e9fSBadari Pulavarty  * the function determines if the pageblock contains only free pages.
15275c755e9fSBadari Pulavarty  * Due to buddy contraints, a free page at least the size of a pageblock will
15285c755e9fSBadari Pulavarty  * Due to buddy constraints, a free page at least the size of a pageblock will
15295c755e9fSBadari Pulavarty  * be located at the start of the pageblock.
15305c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page)
15315c755e9fSBadari Pulavarty {
15325c755e9fSBadari Pulavarty 	return PageBuddy(page) && page_order(page) >= pageblock_order;
15335c755e9fSBadari Pulavarty }
15345c755e9fSBadari Pulavarty 
15355c755e9fSBadari Pulavarty /* Return the start of the next active pageblock after a given page */
15365c755e9fSBadari Pulavarty static struct page *next_active_pageblock(struct page *page)
15375c755e9fSBadari Pulavarty {
15385c755e9fSBadari Pulavarty 	/* Ensure the starting page is pageblock-aligned */
15395c755e9fSBadari Pulavarty 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
15405c755e9fSBadari Pulavarty 
15415c755e9fSBadari Pulavarty 	/* If the entire pageblock is free, move to the end of free page */
15420dcc48c1SKAMEZAWA Hiroyuki 	if (pageblock_free(page)) {
15430dcc48c1SKAMEZAWA Hiroyuki 		int order;
15440dcc48c1SKAMEZAWA Hiroyuki 		/* be careful. we don't have locks, page_order can be changed.*/
15450dcc48c1SKAMEZAWA Hiroyuki 		order = page_order(page);
15460dcc48c1SKAMEZAWA Hiroyuki 		if ((order < MAX_ORDER) && (order >= pageblock_order))
15470dcc48c1SKAMEZAWA Hiroyuki 			return page + (1 << order);
15480dcc48c1SKAMEZAWA Hiroyuki 	}
15495c755e9fSBadari Pulavarty 
15500dcc48c1SKAMEZAWA Hiroyuki 	return page + pageblock_nr_pages;
15515c755e9fSBadari Pulavarty }
15525c755e9fSBadari Pulavarty 
15535c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable. */
1554c98940f6SYaowei Bai bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
15555c755e9fSBadari Pulavarty {
15565c755e9fSBadari Pulavarty 	struct page *page = pfn_to_page(start_pfn);
15575c755e9fSBadari Pulavarty 	struct page *end_page = page + nr_pages;
15585c755e9fSBadari Pulavarty 
15595c755e9fSBadari Pulavarty 	/* Check the starting page of each pageblock within the range */
15605c755e9fSBadari Pulavarty 	for (; page < end_page; page = next_active_pageblock(page)) {
156149ac8255SKAMEZAWA Hiroyuki 		if (!is_pageblock_removable_nolock(page))
1562c98940f6SYaowei Bai 			return false;
156349ac8255SKAMEZAWA Hiroyuki 		cond_resched();
15645c755e9fSBadari Pulavarty 	}
15655c755e9fSBadari Pulavarty 
15665c755e9fSBadari Pulavarty 	/* All pageblocks in the memory block are likely to be hot-removable */
1567c98940f6SYaowei Bai 	return true;
15685c755e9fSBadari Pulavarty }
15695c755e9fSBadari Pulavarty 
15705c755e9fSBadari Pulavarty /*
1571deb88a2aSToshi Kani  * Confirm all pages in a range [start, end) belong to the same zone.
1572a96dfddbSToshi Kani  * When true, return its valid [start, end).
15730c0e6195SKAMEZAWA Hiroyuki  */
1574a96dfddbSToshi Kani int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1575a96dfddbSToshi Kani 			 unsigned long *valid_start, unsigned long *valid_end)
15760c0e6195SKAMEZAWA Hiroyuki {
15775f0f2887SAndrew Banman 	unsigned long pfn, sec_end_pfn;
1578a96dfddbSToshi Kani 	unsigned long start, end;
15790c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone = NULL;
15800c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
15810c0e6195SKAMEZAWA Hiroyuki 	int i;
1582deb88a2aSToshi Kani 	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
15830c0e6195SKAMEZAWA Hiroyuki 	     pfn < end_pfn;
1584deb88a2aSToshi Kani 	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
15855f0f2887SAndrew Banman 		/* Make sure the memory section is present first */
15865f0f2887SAndrew Banman 		if (!present_section_nr(pfn_to_section_nr(pfn)))
15875f0f2887SAndrew Banman 			continue;
15885f0f2887SAndrew Banman 		for (; pfn < sec_end_pfn && pfn < end_pfn;
15890c0e6195SKAMEZAWA Hiroyuki 		     pfn += MAX_ORDER_NR_PAGES) {
15900c0e6195SKAMEZAWA Hiroyuki 			i = 0;
15910c0e6195SKAMEZAWA Hiroyuki 			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
15925f0f2887SAndrew Banman 			while ((i < MAX_ORDER_NR_PAGES) &&
15935f0f2887SAndrew Banman 				!pfn_valid_within(pfn + i))
15940c0e6195SKAMEZAWA Hiroyuki 				i++;
1595d6d8c8a4Szhong jiang 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
15960c0e6195SKAMEZAWA Hiroyuki 				continue;
15970c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn + i);
15980c0e6195SKAMEZAWA Hiroyuki 			if (zone && page_zone(page) != zone)
15990c0e6195SKAMEZAWA Hiroyuki 				return 0;
1600a96dfddbSToshi Kani 			if (!zone)
1601a96dfddbSToshi Kani 				start = pfn + i;
16020c0e6195SKAMEZAWA Hiroyuki 			zone = page_zone(page);
1603a96dfddbSToshi Kani 			end = pfn + MAX_ORDER_NR_PAGES;
16040c0e6195SKAMEZAWA Hiroyuki 		}
16055f0f2887SAndrew Banman 	}
1606deb88a2aSToshi Kani 
1607a96dfddbSToshi Kani 	if (zone) {
1608a96dfddbSToshi Kani 		*valid_start = start;
1609d6d8c8a4Szhong jiang 		*valid_end = min(end, end_pfn);
16100c0e6195SKAMEZAWA Hiroyuki 		return 1;
1611a96dfddbSToshi Kani 	} else {
1612deb88a2aSToshi Kani 		return 0;
16130c0e6195SKAMEZAWA Hiroyuki 	}
1614a96dfddbSToshi Kani }
16150c0e6195SKAMEZAWA Hiroyuki 
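/*
 * Editor's illustration (hypothetical layout): if ZONE_NORMAL ends and
 * ZONE_MOVABLE begins at pfn 0x200000, a request for [0x1f8000, 0x208000)
 * spans two zones and the function returns 0, while [0x200000, 0x208000)
 * returns 1 with *valid_start and *valid_end set to the examined sub-range
 * that was actually valid.
 */
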
16160c0e6195SKAMEZAWA Hiroyuki /*
16170efadf48SYisheng Xie  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
16180efadf48SYisheng Xie  * non-lru movable pages and hugepages). We scan pfn because it's much
16190efadf48SYisheng Xie  * non-lru movable pages and hugepages). We scan by pfn because it's much
16200efadf48SYisheng Xie  * easier than scanning over the linked list. This function returns the pfn
16210c0e6195SKAMEZAWA Hiroyuki  */
1622c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
16230c0e6195SKAMEZAWA Hiroyuki {
16240c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
16250c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
16260c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start; pfn < end; pfn++) {
16270c0e6195SKAMEZAWA Hiroyuki 		if (pfn_valid(pfn)) {
16280c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn);
16290c0e6195SKAMEZAWA Hiroyuki 			if (PageLRU(page))
16300c0e6195SKAMEZAWA Hiroyuki 				return pfn;
16310efadf48SYisheng Xie 			if (__PageMovable(page))
16320efadf48SYisheng Xie 				return pfn;
1633c8721bbbSNaoya Horiguchi 			if (PageHuge(page)) {
16347e1f049eSNaoya Horiguchi 				if (page_huge_active(page))
1635c8721bbbSNaoya Horiguchi 					return pfn;
1636c8721bbbSNaoya Horiguchi 				else
1637c8721bbbSNaoya Horiguchi 					pfn = round_up(pfn + 1,
1638c8721bbbSNaoya Horiguchi 						1 << compound_order(page)) - 1;
1639c8721bbbSNaoya Horiguchi 			}
16400c0e6195SKAMEZAWA Hiroyuki 		}
16410c0e6195SKAMEZAWA Hiroyuki 	}
16420c0e6195SKAMEZAWA Hiroyuki 	return 0;
16430c0e6195SKAMEZAWA Hiroyuki }
16440c0e6195SKAMEZAWA Hiroyuki 
1645394e31d2SXishi Qiu static struct page *new_node_page(struct page *page, unsigned long private,
1646394e31d2SXishi Qiu 		int **result)
1647394e31d2SXishi Qiu {
1648394e31d2SXishi Qiu 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
1649394e31d2SXishi Qiu 	int nid = page_to_nid(page);
1650231e97e2SLi Zhong 	nodemask_t nmask = node_states[N_MEMORY];
1651231e97e2SLi Zhong 	struct page *new_page = NULL;
1652394e31d2SXishi Qiu 
1653394e31d2SXishi Qiu 	/*
1654394e31d2SXishi Qiu 	 * TODO: allocate a destination hugepage from a nearest neighbor node,
1655394e31d2SXishi Qiu 	 * accordance with memory policy of the user process if possible. For
1656394e31d2SXishi Qiu 	 * now as a simple work-around, we use the next node for destination.
1657394e31d2SXishi Qiu 	 */
1658394e31d2SXishi Qiu 	if (PageHuge(page))
1659394e31d2SXishi Qiu 		return alloc_huge_page_node(page_hstate(compound_head(page)),
1660394e31d2SXishi Qiu 					next_node_in(nid, nmask));
1661394e31d2SXishi Qiu 
1662394e31d2SXishi Qiu 	node_clear(nid, nmask);
16639bb627beSLi Zhong 
1664394e31d2SXishi Qiu 	if (PageHighMem(page)
1665394e31d2SXishi Qiu 	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1666394e31d2SXishi Qiu 		gfp_mask |= __GFP_HIGHMEM;
1667394e31d2SXishi Qiu 
1668231e97e2SLi Zhong 	if (!nodes_empty(nmask))
1669394e31d2SXishi Qiu 		new_page = __alloc_pages_nodemask(gfp_mask, 0,
1670394e31d2SXishi Qiu 					node_zonelist(nid, gfp_mask), &nmask);
1671394e31d2SXishi Qiu 	if (!new_page)
1672394e31d2SXishi Qiu 		new_page = __alloc_pages(gfp_mask, 0,
1673394e31d2SXishi Qiu 					node_zonelist(nid, gfp_mask));
1674394e31d2SXishi Qiu 
1675394e31d2SXishi Qiu 	return new_page;
1676394e31d2SXishi Qiu }
1677394e31d2SXishi Qiu 
16780c0e6195SKAMEZAWA Hiroyuki #define NR_OFFLINE_AT_ONCE_PAGES	(256)
16790c0e6195SKAMEZAWA Hiroyuki static int
16800c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
16810c0e6195SKAMEZAWA Hiroyuki {
16820c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
16830c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
16840c0e6195SKAMEZAWA Hiroyuki 	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
16850c0e6195SKAMEZAWA Hiroyuki 	int not_managed = 0;
16860c0e6195SKAMEZAWA Hiroyuki 	int ret = 0;
16870c0e6195SKAMEZAWA Hiroyuki 	LIST_HEAD(source);
16880c0e6195SKAMEZAWA Hiroyuki 
16890c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
16900c0e6195SKAMEZAWA Hiroyuki 		if (!pfn_valid(pfn))
16910c0e6195SKAMEZAWA Hiroyuki 			continue;
16920c0e6195SKAMEZAWA Hiroyuki 		page = pfn_to_page(pfn);
1693c8721bbbSNaoya Horiguchi 
1694c8721bbbSNaoya Horiguchi 		if (PageHuge(page)) {
1695c8721bbbSNaoya Horiguchi 			struct page *head = compound_head(page);
1696c8721bbbSNaoya Horiguchi 			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1697c8721bbbSNaoya Horiguchi 			if (compound_order(head) > PFN_SECTION_SHIFT) {
1698c8721bbbSNaoya Horiguchi 				ret = -EBUSY;
1699c8721bbbSNaoya Horiguchi 				break;
1700c8721bbbSNaoya Horiguchi 			}
1701c8721bbbSNaoya Horiguchi 			if (isolate_huge_page(page, &source))
1702c8721bbbSNaoya Horiguchi 				move_pages -= 1 << compound_order(head);
1703c8721bbbSNaoya Horiguchi 			continue;
1704c8721bbbSNaoya Horiguchi 		}
1705c8721bbbSNaoya Horiguchi 
1706700c2a46SKonstantin Khlebnikov 		if (!get_page_unless_zero(page))
17070c0e6195SKAMEZAWA Hiroyuki 			continue;
17080c0e6195SKAMEZAWA Hiroyuki 		/*
17090efadf48SYisheng Xie 		 * We can skip free pages. And we can deal with pages on
17100efadf48SYisheng Xie 		 * LRU and non-lru movable pages.
17110c0e6195SKAMEZAWA Hiroyuki 		 */
17120efadf48SYisheng Xie 		if (PageLRU(page))
171362695a84SNick Piggin 			ret = isolate_lru_page(page);
17140efadf48SYisheng Xie 		else
17150efadf48SYisheng Xie 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
17160c0e6195SKAMEZAWA Hiroyuki 		if (!ret) { /* Success */
1717700c2a46SKonstantin Khlebnikov 			put_page(page);
171862695a84SNick Piggin 			list_add_tail(&page->lru, &source);
17190c0e6195SKAMEZAWA Hiroyuki 			move_pages--;
17200efadf48SYisheng Xie 			if (!__PageMovable(page))
1721599d0c95SMel Gorman 				inc_node_page_state(page, NR_ISOLATED_ANON +
17226d9c285aSKOSAKI Motohiro 						    page_is_file_cache(page));
17236d9c285aSKOSAKI Motohiro 
17240c0e6195SKAMEZAWA Hiroyuki 		} else {
17250c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_DEBUG_VM
17260efadf48SYisheng Xie 			pr_alert("failed to isolate pfn %lx\n", pfn);
17270efadf48SYisheng Xie 			dump_page(page, "isolation failed");
17280c0e6195SKAMEZAWA Hiroyuki #endif
1729700c2a46SKonstantin Khlebnikov 			put_page(page);
173025985edcSLucas De Marchi 			/* Because we don't hold the big zone->lock, we should
1731809c4449SBob Liu 			   check this again here. */
1732809c4449SBob Liu 			if (page_count(page)) {
1733809c4449SBob Liu 				not_managed++;
1734f3ab2636SBob Liu 				ret = -EBUSY;
1735809c4449SBob Liu 				break;
1736809c4449SBob Liu 			}
17370c0e6195SKAMEZAWA Hiroyuki 		}
17380c0e6195SKAMEZAWA Hiroyuki 	}
1739f3ab2636SBob Liu 	if (!list_empty(&source)) {
17400c0e6195SKAMEZAWA Hiroyuki 		if (not_managed) {
1741c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
17420c0e6195SKAMEZAWA Hiroyuki 			goto out;
17430c0e6195SKAMEZAWA Hiroyuki 		}
174474c08f98SMinchan Kim 
1745394e31d2SXishi Qiu 		/* Allocate a new page from the nearest neighbor node */
1746394e31d2SXishi Qiu 		ret = migrate_pages(&source, new_node_page, NULL, 0,
17479c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1748cf608ac1SMinchan Kim 		if (ret)
1749c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
1750f3ab2636SBob Liu 	}
17510c0e6195SKAMEZAWA Hiroyuki out:
17520c0e6195SKAMEZAWA Hiroyuki 	return ret;
17530c0e6195SKAMEZAWA Hiroyuki }
17540c0e6195SKAMEZAWA Hiroyuki 
17550c0e6195SKAMEZAWA Hiroyuki /*
17560c0e6195SKAMEZAWA Hiroyuki  * remove from free_area[] and mark all as Reserved.
17570c0e6195SKAMEZAWA Hiroyuki  */
17580c0e6195SKAMEZAWA Hiroyuki static int
17590c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
17600c0e6195SKAMEZAWA Hiroyuki 			void *data)
17610c0e6195SKAMEZAWA Hiroyuki {
17620c0e6195SKAMEZAWA Hiroyuki 	__offline_isolated_pages(start, start + nr_pages);
17630c0e6195SKAMEZAWA Hiroyuki 	return 0;
17640c0e6195SKAMEZAWA Hiroyuki }
17650c0e6195SKAMEZAWA Hiroyuki 
17660c0e6195SKAMEZAWA Hiroyuki static void
17670c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
17680c0e6195SKAMEZAWA Hiroyuki {
1769908eedc6SKAMEZAWA Hiroyuki 	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
17700c0e6195SKAMEZAWA Hiroyuki 				offline_isolated_pages_cb);
17710c0e6195SKAMEZAWA Hiroyuki }
17720c0e6195SKAMEZAWA Hiroyuki 
17730c0e6195SKAMEZAWA Hiroyuki /*
17740c0e6195SKAMEZAWA Hiroyuki  * Check that all pages in the range, recorded as a memory resource, are isolated.
17750c0e6195SKAMEZAWA Hiroyuki  */
17760c0e6195SKAMEZAWA Hiroyuki static int
17770c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
17780c0e6195SKAMEZAWA Hiroyuki 			void *data)
17790c0e6195SKAMEZAWA Hiroyuki {
17800c0e6195SKAMEZAWA Hiroyuki 	int ret;
17810c0e6195SKAMEZAWA Hiroyuki 	long offlined = *(long *)data;
1782b023f468SWen Congyang 	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
17830c0e6195SKAMEZAWA Hiroyuki 	offlined = nr_pages;
17840c0e6195SKAMEZAWA Hiroyuki 	if (!ret)
17850c0e6195SKAMEZAWA Hiroyuki 		*(long *)data += offlined;
17860c0e6195SKAMEZAWA Hiroyuki 	return ret;
17870c0e6195SKAMEZAWA Hiroyuki }
17880c0e6195SKAMEZAWA Hiroyuki 
17890c0e6195SKAMEZAWA Hiroyuki static long
17900c0e6195SKAMEZAWA Hiroyuki check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
17910c0e6195SKAMEZAWA Hiroyuki {
17920c0e6195SKAMEZAWA Hiroyuki 	long offlined = 0;
17930c0e6195SKAMEZAWA Hiroyuki 	int ret;
17940c0e6195SKAMEZAWA Hiroyuki 
1795908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
17960c0e6195SKAMEZAWA Hiroyuki 			check_pages_isolated_cb);
17970c0e6195SKAMEZAWA Hiroyuki 	if (ret < 0)
17980c0e6195SKAMEZAWA Hiroyuki 		offlined = (long)ret;
17990c0e6195SKAMEZAWA Hiroyuki 	return offlined;
18000c0e6195SKAMEZAWA Hiroyuki }
18010c0e6195SKAMEZAWA Hiroyuki 
180209285af7SLai Jiangshan #ifdef CONFIG_MOVABLE_NODE
180379a4dcefSTang Chen /*
180479a4dcefSTang Chen  * With CONFIG_MOVABLE_NODE, we permit offlining a node which doesn't have
180579a4dcefSTang Chen  * normal memory.
180679a4dcefSTang Chen  */
180709285af7SLai Jiangshan static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
180809285af7SLai Jiangshan {
180909285af7SLai Jiangshan 	return true;
181009285af7SLai Jiangshan }
181179a4dcefSTang Chen #else /* CONFIG_MOVABLE_NODE */
181274d42d8fSLai Jiangshan /* ensure the node has NORMAL memory if it is still online */
181374d42d8fSLai Jiangshan static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
181474d42d8fSLai Jiangshan {
181574d42d8fSLai Jiangshan 	struct pglist_data *pgdat = zone->zone_pgdat;
181674d42d8fSLai Jiangshan 	unsigned long present_pages = 0;
181774d42d8fSLai Jiangshan 	enum zone_type zt;
181874d42d8fSLai Jiangshan 
181974d42d8fSLai Jiangshan 	for (zt = 0; zt <= ZONE_NORMAL; zt++)
182074d42d8fSLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
182174d42d8fSLai Jiangshan 
182274d42d8fSLai Jiangshan 	if (present_pages > nr_pages)
182374d42d8fSLai Jiangshan 		return true;
182474d42d8fSLai Jiangshan 
182574d42d8fSLai Jiangshan 	present_pages = 0;
182674d42d8fSLai Jiangshan 	for (; zt <= ZONE_MOVABLE; zt++)
182774d42d8fSLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
182874d42d8fSLai Jiangshan 
182974d42d8fSLai Jiangshan 	/*
183074d42d8fSLai Jiangshan 	 * we can't offline the last normal memory until all
183174d42d8fSLai Jiangshan 	 * higher memory is offlined.
183274d42d8fSLai Jiangshan 	 */
183374d42d8fSLai Jiangshan 	return present_pages == 0;
183474d42d8fSLai Jiangshan }
183579a4dcefSTang Chen #endif /* CONFIG_MOVABLE_NODE */
183674d42d8fSLai Jiangshan 
1837c5320926STang Chen static int __init cmdline_parse_movable_node(char *p)
1838c5320926STang Chen {
1839c5320926STang Chen #ifdef CONFIG_MOVABLE_NODE
184055ac590cSTang Chen 	movable_node_enabled = true;
1841c5320926STang Chen #else
1842c5320926STang Chen 	pr_warn("movable_node option not supported\n");
1843c5320926STang Chen #endif
1844c5320926STang Chen 	return 0;
1845c5320926STang Chen }
1846c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node);
1847c5320926STang Chen 
1848d9713679SLai Jiangshan /* check which state of node_states will be changed when memory is offlined */
1849d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages,
1850d9713679SLai Jiangshan 		struct zone *zone, struct memory_notify *arg)
1851d9713679SLai Jiangshan {
1852d9713679SLai Jiangshan 	struct pglist_data *pgdat = zone->zone_pgdat;
1853d9713679SLai Jiangshan 	unsigned long present_pages = 0;
1854d9713679SLai Jiangshan 	enum zone_type zt, zone_last = ZONE_NORMAL;
1855d9713679SLai Jiangshan 
1856d9713679SLai Jiangshan 	/*
18576715ddf9SLai Jiangshan 	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
18586715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
18596715ddf9SLai Jiangshan 	 * set zone_last to ZONE_NORMAL.
1860d9713679SLai Jiangshan 	 *
18616715ddf9SLai Jiangshan 	 * If we don't have HIGHMEM nor movable node,
18626715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
18636715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
1864d9713679SLai Jiangshan 	 */
18656715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
1866d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
1867d9713679SLai Jiangshan 
1868d9713679SLai Jiangshan 	/*
1869d9713679SLai Jiangshan 	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
1870d9713679SLai Jiangshan 	 * If the memory to be offlined is in a zone of 0...zone_last,
1871d9713679SLai Jiangshan 	 * and it is the last present memory, 0...zone_last will
1872d9713679SLai Jiangshan 	 * become empty after offline, thus we can determine that we will
1873d9713679SLai Jiangshan 	 * need to clear the node from node_states[N_NORMAL_MEMORY].
1874d9713679SLai Jiangshan 	 */
1875d9713679SLai Jiangshan 	for (zt = 0; zt <= zone_last; zt++)
1876d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1877d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1878d9713679SLai Jiangshan 		arg->status_change_nid_normal = zone_to_nid(zone);
1879d9713679SLai Jiangshan 	else
1880d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
1881d9713679SLai Jiangshan 
18826715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
18836715ddf9SLai Jiangshan 	/*
18846715ddf9SLai Jiangshan 	 * If we have movable node, node_states[N_HIGH_MEMORY]
18856715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
18866715ddf9SLai Jiangshan 	 * set zone_last to ZONE_HIGHMEM.
18876715ddf9SLai Jiangshan 	 *
18886715ddf9SLai Jiangshan 	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
18896715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
18906715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
18916715ddf9SLai Jiangshan 	 */
18926715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
18936715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
18946715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
18956715ddf9SLai Jiangshan 
18966715ddf9SLai Jiangshan 	for (; zt <= zone_last; zt++)
18976715ddf9SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
18986715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
18996715ddf9SLai Jiangshan 		arg->status_change_nid_high = zone_to_nid(zone);
19006715ddf9SLai Jiangshan 	else
19016715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
19026715ddf9SLai Jiangshan #else
19036715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
19046715ddf9SLai Jiangshan #endif
19056715ddf9SLai Jiangshan 
1906d9713679SLai Jiangshan 	/*
1907d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1908d9713679SLai Jiangshan 	 */
1909d9713679SLai Jiangshan 	zone_last = ZONE_MOVABLE;
1910d9713679SLai Jiangshan 
1911d9713679SLai Jiangshan 	/*
1912d9713679SLai Jiangshan 	 * check whether node_states[N_HIGH_MEMORY] will be changed.
1913d9713679SLai Jiangshan 	 * If we try to offline the last present @nr_pages from the node,
1914d9713679SLai Jiangshan 	 * we can determine that we will need to clear the node from
1915d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY].
1916d9713679SLai Jiangshan 	 */
1917d9713679SLai Jiangshan 	for (; zt <= zone_last; zt++)
1918d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1919d9713679SLai Jiangshan 	if (nr_pages >= present_pages)
1920d9713679SLai Jiangshan 		arg->status_change_nid = zone_to_nid(zone);
1921d9713679SLai Jiangshan 	else
1922d9713679SLai Jiangshan 		arg->status_change_nid = -1;
1923d9713679SLai Jiangshan }
1924d9713679SLai Jiangshan 
1925d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1926d9713679SLai Jiangshan {
1927d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
1928d9713679SLai Jiangshan 		node_clear_state(node, N_NORMAL_MEMORY);
1929d9713679SLai Jiangshan 
19306715ddf9SLai Jiangshan 	if ((N_MEMORY != N_NORMAL_MEMORY) &&
19316715ddf9SLai Jiangshan 	    (arg->status_change_nid_high >= 0))
1932d9713679SLai Jiangshan 		node_clear_state(node, N_HIGH_MEMORY);
19336715ddf9SLai Jiangshan 
19346715ddf9SLai Jiangshan 	if ((N_MEMORY != N_HIGH_MEMORY) &&
19356715ddf9SLai Jiangshan 	    (arg->status_change_nid >= 0))
19366715ddf9SLai Jiangshan 		node_clear_state(node, N_MEMORY);
1937d9713679SLai Jiangshan }
1938d9713679SLai Jiangshan 
1939a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn,
19400c0e6195SKAMEZAWA Hiroyuki 		  unsigned long end_pfn, unsigned long timeout)
19410c0e6195SKAMEZAWA Hiroyuki {
19420c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn, nr_pages, expire;
19430c0e6195SKAMEZAWA Hiroyuki 	long offlined_pages;
19447b78d335SYasunori Goto 	int ret, drain, retry_max, node;
1945d702909fSCody P Schafer 	unsigned long flags;
1946a96dfddbSToshi Kani 	unsigned long valid_start, valid_end;
19470c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone;
19487b78d335SYasunori Goto 	struct memory_notify arg;
19490c0e6195SKAMEZAWA Hiroyuki 
19500c0e6195SKAMEZAWA Hiroyuki 	/* at least, alignment against pageblock is necessary */
19510c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
19520c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
19530c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
19540c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
19550c0e6195SKAMEZAWA Hiroyuki 	/* This makes hotplug much easier... and readable.
19560c0e6195SKAMEZAWA Hiroyuki 	   We assume this for now. */
1957a96dfddbSToshi Kani 	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
19580c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
19597b78d335SYasunori Goto 
1960a96dfddbSToshi Kani 	zone = page_zone(pfn_to_page(valid_start));
19617b78d335SYasunori Goto 	node = zone_to_nid(zone);
19627b78d335SYasunori Goto 	nr_pages = end_pfn - start_pfn;
19637b78d335SYasunori Goto 
196474d42d8fSLai Jiangshan 	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
196530467e0bSDavid Rientjes 		return -EINVAL;
196674d42d8fSLai Jiangshan 
19670c0e6195SKAMEZAWA Hiroyuki 	/* set above range as isolated */
1968b023f468SWen Congyang 	ret = start_isolate_page_range(start_pfn, end_pfn,
1969b023f468SWen Congyang 				       MIGRATE_MOVABLE, true);
19700c0e6195SKAMEZAWA Hiroyuki 	if (ret)
197130467e0bSDavid Rientjes 		return ret;
19727b78d335SYasunori Goto 
19737b78d335SYasunori Goto 	arg.start_pfn = start_pfn;
19747b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
1975d9713679SLai Jiangshan 	node_states_check_changes_offline(nr_pages, zone, &arg);
19767b78d335SYasunori Goto 
19777b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
19787b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
19797b78d335SYasunori Goto 	if (ret)
19807b78d335SYasunori Goto 		goto failed_removal;
19817b78d335SYasunori Goto 
19820c0e6195SKAMEZAWA Hiroyuki 	pfn = start_pfn;
19830c0e6195SKAMEZAWA Hiroyuki 	expire = jiffies + timeout;
19840c0e6195SKAMEZAWA Hiroyuki 	drain = 0;
19850c0e6195SKAMEZAWA Hiroyuki 	retry_max = 5;
19860c0e6195SKAMEZAWA Hiroyuki repeat:
19870c0e6195SKAMEZAWA Hiroyuki 	/* start memory hot removal */
19880c0e6195SKAMEZAWA Hiroyuki 	ret = -EAGAIN;
19890c0e6195SKAMEZAWA Hiroyuki 	if (time_after(jiffies, expire))
19900c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
19910c0e6195SKAMEZAWA Hiroyuki 	ret = -EINTR;
19920c0e6195SKAMEZAWA Hiroyuki 	if (signal_pending(current))
19930c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
19940c0e6195SKAMEZAWA Hiroyuki 	ret = 0;
19950c0e6195SKAMEZAWA Hiroyuki 	if (drain) {
19960c0e6195SKAMEZAWA Hiroyuki 		lru_add_drain_all();
19970c0e6195SKAMEZAWA Hiroyuki 		cond_resched();
1998c0554329SVlastimil Babka 		drain_all_pages(zone);
19990c0e6195SKAMEZAWA Hiroyuki 	}
20000c0e6195SKAMEZAWA Hiroyuki 
2001c8721bbbSNaoya Horiguchi 	pfn = scan_movable_pages(start_pfn, end_pfn);
2002c8721bbbSNaoya Horiguchi 	if (pfn) { /* We have movable pages */
20030c0e6195SKAMEZAWA Hiroyuki 		ret = do_migrate_range(pfn, end_pfn);
20040c0e6195SKAMEZAWA Hiroyuki 		if (!ret) {
20050c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
20060c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
20070c0e6195SKAMEZAWA Hiroyuki 		} else {
20080c0e6195SKAMEZAWA Hiroyuki 			if (ret < 0)
20090c0e6195SKAMEZAWA Hiroyuki 				if (--retry_max == 0)
20100c0e6195SKAMEZAWA Hiroyuki 					goto failed_removal;
20110c0e6195SKAMEZAWA Hiroyuki 			yield();
20120c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
20130c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
20140c0e6195SKAMEZAWA Hiroyuki 		}
20150c0e6195SKAMEZAWA Hiroyuki 	}
2016b3834be5SAdam Buchbinder 	/* drain all of the zone's lru pagevecs; this is asynchronous... */
20170c0e6195SKAMEZAWA Hiroyuki 	lru_add_drain_all();
20180c0e6195SKAMEZAWA Hiroyuki 	yield();
2019b3834be5SAdam Buchbinder 	/* drain pcp pages, this is synchronous. */
2020c0554329SVlastimil Babka 	drain_all_pages(zone);
2021c8721bbbSNaoya Horiguchi 	/*
2022c8721bbbSNaoya Horiguchi 	 * dissolve free hugepages in the memory block before doing offlining
2023c8721bbbSNaoya Horiguchi 	 * actually in order to make hugetlbfs's object counting consistent.
2024c8721bbbSNaoya Horiguchi 	 */
2025082d5b6bSGerald Schaefer 	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
2026082d5b6bSGerald Schaefer 	if (ret)
2027082d5b6bSGerald Schaefer 		goto failed_removal;
20280c0e6195SKAMEZAWA Hiroyuki 	/* check again */
20290c0e6195SKAMEZAWA Hiroyuki 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
20300c0e6195SKAMEZAWA Hiroyuki 	if (offlined_pages < 0) {
20310c0e6195SKAMEZAWA Hiroyuki 		ret = -EBUSY;
20320c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
20330c0e6195SKAMEZAWA Hiroyuki 	}
2034e33e33b4SChen Yucong 	pr_info("Offlined Pages %ld\n", offlined_pages);
2035b3834be5SAdam Buchbinder 	/* Ok, all of our target is isolated.
20360c0e6195SKAMEZAWA Hiroyuki 	   We cannot do rollback at this point. */
20370c0e6195SKAMEZAWA Hiroyuki 	offline_isolated_pages(start_pfn, end_pfn);
2038dbc0e4ceSKAMEZAWA Hiroyuki 	/* reset pagetype flags and make the migrate type MOVABLE */
20390815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
20400c0e6195SKAMEZAWA Hiroyuki 	/* removal success */
20413dcc0571SJiang Liu 	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
20420c0e6195SKAMEZAWA Hiroyuki 	zone->present_pages -= offlined_pages;
2043d702909fSCody P Schafer 
2044d702909fSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
20450c0e6195SKAMEZAWA Hiroyuki 	zone->zone_pgdat->node_present_pages -= offlined_pages;
2046d702909fSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
20477b78d335SYasunori Goto 
20481b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
20491b79acc9SKOSAKI Motohiro 
20501e8537baSXishi Qiu 	if (!populated_zone(zone)) {
2051340175b7SJiang Liu 		zone_pcp_reset(zone);
20521e8537baSXishi Qiu 		mutex_lock(&zonelists_mutex);
20531e8537baSXishi Qiu 		build_all_zonelists(NULL, NULL);
20541e8537baSXishi Qiu 		mutex_unlock(&zonelists_mutex);
20551e8537baSXishi Qiu 	} else
20561e8537baSXishi Qiu 		zone_pcp_update(zone);
2057340175b7SJiang Liu 
2058d9713679SLai Jiangshan 	node_states_clear_node(node, &arg);
2059698b1b30SVlastimil Babka 	if (arg.status_change_nid >= 0) {
20608fe23e05SDavid Rientjes 		kswapd_stop(node);
2061698b1b30SVlastimil Babka 		kcompactd_stop(node);
2062698b1b30SVlastimil Babka 	}
2063bce7394aSMinchan Kim 
20640c0e6195SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
20650c0e6195SKAMEZAWA Hiroyuki 	writeback_set_ratelimit();
20667b78d335SYasunori Goto 
20677b78d335SYasunori Goto 	memory_notify(MEM_OFFLINE, &arg);
20680c0e6195SKAMEZAWA Hiroyuki 	return 0;
20690c0e6195SKAMEZAWA Hiroyuki 
20700c0e6195SKAMEZAWA Hiroyuki failed_removal:
2071e33e33b4SChen Yucong 	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
2072a62e2f4fSBjorn Helgaas 		 (unsigned long long) start_pfn << PAGE_SHIFT,
2073a62e2f4fSBjorn Helgaas 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
20747b78d335SYasunori Goto 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
20750c0e6195SKAMEZAWA Hiroyuki 	/* pushback to free area */
20760815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
20770c0e6195SKAMEZAWA Hiroyuki 	return ret;
20780c0e6195SKAMEZAWA Hiroyuki }
207971088785SBadari Pulavarty 
208030467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
2081a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
2082a16cee10SWen Congyang {
2083a16cee10SWen Congyang 	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
2084a16cee10SWen Congyang }
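
/*
 * Editor's sketch of the expected calling convention (hypothetical helper):
 * like onlining, offlining must run under mem_hotplug_begin(), and the
 * range has to be pageblock/section aligned as enforced in
 * __offline_pages().
 */
static int __maybe_unused example_offline_one_section(unsigned long start_pfn)
{
	int ret;

	mem_hotplug_begin();
	ret = offline_pages(start_pfn, PAGES_PER_SECTION);
	mem_hotplug_done();

	return ret;
}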
2085e2ff3940SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
2086a16cee10SWen Congyang 
2087bbc76be6SWen Congyang /**
2088bbc76be6SWen Congyang  * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
2089bbc76be6SWen Congyang  * @start_pfn: start pfn of the memory range
2090e05c4bbfSToshi Kani  * @end_pfn: end pfn of the memory range
2091bbc76be6SWen Congyang  * @arg: argument passed to func
2092bbc76be6SWen Congyang  * @func: callback for each memory section walked
2093bbc76be6SWen Congyang  *
2094bbc76be6SWen Congyang  * This function walks through all present mem sections in range
2095bbc76be6SWen Congyang  * [start_pfn, end_pfn) and calls func on each mem section.
2096bbc76be6SWen Congyang  *
2097bbc76be6SWen Congyang  * Returns the return value of func.
2098bbc76be6SWen Congyang  */
2099e2ff3940SRafael J. Wysocki int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
2100bbc76be6SWen Congyang 		void *arg, int (*func)(struct memory_block *, void *))
210171088785SBadari Pulavarty {
2102e90bdb7fSWen Congyang 	struct memory_block *mem = NULL;
2103e90bdb7fSWen Congyang 	struct mem_section *section;
2104e90bdb7fSWen Congyang 	unsigned long pfn, section_nr;
2105e90bdb7fSWen Congyang 	int ret;
210671088785SBadari Pulavarty 
2107e90bdb7fSWen Congyang 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2108e90bdb7fSWen Congyang 		section_nr = pfn_to_section_nr(pfn);
2109e90bdb7fSWen Congyang 		if (!present_section_nr(section_nr))
2110e90bdb7fSWen Congyang 			continue;
2111e90bdb7fSWen Congyang 
2112e90bdb7fSWen Congyang 		section = __nr_to_section(section_nr);
2113e90bdb7fSWen Congyang 		/* same memblock? */
2114e90bdb7fSWen Congyang 		if (mem)
2115e90bdb7fSWen Congyang 			if ((section_nr >= mem->start_section_nr) &&
2116e90bdb7fSWen Congyang 			    (section_nr <= mem->end_section_nr))
2117e90bdb7fSWen Congyang 				continue;
2118e90bdb7fSWen Congyang 
2119e90bdb7fSWen Congyang 		mem = find_memory_block_hinted(section, mem);
2120e90bdb7fSWen Congyang 		if (!mem)
2121e90bdb7fSWen Congyang 			continue;
2122e90bdb7fSWen Congyang 
2123bbc76be6SWen Congyang 		ret = func(mem, arg);
2124e90bdb7fSWen Congyang 		if (ret) {
2125e90bdb7fSWen Congyang 			kobject_put(&mem->dev.kobj);
2126e90bdb7fSWen Congyang 			return ret;
2127e90bdb7fSWen Congyang 		}
2128e90bdb7fSWen Congyang 	}
2129e90bdb7fSWen Congyang 
2130e90bdb7fSWen Congyang 	if (mem)
2131e90bdb7fSWen Congyang 		kobject_put(&mem->dev.kobj);
2132e90bdb7fSWen Congyang 
2133bbc76be6SWen Congyang 	return 0;
2134bbc76be6SWen Congyang }
2135bbc76be6SWen Congyang 
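/*
 * Usage sketch (illustrative only, not part of this file; the callback and
 * variable names below are hypothetical).  A nonzero return from the
 * callback aborts the walk and is propagated to the caller:
 *
 *	static int count_blocks_cb(struct memory_block *mem, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nblocks = 0;
 *	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
 *			  &nblocks, count_blocks_cb);
 */
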
2136e2ff3940SRafael J. Wysocki #ifdef CONFIG_MEMORY_HOTREMOVE
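/*
 * walk_memory_range() callback: warn and return nonzero (aborting the walk)
 * if the memory block has not been fully offlined yet.
 */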
2137d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
2138bbc76be6SWen Congyang {
2139bbc76be6SWen Congyang 	int ret = !is_memblock_offlined(mem);
2140bbc76be6SWen Congyang 
2141349daa0fSRandy Dunlap 	if (unlikely(ret)) {
2142349daa0fSRandy Dunlap 		phys_addr_t beginpa, endpa;
2143349daa0fSRandy Dunlap 
2144349daa0fSRandy Dunlap 		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
2145349daa0fSRandy Dunlap 		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
2146756a025fSJoe Perches 		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
2147349daa0fSRandy Dunlap 			&beginpa, &endpa);
2148349daa0fSRandy Dunlap 	}
2149bbc76be6SWen Congyang 
2150bbc76be6SWen Congyang 	return ret;
2151bbc76be6SWen Congyang }
2152bbc76be6SWen Congyang 
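/* Return -EBUSY if any present CPU still maps to this node. */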
21530f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat)
215460a5a19eSTang Chen {
215560a5a19eSTang Chen 	int cpu;
215660a5a19eSTang Chen 
215760a5a19eSTang Chen 	for_each_present_cpu(cpu) {
215860a5a19eSTang Chen 		if (cpu_to_node(cpu) == pgdat->node_id)
215960a5a19eSTang Chen 			/*
216060a5a19eSTang Chen 			 * A CPU on this node has not been removed, so we
216160a5a19eSTang Chen 			 * can't offline this node.
216260a5a19eSTang Chen 			 */
216360a5a19eSTang Chen 			return -EBUSY;
216460a5a19eSTang Chen 	}
216560a5a19eSTang Chen 
216660a5a19eSTang Chen 	return 0;
216760a5a19eSTang Chen }
216860a5a19eSTang Chen 
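/*
 * Clear the cpu_to_node() association for every possible CPU that pointed
 * at this node (only meaningful with ACPI NUMA).
 */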
21690f1cfe9dSToshi Kani static void unmap_cpu_on_node(pg_data_t *pgdat)
2170e13fe869SWen Congyang {
2171e13fe869SWen Congyang #ifdef CONFIG_ACPI_NUMA
2172e13fe869SWen Congyang 	int cpu;
2173e13fe869SWen Congyang 
2174e13fe869SWen Congyang 	for_each_possible_cpu(cpu)
2175e13fe869SWen Congyang 		if (cpu_to_node(cpu) == pgdat->node_id)
2176e13fe869SWen Congyang 			numa_clear_node(cpu);
2177e13fe869SWen Congyang #endif
2178e13fe869SWen Congyang }
2179e13fe869SWen Congyang 
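/*
 * Refuse with -EBUSY while a present CPU still belongs to the node;
 * otherwise drop the stale CPU-to-node mappings so the node can go away.
 */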
21800f1cfe9dSToshi Kani static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
2181e13fe869SWen Congyang {
21820f1cfe9dSToshi Kani 	int ret;
2183e13fe869SWen Congyang 
21840f1cfe9dSToshi Kani 	ret = check_cpu_on_node(pgdat);
2185e13fe869SWen Congyang 	if (ret)
2186e13fe869SWen Congyang 		return ret;
2187e13fe869SWen Congyang 
2188e13fe869SWen Congyang 	/*
2189e13fe869SWen Congyang 	 * The node is being offlined when we get here, so we can
2190e13fe869SWen Congyang 	 * clear the cpu_to_node() mapping now.
2191e13fe869SWen Congyang 	 */
2192e13fe869SWen Congyang 
21930f1cfe9dSToshi Kani 	unmap_cpu_on_node(pgdat);
2194e13fe869SWen Congyang 	return 0;
2195e13fe869SWen Congyang }
2196e13fe869SWen Congyang 
21970f1cfe9dSToshi Kani /**
21980f1cfe9dSToshi Kani  * try_offline_node
21990f1cfe9dSToshi Kani  *
22000f1cfe9dSToshi Kani  * Offline a node if all memory sections and CPUs of the node have been removed.
22010f1cfe9dSToshi Kani  *
22020f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
22030f1cfe9dSToshi Kani  * and online/offline operations before this call.
22040f1cfe9dSToshi Kani  */
220590b30cdcSWen Congyang void try_offline_node(int nid)
220660a5a19eSTang Chen {
2207d822b86aSWen Congyang 	pg_data_t *pgdat = NODE_DATA(nid);
2208d822b86aSWen Congyang 	unsigned long start_pfn = pgdat->node_start_pfn;
2209d822b86aSWen Congyang 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
221060a5a19eSTang Chen 	unsigned long pfn;
221160a5a19eSTang Chen 
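	/*
	 * Bail out if any present section in the node's span still belongs
	 * to this node: memory is still plugged in, so keep the node online.
	 */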
221260a5a19eSTang Chen 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
221360a5a19eSTang Chen 		unsigned long section_nr = pfn_to_section_nr(pfn);
221460a5a19eSTang Chen 
221560a5a19eSTang Chen 		if (!present_section_nr(section_nr))
221660a5a19eSTang Chen 			continue;
221760a5a19eSTang Chen 
221860a5a19eSTang Chen 		if (pfn_to_nid(pfn) != nid)
221960a5a19eSTang Chen 			continue;
222060a5a19eSTang Chen 
222160a5a19eSTang Chen 		/*
222260a5a19eSTang Chen 		 * Some memory sections of this node have not been removed,
222360a5a19eSTang Chen 		 * so we can't offline the node now.
222460a5a19eSTang Chen 		 */
222560a5a19eSTang Chen 		return;
222660a5a19eSTang Chen 	}
222760a5a19eSTang Chen 
22280f1cfe9dSToshi Kani 	if (check_and_unmap_cpu_on_node(pgdat))
222960a5a19eSTang Chen 		return;
223060a5a19eSTang Chen 
223160a5a19eSTang Chen 	/*
223260a5a19eSTang Chen 	 * All memory sections and CPUs of this node have been removed, so
223360a5a19eSTang Chen 	 * we can offline the node now.
223460a5a19eSTang Chen 	 */
223560a5a19eSTang Chen 	node_set_offline(nid);
223660a5a19eSTang Chen 	unregister_one_node(nid);
223760a5a19eSTang Chen }
223890b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
223960a5a19eSTang Chen 
22400f1cfe9dSToshi Kani /**
22410f1cfe9dSToshi Kani  * remove_memory
22420f1cfe9dSToshi Kani  *
22430f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
22440f1cfe9dSToshi Kani  * and online/offline operations before this call, as required by
22450f1cfe9dSToshi Kani  * try_offline_node().
22460f1cfe9dSToshi Kani  */
2247242831ebSRafael J. Wysocki void __ref remove_memory(int nid, u64 start, u64 size)
2248bbc76be6SWen Congyang {
2249242831ebSRafael J. Wysocki 	int ret;
2250993c1aadSWen Congyang 
225127356f54SToshi Kani 	BUG_ON(check_hotplug_memory_range(start, size));
225227356f54SToshi Kani 
2253bfc8c901SVladimir Davydov 	mem_hotplug_begin();
22546677e3eaSYasuaki Ishimatsu 
22556677e3eaSYasuaki Ishimatsu 	/*
2256242831ebSRafael J. Wysocki 	 * All memory blocks must be offlined before removing memory.  Check
2257242831ebSRafael J. Wysocki 	 * whether all memory blocks in question are offline and trigger a BUG()
2258242831ebSRafael J. Wysocki 	 * if this is not the case.
22596677e3eaSYasuaki Ishimatsu 	 */
2260242831ebSRafael J. Wysocki 	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
2261d6de9d53SXishi Qiu 				check_memblock_offlined_cb);
2262bfc8c901SVladimir Davydov 	if (ret)
2263242831ebSRafael J. Wysocki 		BUG();
22646677e3eaSYasuaki Ishimatsu 
226546c66c4bSYasuaki Ishimatsu 	/* remove memmap entry */
226646c66c4bSYasuaki Ishimatsu 	firmware_map_remove(start, start + size, "System RAM");
2267f9126ab9SXishi Qiu 	memblock_free(start, size);
2268f9126ab9SXishi Qiu 	memblock_remove(start, size);
226946c66c4bSYasuaki Ishimatsu 
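	/*
	 * Tear down the mappings and struct page metadata for the range
	 * (the details of arch_remove_memory() are architecture-specific).
	 */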
227024d335caSWen Congyang 	arch_remove_memory(start, size);
227124d335caSWen Congyang 
227260a5a19eSTang Chen 	try_offline_node(nid);
227360a5a19eSTang Chen 
2274bfc8c901SVladimir Davydov 	mem_hotplug_done();
227571088785SBadari Pulavarty }
227671088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
2277aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
2278