xref: /openbmc/linux/mm/memory_hotplug.c (revision 3f906ba23689a3f824424c50f3ae937c2c70f676)
13947be19SDave Hansen /*
23947be19SDave Hansen  *  linux/mm/memory_hotplug.c
33947be19SDave Hansen  *
43947be19SDave Hansen  *  Copyright (C)
53947be19SDave Hansen  */
63947be19SDave Hansen 
73947be19SDave Hansen #include <linux/stddef.h>
83947be19SDave Hansen #include <linux/mm.h>
9174cd4b1SIngo Molnar #include <linux/sched/signal.h>
103947be19SDave Hansen #include <linux/swap.h>
113947be19SDave Hansen #include <linux/interrupt.h>
123947be19SDave Hansen #include <linux/pagemap.h>
133947be19SDave Hansen #include <linux/compiler.h>
14b95f1b31SPaul Gortmaker #include <linux/export.h>
153947be19SDave Hansen #include <linux/pagevec.h>
162d1d43f6SChandra Seetharaman #include <linux/writeback.h>
173947be19SDave Hansen #include <linux/slab.h>
183947be19SDave Hansen #include <linux/sysctl.h>
193947be19SDave Hansen #include <linux/cpu.h>
203947be19SDave Hansen #include <linux/memory.h>
214b94ffdcSDan Williams #include <linux/memremap.h>
223947be19SDave Hansen #include <linux/memory_hotplug.h>
233947be19SDave Hansen #include <linux/highmem.h>
243947be19SDave Hansen #include <linux/vmalloc.h>
250a547039SKAMEZAWA Hiroyuki #include <linux/ioport.h>
260c0e6195SKAMEZAWA Hiroyuki #include <linux/delay.h>
270c0e6195SKAMEZAWA Hiroyuki #include <linux/migrate.h>
280c0e6195SKAMEZAWA Hiroyuki #include <linux/page-isolation.h>
2971088785SBadari Pulavarty #include <linux/pfn.h>
306ad696d2SAndi Kleen #include <linux/suspend.h>
316d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
32d96ae530Sakpm@linux-foundation.org #include <linux/firmware-map.h>
3360a5a19eSTang Chen #include <linux/stop_machine.h>
34c8721bbbSNaoya Horiguchi #include <linux/hugetlb.h>
35c5320926STang Chen #include <linux/memblock.h>
36f784a3f1STang Chen #include <linux/bootmem.h>
37698b1b30SVlastimil Babka #include <linux/compaction.h>
383947be19SDave Hansen 
393947be19SDave Hansen #include <asm/tlbflush.h>
403947be19SDave Hansen 
411e5ad9a3SAdrian Bunk #include "internal.h"
421e5ad9a3SAdrian Bunk 
439d0ad8caSDaniel Kiper /*
449d0ad8caSDaniel Kiper  * online_page_callback contains a pointer to the current page onlining
459d0ad8caSDaniel Kiper  * function. Initially it is generic_online_page(). If required, it can be
469d0ad8caSDaniel Kiper  * changed by calling set_online_page_callback() to register a callback and
479d0ad8caSDaniel Kiper  * restore_online_page_callback() to restore the generic callback.
489d0ad8caSDaniel Kiper  */
499d0ad8caSDaniel Kiper 
509d0ad8caSDaniel Kiper static void generic_online_page(struct page *page);
519d0ad8caSDaniel Kiper 
529d0ad8caSDaniel Kiper static online_page_callback_t online_page_callback = generic_online_page;
53bfc8c901SVladimir Davydov static DEFINE_MUTEX(online_page_callback_lock);
549d0ad8caSDaniel Kiper 
55*3f906ba2SThomas Gleixner DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
5620d6c96bSKOSAKI Motohiro 
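/*
 * get_online_mems()/put_online_mems() take the read side of mem_hotplug_lock:
 * any number of readers may inspect memory hotplug state concurrently, while
 * mem_hotplug_begin() is blocked from starting an online/offline operation.
 */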
57*3f906ba2SThomas Gleixner void get_online_mems(void)
58*3f906ba2SThomas Gleixner {
59*3f906ba2SThomas Gleixner 	percpu_down_read(&mem_hotplug_lock);
60*3f906ba2SThomas Gleixner }
61bfc8c901SVladimir Davydov 
62*3f906ba2SThomas Gleixner void put_online_mems(void)
63*3f906ba2SThomas Gleixner {
64*3f906ba2SThomas Gleixner 	percpu_up_read(&mem_hotplug_lock);
65*3f906ba2SThomas Gleixner }
66bfc8c901SVladimir Davydov 
674932381eSMichal Hocko bool movable_node_enabled = false;
684932381eSMichal Hocko 
698604d9e5SVitaly Kuznetsov #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
7031bc3858SVitaly Kuznetsov bool memhp_auto_online;
718604d9e5SVitaly Kuznetsov #else
728604d9e5SVitaly Kuznetsov bool memhp_auto_online = true;
738604d9e5SVitaly Kuznetsov #endif
7431bc3858SVitaly Kuznetsov EXPORT_SYMBOL_GPL(memhp_auto_online);
7531bc3858SVitaly Kuznetsov 
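/*
 * Parse the "memhp_default_state=" kernel command line parameter, e.g.
 *
 *	memhp_default_state=online
 *
 * to choose whether hot-added memory blocks are onlined automatically
 * ("online") or left offline for user space to online later ("offline").
 */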
7686dd995dSVitaly Kuznetsov static int __init setup_memhp_default_state(char *str)
7786dd995dSVitaly Kuznetsov {
7886dd995dSVitaly Kuznetsov 	if (!strcmp(str, "online"))
7986dd995dSVitaly Kuznetsov 		memhp_auto_online = true;
8086dd995dSVitaly Kuznetsov 	else if (!strcmp(str, "offline"))
8186dd995dSVitaly Kuznetsov 		memhp_auto_online = false;
8286dd995dSVitaly Kuznetsov 
8386dd995dSVitaly Kuznetsov 	return 1;
8486dd995dSVitaly Kuznetsov }
8586dd995dSVitaly Kuznetsov __setup("memhp_default_state=", setup_memhp_default_state);
8686dd995dSVitaly Kuznetsov 
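/*
 * mem_hotplug_begin()/mem_hotplug_done() are the write side: they take
 * cpus_read_lock() first and then mem_hotplug_lock for writing, so a hotplug
 * operation excludes all get_online_mems() readers and also prevents CPU
 * hotplug for its duration.
 */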
8730467e0bSDavid Rientjes void mem_hotplug_begin(void)
88bfc8c901SVladimir Davydov {
89*3f906ba2SThomas Gleixner 	cpus_read_lock();
90*3f906ba2SThomas Gleixner 	percpu_down_write(&mem_hotplug_lock);
91bfc8c901SVladimir Davydov }
92bfc8c901SVladimir Davydov 
9330467e0bSDavid Rientjes void mem_hotplug_done(void)
94bfc8c901SVladimir Davydov {
95*3f906ba2SThomas Gleixner 	percpu_up_write(&mem_hotplug_lock);
96*3f906ba2SThomas Gleixner 	cpus_read_unlock();
97bfc8c901SVladimir Davydov }
9820d6c96bSKOSAKI Motohiro 
9945e0b78bSKeith Mannthey /* add this memory to iomem resource */
10045e0b78bSKeith Mannthey static struct resource *register_memory_resource(u64 start, u64 size)
10145e0b78bSKeith Mannthey {
10245e0b78bSKeith Mannthey 	struct resource *res;
10345e0b78bSKeith Mannthey 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1046f754ba4SVitaly Kuznetsov 	if (!res)
1056f754ba4SVitaly Kuznetsov 		return ERR_PTR(-ENOMEM);
10645e0b78bSKeith Mannthey 
10745e0b78bSKeith Mannthey 	res->name = "System RAM";
10845e0b78bSKeith Mannthey 	res->start = start;
10945e0b78bSKeith Mannthey 	res->end = start + size - 1;
110782b8664SToshi Kani 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
11145e0b78bSKeith Mannthey 	if (request_resource(&iomem_resource, res) < 0) {
1124996eed8SToshi Kani 		pr_debug("System RAM resource %pR cannot be added\n", res);
11345e0b78bSKeith Mannthey 		kfree(res);
1146f754ba4SVitaly Kuznetsov 		return ERR_PTR(-EEXIST);
11545e0b78bSKeith Mannthey 	}
11645e0b78bSKeith Mannthey 	return res;
11745e0b78bSKeith Mannthey }
11845e0b78bSKeith Mannthey 
11945e0b78bSKeith Mannthey static void release_memory_resource(struct resource *res)
12045e0b78bSKeith Mannthey {
12145e0b78bSKeith Mannthey 	if (!res)
12245e0b78bSKeith Mannthey 		return;
12345e0b78bSKeith Mannthey 	release_resource(res);
12445e0b78bSKeith Mannthey 	kfree(res);
12545e0b78bSKeith Mannthey 	return;
12645e0b78bSKeith Mannthey }
12745e0b78bSKeith Mannthey 
12853947027SKeith Mannthey #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
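/*
 * get_page_bootmem() tags a boot-time allocated page that backs hotplug
 * metadata (memmap, usemap or pgdat): the type is stashed in page->freelist,
 * the extra info in page_private(), and a reference is taken.
 * put_page_bootmem() drops that reference and returns the page via
 * free_reserved_page() once the extra references are gone.
 */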
12946723bfaSYasuaki Ishimatsu void get_page_bootmem(unsigned long info,  struct page *page,
1305f24ce5fSAndrea Arcangeli 		      unsigned long type)
13104753278SYasunori Goto {
132ddffe98dSYasuaki Ishimatsu 	page->freelist = (void *)type;
13304753278SYasunori Goto 	SetPagePrivate(page);
13404753278SYasunori Goto 	set_page_private(page, info);
135fe896d18SJoonsoo Kim 	page_ref_inc(page);
13604753278SYasunori Goto }
13704753278SYasunori Goto 
138170a5a7eSJiang Liu void put_page_bootmem(struct page *page)
13904753278SYasunori Goto {
1405f24ce5fSAndrea Arcangeli 	unsigned long type;
14104753278SYasunori Goto 
142ddffe98dSYasuaki Ishimatsu 	type = (unsigned long) page->freelist;
1435f24ce5fSAndrea Arcangeli 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
1445f24ce5fSAndrea Arcangeli 	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
14504753278SYasunori Goto 
146fe896d18SJoonsoo Kim 	if (page_ref_dec_return(page) == 1) {
147ddffe98dSYasuaki Ishimatsu 		page->freelist = NULL;
14804753278SYasunori Goto 		ClearPagePrivate(page);
14904753278SYasunori Goto 		set_page_private(page, 0);
1505f24ce5fSAndrea Arcangeli 		INIT_LIST_HEAD(&page->lru);
151170a5a7eSJiang Liu 		free_reserved_page(page);
15204753278SYasunori Goto 	}
15304753278SYasunori Goto }
15404753278SYasunori Goto 
15546723bfaSYasuaki Ishimatsu #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
15646723bfaSYasuaki Ishimatsu #ifndef CONFIG_SPARSEMEM_VMEMMAP
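/*
 * Record which bootmem pages back this section's memmap and usemap by tagging
 * each of them with get_page_bootmem(), so that later hot-remove/free paths
 * can recognize them as section metadata rather than ordinary memory.
 */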
157d92bc318SAdrian Bunk static void register_page_bootmem_info_section(unsigned long start_pfn)
15804753278SYasunori Goto {
15904753278SYasunori Goto 	unsigned long *usemap, mapsize, section_nr, i;
16004753278SYasunori Goto 	struct mem_section *ms;
16104753278SYasunori Goto 	struct page *page, *memmap;
16204753278SYasunori Goto 
16304753278SYasunori Goto 	section_nr = pfn_to_section_nr(start_pfn);
16404753278SYasunori Goto 	ms = __nr_to_section(section_nr);
16504753278SYasunori Goto 
16604753278SYasunori Goto 	/* Get section's memmap address */
16704753278SYasunori Goto 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
16804753278SYasunori Goto 
16904753278SYasunori Goto 	/*
17004753278SYasunori Goto 	 * Get page for the memmap's phys address
17104753278SYasunori Goto 	 * XXX: need more consideration for sparse_vmemmap...
17204753278SYasunori Goto 	 */
17304753278SYasunori Goto 	page = virt_to_page(memmap);
17404753278SYasunori Goto 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
17504753278SYasunori Goto 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
17604753278SYasunori Goto 
17704753278SYasunori Goto 	/* remember memmap's page */
17804753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
17904753278SYasunori Goto 		get_page_bootmem(section_nr, page, SECTION_INFO);
18004753278SYasunori Goto 
18104753278SYasunori Goto 	usemap = __nr_to_section(section_nr)->pageblock_flags;
18204753278SYasunori Goto 	page = virt_to_page(usemap);
18304753278SYasunori Goto 
18404753278SYasunori Goto 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
18504753278SYasunori Goto 
18604753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
187af370fb8SYasunori Goto 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
18804753278SYasunori Goto 
18904753278SYasunori Goto }
19046723bfaSYasuaki Ishimatsu #else /* CONFIG_SPARSEMEM_VMEMMAP */
19146723bfaSYasuaki Ishimatsu static void register_page_bootmem_info_section(unsigned long start_pfn)
19246723bfaSYasuaki Ishimatsu {
19346723bfaSYasuaki Ishimatsu 	unsigned long *usemap, mapsize, section_nr, i;
19446723bfaSYasuaki Ishimatsu 	struct mem_section *ms;
19546723bfaSYasuaki Ishimatsu 	struct page *page, *memmap;
19646723bfaSYasuaki Ishimatsu 
19746723bfaSYasuaki Ishimatsu 	if (!pfn_valid(start_pfn))
19846723bfaSYasuaki Ishimatsu 		return;
19946723bfaSYasuaki Ishimatsu 
20046723bfaSYasuaki Ishimatsu 	section_nr = pfn_to_section_nr(start_pfn);
20146723bfaSYasuaki Ishimatsu 	ms = __nr_to_section(section_nr);
20246723bfaSYasuaki Ishimatsu 
20346723bfaSYasuaki Ishimatsu 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
20446723bfaSYasuaki Ishimatsu 
20546723bfaSYasuaki Ishimatsu 	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
20646723bfaSYasuaki Ishimatsu 
20746723bfaSYasuaki Ishimatsu 	usemap = __nr_to_section(section_nr)->pageblock_flags;
20846723bfaSYasuaki Ishimatsu 	page = virt_to_page(usemap);
20946723bfaSYasuaki Ishimatsu 
21046723bfaSYasuaki Ishimatsu 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
21146723bfaSYasuaki Ishimatsu 
21246723bfaSYasuaki Ishimatsu 	for (i = 0; i < mapsize; i++, page++)
21346723bfaSYasuaki Ishimatsu 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
21446723bfaSYasuaki Ishimatsu }
21546723bfaSYasuaki Ishimatsu #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
21604753278SYasunori Goto 
2177ded384aSLinus Torvalds void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
21804753278SYasunori Goto {
21904753278SYasunori Goto 	unsigned long i, pfn, end_pfn, nr_pages;
22004753278SYasunori Goto 	int node = pgdat->node_id;
22104753278SYasunori Goto 	struct page *page;
22204753278SYasunori Goto 
22304753278SYasunori Goto 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
22404753278SYasunori Goto 	page = virt_to_page(pgdat);
22504753278SYasunori Goto 
22604753278SYasunori Goto 	for (i = 0; i < nr_pages; i++, page++)
22704753278SYasunori Goto 		get_page_bootmem(node, page, NODE_INFO);
22804753278SYasunori Goto 
22904753278SYasunori Goto 	pfn = pgdat->node_start_pfn;
230c1f19495SCody P Schafer 	end_pfn = pgdat_end_pfn(pgdat);
23104753278SYasunori Goto 
2327e9f5eb0STang Chen 	/* register section info */
233f14851afSqiuxishi 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
234f14851afSqiuxishi 		/*
235f14851afSqiuxishi 		 * Some platforms can assign the same pfn to multiple nodes - on
236f14851afSqiuxishi 		 * node0 as well as nodeN.  To avoid registering a pfn against
237f14851afSqiuxishi 		 * multiple nodes, we check that this pfn does not already
2387e9f5eb0STang Chen 		 * reside in some other node.
239f14851afSqiuxishi 		 */
240f65e91dfSYang Shi 		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
24104753278SYasunori Goto 			register_page_bootmem_info_section(pfn);
242f14851afSqiuxishi 	}
24304753278SYasunori Goto }
24446723bfaSYasuaki Ishimatsu #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
24504753278SYasunori Goto 
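/*
 * __add_section() hot-adds a single memory section: it allocates the
 * section's memmap via sparse_add_one_section(), marks every valid page in
 * the section reserved and associated with @nid, and optionally registers a
 * memory block device for it via register_new_memory().
 */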
246f1dd2cd1SMichal Hocko static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
247f1dd2cd1SMichal Hocko 		bool want_memblock)
2483947be19SDave Hansen {
2493947be19SDave Hansen 	int ret;
250f1dd2cd1SMichal Hocko 	int i;
2513947be19SDave Hansen 
252ebd15302SKAMEZAWA Hiroyuki 	if (pfn_valid(phys_start_pfn))
253ebd15302SKAMEZAWA Hiroyuki 		return -EEXIST;
254ebd15302SKAMEZAWA Hiroyuki 
255f1dd2cd1SMichal Hocko 	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
2563947be19SDave Hansen 	if (ret < 0)
2573947be19SDave Hansen 		return ret;
2583947be19SDave Hansen 
259f1dd2cd1SMichal Hocko 	/*
260f1dd2cd1SMichal Hocko 	 * Make all the pages reserved so that nobody will stumble over
261f1dd2cd1SMichal Hocko 	 * half-initialized state.
262f1dd2cd1SMichal Hocko 	 * FIXME: We also have to associate the pages with a node because
263f1dd2cd1SMichal Hocko 	 * pfn_to_node relies on each page having the proper node.
264f1dd2cd1SMichal Hocko 	 */
265f1dd2cd1SMichal Hocko 	for (i = 0; i < PAGES_PER_SECTION; i++) {
266f1dd2cd1SMichal Hocko 		unsigned long pfn = phys_start_pfn + i;
267f1dd2cd1SMichal Hocko 		struct page *page;
268f1dd2cd1SMichal Hocko 		if (!pfn_valid(pfn))
269f1dd2cd1SMichal Hocko 			continue;
270718127ccSYasunori Goto 
271f1dd2cd1SMichal Hocko 		page = pfn_to_page(pfn);
272f1dd2cd1SMichal Hocko 		set_page_node(page, nid);
273f1dd2cd1SMichal Hocko 		SetPageReserved(page);
274f1dd2cd1SMichal Hocko 	}
275718127ccSYasunori Goto 
2761b862aecSMichal Hocko 	if (!want_memblock)
2771b862aecSMichal Hocko 		return 0;
2781b862aecSMichal Hocko 
279c04fc586SGary Hade 	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
2803947be19SDave Hansen }
2813947be19SDave Hansen 
2824edd7cefSDavid Rientjes /*
2834edd7cefSDavid Rientjes  * Reasonably generic function for adding memory.  It is
2844edd7cefSDavid Rientjes  * expected that archs that support memory hotplug will
2854edd7cefSDavid Rientjes  * call this function after deciding the zone to which to
2864edd7cefSDavid Rientjes  * add the new pages.
2874edd7cefSDavid Rientjes  */
288f1dd2cd1SMichal Hocko int __ref __add_pages(int nid, unsigned long phys_start_pfn,
2891b862aecSMichal Hocko 			unsigned long nr_pages, bool want_memblock)
2904edd7cefSDavid Rientjes {
2914edd7cefSDavid Rientjes 	unsigned long i;
2924edd7cefSDavid Rientjes 	int err = 0;
2934edd7cefSDavid Rientjes 	int start_sec, end_sec;
2944b94ffdcSDan Williams 	struct vmem_altmap *altmap;
2954b94ffdcSDan Williams 
2964edd7cefSDavid Rientjes 	/* during mem_map initialization, align the hot-added range to sections */
2974edd7cefSDavid Rientjes 	start_sec = pfn_to_section_nr(phys_start_pfn);
2984edd7cefSDavid Rientjes 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
2994edd7cefSDavid Rientjes 
3004b94ffdcSDan Williams 	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
3014b94ffdcSDan Williams 	if (altmap) {
3024b94ffdcSDan Williams 		/*
3034b94ffdcSDan Williams 		 * Validate altmap is within bounds of the total request
3044b94ffdcSDan Williams 		 */
3054b94ffdcSDan Williams 		if (altmap->base_pfn != phys_start_pfn
3064b94ffdcSDan Williams 				|| vmem_altmap_offset(altmap) > nr_pages) {
3074b94ffdcSDan Williams 			pr_warn_once("memory add fail, invalid altmap\n");
3087cf91a98SJoonsoo Kim 			err = -EINVAL;
3097cf91a98SJoonsoo Kim 			goto out;
3104b94ffdcSDan Williams 		}
3114b94ffdcSDan Williams 		altmap->alloc = 0;
3124b94ffdcSDan Williams 	}
3134b94ffdcSDan Williams 
3144edd7cefSDavid Rientjes 	for (i = start_sec; i <= end_sec; i++) {
315f1dd2cd1SMichal Hocko 		err = __add_section(nid, section_nr_to_pfn(i), want_memblock);
3164edd7cefSDavid Rientjes 
3174edd7cefSDavid Rientjes 		/*
3184edd7cefSDavid Rientjes 		 * EEXIST is finally dealt with by the ioresource collision
3194edd7cefSDavid Rientjes 		 * check; see add_memory() => register_memory_resource().
3204edd7cefSDavid Rientjes 		 * A warning will be printed if there is a collision.
3214edd7cefSDavid Rientjes 		 */
3224edd7cefSDavid Rientjes 		if (err && (err != -EEXIST))
3234edd7cefSDavid Rientjes 			break;
3244edd7cefSDavid Rientjes 		err = 0;
3254edd7cefSDavid Rientjes 	}
326c435a390SZhu Guihua 	vmemmap_populate_print_last();
3277cf91a98SJoonsoo Kim out:
3284edd7cefSDavid Rientjes 	return err;
3294edd7cefSDavid Rientjes }
3304edd7cefSDavid Rientjes EXPORT_SYMBOL_GPL(__add_pages);
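/*
 * Typical usage (a sketch; the actual caller is architecture code such as
 * arch_add_memory(), whose details vary per architecture):
 *
 *	unsigned long start_pfn = start >> PAGE_SHIFT;
 *	unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
 */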
3314edd7cefSDavid Rientjes 
3324edd7cefSDavid Rientjes #ifdef CONFIG_MEMORY_HOTREMOVE
333815121d2SYasuaki Ishimatsu /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
334815121d2SYasuaki Ishimatsu static int find_smallest_section_pfn(int nid, struct zone *zone,
335815121d2SYasuaki Ishimatsu 				     unsigned long start_pfn,
336815121d2SYasuaki Ishimatsu 				     unsigned long end_pfn)
337815121d2SYasuaki Ishimatsu {
338815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
339815121d2SYasuaki Ishimatsu 
340815121d2SYasuaki Ishimatsu 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
341815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(start_pfn);
342815121d2SYasuaki Ishimatsu 
343815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
344815121d2SYasuaki Ishimatsu 			continue;
345815121d2SYasuaki Ishimatsu 
346815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(start_pfn) != nid))
347815121d2SYasuaki Ishimatsu 			continue;
348815121d2SYasuaki Ishimatsu 
349815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
350815121d2SYasuaki Ishimatsu 			continue;
351815121d2SYasuaki Ishimatsu 
352815121d2SYasuaki Ishimatsu 		return start_pfn;
353815121d2SYasuaki Ishimatsu 	}
354815121d2SYasuaki Ishimatsu 
355815121d2SYasuaki Ishimatsu 	return 0;
356815121d2SYasuaki Ishimatsu }
357815121d2SYasuaki Ishimatsu 
358815121d2SYasuaki Ishimatsu /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
359815121d2SYasuaki Ishimatsu static int find_biggest_section_pfn(int nid, struct zone *zone,
360815121d2SYasuaki Ishimatsu 				    unsigned long start_pfn,
361815121d2SYasuaki Ishimatsu 				    unsigned long end_pfn)
362815121d2SYasuaki Ishimatsu {
363815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
364815121d2SYasuaki Ishimatsu 	unsigned long pfn;
365815121d2SYasuaki Ishimatsu 
366815121d2SYasuaki Ishimatsu 	/* pfn is the end pfn of a memory section. */
367815121d2SYasuaki Ishimatsu 	pfn = end_pfn - 1;
368815121d2SYasuaki Ishimatsu 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
369815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
370815121d2SYasuaki Ishimatsu 
371815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
372815121d2SYasuaki Ishimatsu 			continue;
373815121d2SYasuaki Ishimatsu 
374815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(pfn) != nid))
375815121d2SYasuaki Ishimatsu 			continue;
376815121d2SYasuaki Ishimatsu 
377815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(pfn)))
378815121d2SYasuaki Ishimatsu 			continue;
379815121d2SYasuaki Ishimatsu 
380815121d2SYasuaki Ishimatsu 		return pfn;
381815121d2SYasuaki Ishimatsu 	}
382815121d2SYasuaki Ishimatsu 
383815121d2SYasuaki Ishimatsu 	return 0;
384815121d2SYasuaki Ishimatsu }
385815121d2SYasuaki Ishimatsu 
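/*
 * Shrink the zone span after the section starting at @start_pfn has been
 * removed. If that section was at either end of the zone, pull the
 * corresponding boundary inward to the next valid section; if no valid
 * section is left at all, reset the zone to an empty span.
 */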
386815121d2SYasuaki Ishimatsu static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
387815121d2SYasuaki Ishimatsu 			     unsigned long end_pfn)
388815121d2SYasuaki Ishimatsu {
389815121d2SYasuaki Ishimatsu 	unsigned long zone_start_pfn = zone->zone_start_pfn;
390c33bc315SXishi Qiu 	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
391c33bc315SXishi Qiu 	unsigned long zone_end_pfn = z;
392815121d2SYasuaki Ishimatsu 	unsigned long pfn;
393815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
394815121d2SYasuaki Ishimatsu 	int nid = zone_to_nid(zone);
395815121d2SYasuaki Ishimatsu 
396815121d2SYasuaki Ishimatsu 	zone_span_writelock(zone);
397815121d2SYasuaki Ishimatsu 	if (zone_start_pfn == start_pfn) {
398815121d2SYasuaki Ishimatsu 		/*
399815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the zone, we need
400815121d2SYasuaki Ishimatsu 		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
401815121d2SYasuaki Ishimatsu 		 * In this case, find the second smallest valid mem_section
402815121d2SYasuaki Ishimatsu 		 * and use it to shrink the zone.
403815121d2SYasuaki Ishimatsu 		 */
404815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
405815121d2SYasuaki Ishimatsu 						zone_end_pfn);
406815121d2SYasuaki Ishimatsu 		if (pfn) {
407815121d2SYasuaki Ishimatsu 			zone->zone_start_pfn = pfn;
408815121d2SYasuaki Ishimatsu 			zone->spanned_pages = zone_end_pfn - pfn;
409815121d2SYasuaki Ishimatsu 		}
410815121d2SYasuaki Ishimatsu 	} else if (zone_end_pfn == end_pfn) {
411815121d2SYasuaki Ishimatsu 		/*
412815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the zone, we need
413815121d2SYasuaki Ishimatsu 		 * to shrink zone->spanned_pages.
414815121d2SYasuaki Ishimatsu 		 * In this case, find the second biggest valid mem_section
415815121d2SYasuaki Ishimatsu 		 * and use it to shrink the zone.
416815121d2SYasuaki Ishimatsu 		 */
417815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
418815121d2SYasuaki Ishimatsu 					       start_pfn);
419815121d2SYasuaki Ishimatsu 		if (pfn)
420815121d2SYasuaki Ishimatsu 			zone->spanned_pages = pfn - zone_start_pfn + 1;
421815121d2SYasuaki Ishimatsu 	}
422815121d2SYasuaki Ishimatsu 
423815121d2SYasuaki Ishimatsu 	/*
424815121d2SYasuaki Ishimatsu 	 * The section is neither the biggest nor the smallest mem_section in
425815121d2SYasuaki Ishimatsu 	 * the zone; it only creates a hole in the zone, so we need not change
426815121d2SYasuaki Ishimatsu 	 * the zone's span. But the zone may now contain nothing but holes, so
427815121d2SYasuaki Ishimatsu 	 * check whether any valid section is left in it.
428815121d2SYasuaki Ishimatsu 	 */
429815121d2SYasuaki Ishimatsu 	pfn = zone_start_pfn;
430815121d2SYasuaki Ishimatsu 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
431815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
432815121d2SYasuaki Ishimatsu 
433815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
434815121d2SYasuaki Ishimatsu 			continue;
435815121d2SYasuaki Ishimatsu 
436815121d2SYasuaki Ishimatsu 		if (page_zone(pfn_to_page(pfn)) != zone)
437815121d2SYasuaki Ishimatsu 			continue;
438815121d2SYasuaki Ishimatsu 
439815121d2SYasuaki Ishimatsu 		 /* Skip the section that is currently being removed */
440815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
441815121d2SYasuaki Ishimatsu 			continue;
442815121d2SYasuaki Ishimatsu 
443815121d2SYasuaki Ishimatsu 		/* If we find valid section, we have nothing to do */
444815121d2SYasuaki Ishimatsu 		zone_span_writeunlock(zone);
445815121d2SYasuaki Ishimatsu 		return;
446815121d2SYasuaki Ishimatsu 	}
447815121d2SYasuaki Ishimatsu 
448815121d2SYasuaki Ishimatsu 	/* The zone has no valid section */
449815121d2SYasuaki Ishimatsu 	zone->zone_start_pfn = 0;
450815121d2SYasuaki Ishimatsu 	zone->spanned_pages = 0;
451815121d2SYasuaki Ishimatsu 	zone_span_writeunlock(zone);
452815121d2SYasuaki Ishimatsu }
453815121d2SYasuaki Ishimatsu 
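/* Same as shrink_zone_span(), but for the node (pgdat) span. */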
454815121d2SYasuaki Ishimatsu static void shrink_pgdat_span(struct pglist_data *pgdat,
455815121d2SYasuaki Ishimatsu 			      unsigned long start_pfn, unsigned long end_pfn)
456815121d2SYasuaki Ishimatsu {
457815121d2SYasuaki Ishimatsu 	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
45883285c72SXishi Qiu 	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
45983285c72SXishi Qiu 	unsigned long pgdat_end_pfn = p;
460815121d2SYasuaki Ishimatsu 	unsigned long pfn;
461815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
462815121d2SYasuaki Ishimatsu 	int nid = pgdat->node_id;
463815121d2SYasuaki Ishimatsu 
464815121d2SYasuaki Ishimatsu 	if (pgdat_start_pfn == start_pfn) {
465815121d2SYasuaki Ishimatsu 		/*
466815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the pgdat, we need
467815121d2SYasuaki Ishimatsu 		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
468815121d2SYasuaki Ishimatsu 		 * In this case, find the second smallest valid mem_section
469815121d2SYasuaki Ishimatsu 		 * and use it to shrink the node span.
470815121d2SYasuaki Ishimatsu 		 */
471815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
472815121d2SYasuaki Ishimatsu 						pgdat_end_pfn);
473815121d2SYasuaki Ishimatsu 		if (pfn) {
474815121d2SYasuaki Ishimatsu 			pgdat->node_start_pfn = pfn;
475815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
476815121d2SYasuaki Ishimatsu 		}
477815121d2SYasuaki Ishimatsu 	} else if (pgdat_end_pfn == end_pfn) {
478815121d2SYasuaki Ishimatsu 		/*
479815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the pgdat, we need
480815121d2SYasuaki Ishimatsu 		 * to shrink pgdat->node_spanned_pages.
481815121d2SYasuaki Ishimatsu 		 * In this case, find the second biggest valid mem_section
482815121d2SYasuaki Ishimatsu 		 * and use it to shrink the node span.
483815121d2SYasuaki Ishimatsu 		 */
484815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
485815121d2SYasuaki Ishimatsu 					       start_pfn);
486815121d2SYasuaki Ishimatsu 		if (pfn)
487815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
488815121d2SYasuaki Ishimatsu 	}
489815121d2SYasuaki Ishimatsu 
490815121d2SYasuaki Ishimatsu 	/*
491815121d2SYasuaki Ishimatsu 	 * If the section is neither the biggest nor the smallest mem_section
492815121d2SYasuaki Ishimatsu 	 * in the pgdat, it only creates a hole in the pgdat, so we need not
493815121d2SYasuaki Ishimatsu 	 * change the pgdat.
494815121d2SYasuaki Ishimatsu 	 * But the pgdat may now contain nothing but holes, so check whether
495815121d2SYasuaki Ishimatsu 	 * any valid section is left in it.
496815121d2SYasuaki Ishimatsu 	 */
497815121d2SYasuaki Ishimatsu 	pfn = pgdat_start_pfn;
498815121d2SYasuaki Ishimatsu 	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
499815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
500815121d2SYasuaki Ishimatsu 
501815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
502815121d2SYasuaki Ishimatsu 			continue;
503815121d2SYasuaki Ishimatsu 
504815121d2SYasuaki Ishimatsu 		if (pfn_to_nid(pfn) != nid)
505815121d2SYasuaki Ishimatsu 			continue;
506815121d2SYasuaki Ishimatsu 
507815121d2SYasuaki Ishimatsu 		 /* Skip the section that is currently being removed */
508815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
509815121d2SYasuaki Ishimatsu 			continue;
510815121d2SYasuaki Ishimatsu 
511815121d2SYasuaki Ishimatsu 		/* If we find valid section, we have nothing to do */
512815121d2SYasuaki Ishimatsu 		return;
513815121d2SYasuaki Ishimatsu 	}
514815121d2SYasuaki Ishimatsu 
515815121d2SYasuaki Ishimatsu 	/* The pgdat has no valid section */
516815121d2SYasuaki Ishimatsu 	pgdat->node_start_pfn = 0;
517815121d2SYasuaki Ishimatsu 	pgdat->node_spanned_pages = 0;
518815121d2SYasuaki Ishimatsu }
519815121d2SYasuaki Ishimatsu 
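/*
 * Shrink both the zone and the node span by one section's worth of pages
 * (PAGES_PER_SECTION), under the pgdat resize lock.
 */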
520815121d2SYasuaki Ishimatsu static void __remove_zone(struct zone *zone, unsigned long start_pfn)
521815121d2SYasuaki Ishimatsu {
522815121d2SYasuaki Ishimatsu 	struct pglist_data *pgdat = zone->zone_pgdat;
523815121d2SYasuaki Ishimatsu 	int nr_pages = PAGES_PER_SECTION;
524815121d2SYasuaki Ishimatsu 	unsigned long flags;
525815121d2SYasuaki Ishimatsu 
526815121d2SYasuaki Ishimatsu 	pgdat_resize_lock(zone->zone_pgdat, &flags);
527815121d2SYasuaki Ishimatsu 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
528815121d2SYasuaki Ishimatsu 	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
529815121d2SYasuaki Ishimatsu 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
530815121d2SYasuaki Ishimatsu }
531815121d2SYasuaki Ishimatsu 
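/*
 * __remove_section() tears down a single section: it unregisters the memory
 * block sysfs entry, shrinks the zone/node spans via __remove_zone(), and
 * frees the section's memmap via sparse_remove_one_section().
 */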
5324b94ffdcSDan Williams static int __remove_section(struct zone *zone, struct mem_section *ms,
5334b94ffdcSDan Williams 		unsigned long map_offset)
534ea01ea93SBadari Pulavarty {
535815121d2SYasuaki Ishimatsu 	unsigned long start_pfn;
536815121d2SYasuaki Ishimatsu 	int scn_nr;
537ea01ea93SBadari Pulavarty 	int ret = -EINVAL;
538ea01ea93SBadari Pulavarty 
539ea01ea93SBadari Pulavarty 	if (!valid_section(ms))
540ea01ea93SBadari Pulavarty 		return ret;
541ea01ea93SBadari Pulavarty 
542ea01ea93SBadari Pulavarty 	ret = unregister_memory_section(ms);
543ea01ea93SBadari Pulavarty 	if (ret)
544ea01ea93SBadari Pulavarty 		return ret;
545ea01ea93SBadari Pulavarty 
546815121d2SYasuaki Ishimatsu 	scn_nr = __section_nr(ms);
547815121d2SYasuaki Ishimatsu 	start_pfn = section_nr_to_pfn(scn_nr);
548815121d2SYasuaki Ishimatsu 	__remove_zone(zone, start_pfn);
549815121d2SYasuaki Ishimatsu 
5504b94ffdcSDan Williams 	sparse_remove_one_section(zone, ms, map_offset);
551ea01ea93SBadari Pulavarty 	return 0;
552ea01ea93SBadari Pulavarty }
553ea01ea93SBadari Pulavarty 
554ea01ea93SBadari Pulavarty /**
555ea01ea93SBadari Pulavarty  * __remove_pages() - remove sections of pages from a zone
556ea01ea93SBadari Pulavarty  * @zone: zone from which pages need to be removed
557ea01ea93SBadari Pulavarty  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
558ea01ea93SBadari Pulavarty  * @nr_pages: number of pages to remove (must be multiple of section size)
559ea01ea93SBadari Pulavarty  *
560ea01ea93SBadari Pulavarty  * Generic helper function to remove section mappings and sysfs entries
561ea01ea93SBadari Pulavarty  * for the section of the memory we are removing. Caller needs to make
562ea01ea93SBadari Pulavarty  * sure that pages are marked reserved and zones are adjusted properly by
563ea01ea93SBadari Pulavarty  * calling offline_pages().
564ea01ea93SBadari Pulavarty  */
565ea01ea93SBadari Pulavarty int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
566ea01ea93SBadari Pulavarty 		 unsigned long nr_pages)
567ea01ea93SBadari Pulavarty {
568fe74ebb1SToshi Kani 	unsigned long i;
5694b94ffdcSDan Williams 	unsigned long map_offset = 0;
5704b94ffdcSDan Williams 	int sections_to_remove, ret = 0;
5714b94ffdcSDan Williams 
5724b94ffdcSDan Williams 	/* In the ZONE_DEVICE case device driver owns the memory region */
5734b94ffdcSDan Williams 	if (is_dev_zone(zone)) {
5744b94ffdcSDan Williams 		struct page *page = pfn_to_page(phys_start_pfn);
5754b94ffdcSDan Williams 		struct vmem_altmap *altmap;
5764b94ffdcSDan Williams 
5774b94ffdcSDan Williams 		altmap = to_vmem_altmap((unsigned long) page);
5784b94ffdcSDan Williams 		if (altmap)
5794b94ffdcSDan Williams 			map_offset = vmem_altmap_offset(altmap);
5804b94ffdcSDan Williams 	} else {
581fe74ebb1SToshi Kani 		resource_size_t start, size;
5824b94ffdcSDan Williams 
5834b94ffdcSDan Williams 		start = phys_start_pfn << PAGE_SHIFT;
5844b94ffdcSDan Williams 		size = nr_pages * PAGE_SIZE;
5854b94ffdcSDan Williams 
5864b94ffdcSDan Williams 		ret = release_mem_region_adjustable(&iomem_resource, start,
5874b94ffdcSDan Williams 					size);
5884b94ffdcSDan Williams 		if (ret) {
5894b94ffdcSDan Williams 			resource_size_t endres = start + size - 1;
5904b94ffdcSDan Williams 
5914b94ffdcSDan Williams 			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
5924b94ffdcSDan Williams 					&start, &endres, ret);
5934b94ffdcSDan Williams 		}
5944b94ffdcSDan Williams 	}
595ea01ea93SBadari Pulavarty 
5967cf91a98SJoonsoo Kim 	clear_zone_contiguous(zone);
5977cf91a98SJoonsoo Kim 
598ea01ea93SBadari Pulavarty 	/*
599ea01ea93SBadari Pulavarty 	 * We can only remove entire sections
600ea01ea93SBadari Pulavarty 	 */
601ea01ea93SBadari Pulavarty 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
602ea01ea93SBadari Pulavarty 	BUG_ON(nr_pages % PAGES_PER_SECTION);
603ea01ea93SBadari Pulavarty 
604ea01ea93SBadari Pulavarty 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
605ea01ea93SBadari Pulavarty 	for (i = 0; i < sections_to_remove; i++) {
606ea01ea93SBadari Pulavarty 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
6074b94ffdcSDan Williams 
6084b94ffdcSDan Williams 		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
6094b94ffdcSDan Williams 		map_offset = 0;
610ea01ea93SBadari Pulavarty 		if (ret)
611ea01ea93SBadari Pulavarty 			break;
612ea01ea93SBadari Pulavarty 	}
6137cf91a98SJoonsoo Kim 
6147cf91a98SJoonsoo Kim 	set_zone_contiguous(zone);
6157cf91a98SJoonsoo Kim 
616ea01ea93SBadari Pulavarty 	return ret;
617ea01ea93SBadari Pulavarty }
6184edd7cefSDavid Rientjes #endif /* CONFIG_MEMORY_HOTREMOVE */
619ea01ea93SBadari Pulavarty 
6209d0ad8caSDaniel Kiper int set_online_page_callback(online_page_callback_t callback)
6219d0ad8caSDaniel Kiper {
6229d0ad8caSDaniel Kiper 	int rc = -EINVAL;
6239d0ad8caSDaniel Kiper 
624bfc8c901SVladimir Davydov 	get_online_mems();
625bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
6269d0ad8caSDaniel Kiper 
6279d0ad8caSDaniel Kiper 	if (online_page_callback == generic_online_page) {
6289d0ad8caSDaniel Kiper 		online_page_callback = callback;
6299d0ad8caSDaniel Kiper 		rc = 0;
6309d0ad8caSDaniel Kiper 	}
6319d0ad8caSDaniel Kiper 
632bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
633bfc8c901SVladimir Davydov 	put_online_mems();
6349d0ad8caSDaniel Kiper 
6359d0ad8caSDaniel Kiper 	return rc;
6369d0ad8caSDaniel Kiper }
6379d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(set_online_page_callback);
6389d0ad8caSDaniel Kiper 
6399d0ad8caSDaniel Kiper int restore_online_page_callback(online_page_callback_t callback)
6409d0ad8caSDaniel Kiper {
6419d0ad8caSDaniel Kiper 	int rc = -EINVAL;
6429d0ad8caSDaniel Kiper 
643bfc8c901SVladimir Davydov 	get_online_mems();
644bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
6459d0ad8caSDaniel Kiper 
6469d0ad8caSDaniel Kiper 	if (online_page_callback == callback) {
6479d0ad8caSDaniel Kiper 		online_page_callback = generic_online_page;
6489d0ad8caSDaniel Kiper 		rc = 0;
6499d0ad8caSDaniel Kiper 	}
6509d0ad8caSDaniel Kiper 
651bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
652bfc8c901SVladimir Davydov 	put_online_mems();
6539d0ad8caSDaniel Kiper 
6549d0ad8caSDaniel Kiper 	return rc;
6559d0ad8caSDaniel Kiper }
6569d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(restore_online_page_callback);
6579d0ad8caSDaniel Kiper 
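/*
 * The __online_page_*() helpers below are exported so that a driver which
 * installs its own callback via set_online_page_callback() can still reuse
 * the individual steps of generic_online_page() (adjusting the managed page
 * count and freeing the page) as needed.
 */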
6589d0ad8caSDaniel Kiper void __online_page_set_limits(struct page *page)
659180c06efSJeremy Fitzhardinge {
6609d0ad8caSDaniel Kiper }
6619d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_set_limits);
6629d0ad8caSDaniel Kiper 
6639d0ad8caSDaniel Kiper void __online_page_increment_counters(struct page *page)
6649d0ad8caSDaniel Kiper {
6653dcc0571SJiang Liu 	adjust_managed_page_count(page, 1);
6669d0ad8caSDaniel Kiper }
6679d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_increment_counters);
668180c06efSJeremy Fitzhardinge 
6699d0ad8caSDaniel Kiper void __online_page_free(struct page *page)
6709d0ad8caSDaniel Kiper {
6713dcc0571SJiang Liu 	__free_reserved_page(page);
672180c06efSJeremy Fitzhardinge }
6739d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_free);
6749d0ad8caSDaniel Kiper 
6759d0ad8caSDaniel Kiper static void generic_online_page(struct page *page)
6769d0ad8caSDaniel Kiper {
6779d0ad8caSDaniel Kiper 	__online_page_set_limits(page);
6789d0ad8caSDaniel Kiper 	__online_page_increment_counters(page);
6799d0ad8caSDaniel Kiper 	__online_page_free(page);
6809d0ad8caSDaniel Kiper }
681180c06efSJeremy Fitzhardinge 
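/*
 * walk_system_ram_range() callback used by online_pages(): if the first page
 * of the range is still reserved, push every page in the range through the
 * registered online_page_callback, then mark the covered sections online and
 * report the running count of onlined pages back through @arg.
 */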
68275884fb1SKAMEZAWA Hiroyuki static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
68375884fb1SKAMEZAWA Hiroyuki 			void *arg)
6843947be19SDave Hansen {
6853947be19SDave Hansen 	unsigned long i;
68675884fb1SKAMEZAWA Hiroyuki 	unsigned long onlined_pages = *(unsigned long *)arg;
68775884fb1SKAMEZAWA Hiroyuki 	struct page *page;
6882d070eabSMichal Hocko 
68975884fb1SKAMEZAWA Hiroyuki 	if (PageReserved(pfn_to_page(start_pfn)))
69075884fb1SKAMEZAWA Hiroyuki 		for (i = 0; i < nr_pages; i++) {
69175884fb1SKAMEZAWA Hiroyuki 			page = pfn_to_page(start_pfn + i);
6929d0ad8caSDaniel Kiper 			(*online_page_callback)(page);
69375884fb1SKAMEZAWA Hiroyuki 			onlined_pages++;
69475884fb1SKAMEZAWA Hiroyuki 		}
6952d070eabSMichal Hocko 
6962d070eabSMichal Hocko 	online_mem_sections(start_pfn, start_pfn + nr_pages);
6972d070eabSMichal Hocko 
69875884fb1SKAMEZAWA Hiroyuki 	*(unsigned long *)arg = onlined_pages;
69975884fb1SKAMEZAWA Hiroyuki 	return 0;
70075884fb1SKAMEZAWA Hiroyuki }
70175884fb1SKAMEZAWA Hiroyuki 
702d9713679SLai Jiangshan /* check which state of node_states will be changed when online memory */
703d9713679SLai Jiangshan static void node_states_check_changes_online(unsigned long nr_pages,
704d9713679SLai Jiangshan 	struct zone *zone, struct memory_notify *arg)
705d9713679SLai Jiangshan {
706d9713679SLai Jiangshan 	int nid = zone_to_nid(zone);
707d9713679SLai Jiangshan 	enum zone_type zone_last = ZONE_NORMAL;
708d9713679SLai Jiangshan 
709d9713679SLai Jiangshan 	/*
7106715ddf9SLai Jiangshan 	 * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
7116715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL, so set
7126715ddf9SLai Jiangshan 	 * zone_last to ZONE_NORMAL.
713d9713679SLai Jiangshan 	 *
7146715ddf9SLai Jiangshan 	 * If we have neither HIGHMEM nor a movable node,
7156715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
7166715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
717d9713679SLai Jiangshan 	 */
7186715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
719d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
720d9713679SLai Jiangshan 
721d9713679SLai Jiangshan 	/*
722d9713679SLai Jiangshan 	 * If the memory to be onlined is in a zone of 0...zone_last, and
723d9713679SLai Jiangshan 	 * the zones of 0...zone_last have no memory before onlining, we will
724d9713679SLai Jiangshan 	 * need to add the node to node_states[N_NORMAL_MEMORY] after
725d9713679SLai Jiangshan 	 * the memory is onlined.
726d9713679SLai Jiangshan 	 */
727d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
728d9713679SLai Jiangshan 		arg->status_change_nid_normal = nid;
729d9713679SLai Jiangshan 	else
730d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
731d9713679SLai Jiangshan 
7326715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
7336715ddf9SLai Jiangshan 	/*
7346715ddf9SLai Jiangshan 	 * If we have a movable node, node_states[N_HIGH_MEMORY]
7356715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM, so set
7366715ddf9SLai Jiangshan 	 * zone_last to ZONE_HIGHMEM.
7376715ddf9SLai Jiangshan 	 *
7386715ddf9SLai Jiangshan 	 * If we don't have a movable node, node_states[N_NORMAL_MEMORY]
7396715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE, so set
7406715ddf9SLai Jiangshan 	 * zone_last to ZONE_MOVABLE.
7416715ddf9SLai Jiangshan 	 */
7426715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
7436715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
7446715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
7456715ddf9SLai Jiangshan 
7466715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
7476715ddf9SLai Jiangshan 		arg->status_change_nid_high = nid;
7486715ddf9SLai Jiangshan 	else
7496715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
7506715ddf9SLai Jiangshan #else
7516715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
7526715ddf9SLai Jiangshan #endif
7536715ddf9SLai Jiangshan 
754d9713679SLai Jiangshan 	/*
755d9713679SLai Jiangshan 	 * If the node doesn't have memory before onlining, we will need to
7566715ddf9SLai Jiangshan 	 * add the node to node_states[N_MEMORY] after the memory
757d9713679SLai Jiangshan 	 * is onlined.
758d9713679SLai Jiangshan 	 */
7596715ddf9SLai Jiangshan 	if (!node_state(nid, N_MEMORY))
760d9713679SLai Jiangshan 		arg->status_change_nid = nid;
761d9713679SLai Jiangshan 	else
762d9713679SLai Jiangshan 		arg->status_change_nid = -1;
763d9713679SLai Jiangshan }
764d9713679SLai Jiangshan 
765d9713679SLai Jiangshan static void node_states_set_node(int node, struct memory_notify *arg)
766d9713679SLai Jiangshan {
767d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
768d9713679SLai Jiangshan 		node_set_state(node, N_NORMAL_MEMORY);
769d9713679SLai Jiangshan 
7706715ddf9SLai Jiangshan 	if (arg->status_change_nid_high >= 0)
771d9713679SLai Jiangshan 		node_set_state(node, N_HIGH_MEMORY);
7726715ddf9SLai Jiangshan 
7736715ddf9SLai Jiangshan 	node_set_state(node, N_MEMORY);
774d9713679SLai Jiangshan }
775d9713679SLai Jiangshan 
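/*
 * Decide whether the requested online_type can be honoured for this pfn range
 * without making ZONE_NORMAL and ZONE_MOVABLE overlap: ONLINE_KERNEL must stay
 * entirely below the start of the movable zone, ONLINE_MOVABLE must stay at or
 * above the end of the default kernel zone, and ONLINE_KEEP always succeeds.
 */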
776f1dd2cd1SMichal Hocko bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
777df429ac0SReza Arbab {
778f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
779f1dd2cd1SMichal Hocko 	struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
780c246a213SMichal Hocko 	struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);
781df429ac0SReza Arbab 
782f1dd2cd1SMichal Hocko 	/*
783f1dd2cd1SMichal Hocko 	 * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
784f1dd2cd1SMichal Hocko 	 * physically before ZONE_MOVABLE. All we need is that they do not
785f1dd2cd1SMichal Hocko 	 * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE,
786f1dd2cd1SMichal Hocko 	 * though, so let's stick with that for simplicity for now.
787f1dd2cd1SMichal Hocko 	 * TODO make sure we do not overlap with ZONE_DEVICE
788f1dd2cd1SMichal Hocko 	 */
789f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_KERNEL) {
790f1dd2cd1SMichal Hocko 		if (zone_is_empty(movable_zone))
7918a1f780eSYasuaki Ishimatsu 			return true;
792f1dd2cd1SMichal Hocko 		return movable_zone->zone_start_pfn >= pfn + nr_pages;
793f1dd2cd1SMichal Hocko 	} else if (online_type == MMOP_ONLINE_MOVABLE) {
794c246a213SMichal Hocko 		return zone_end_pfn(default_zone) <= pfn;
795f1dd2cd1SMichal Hocko 	}
796f1dd2cd1SMichal Hocko 
797f1dd2cd1SMichal Hocko 	/* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
798f1dd2cd1SMichal Hocko 	return online_type == MMOP_ONLINE_KEEP;
799f1dd2cd1SMichal Hocko }
800f1dd2cd1SMichal Hocko 
801f1dd2cd1SMichal Hocko static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
802f1dd2cd1SMichal Hocko 		unsigned long nr_pages)
803f1dd2cd1SMichal Hocko {
804f1dd2cd1SMichal Hocko 	unsigned long old_end_pfn = zone_end_pfn(zone);
805f1dd2cd1SMichal Hocko 
806f1dd2cd1SMichal Hocko 	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
807f1dd2cd1SMichal Hocko 		zone->zone_start_pfn = start_pfn;
808f1dd2cd1SMichal Hocko 
809f1dd2cd1SMichal Hocko 	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
810f1dd2cd1SMichal Hocko }
811f1dd2cd1SMichal Hocko 
812f1dd2cd1SMichal Hocko static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
813f1dd2cd1SMichal Hocko                                      unsigned long nr_pages)
814f1dd2cd1SMichal Hocko {
815f1dd2cd1SMichal Hocko 	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
816f1dd2cd1SMichal Hocko 
817f1dd2cd1SMichal Hocko 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
818f1dd2cd1SMichal Hocko 		pgdat->node_start_pfn = start_pfn;
819f1dd2cd1SMichal Hocko 
820f1dd2cd1SMichal Hocko 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
821f1dd2cd1SMichal Hocko }
822f1dd2cd1SMichal Hocko 
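/*
 * Associate a pfn range with @zone: grow the zone and node spans to cover the
 * range (initializing the zone if it was empty) and initialize the struct
 * pages for the hotplugged range via memmap_init_zone(..., MEMMAP_HOTPLUG).
 */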
823cdf72f25SMichal Hocko void __ref move_pfn_range_to_zone(struct zone *zone,
824f1dd2cd1SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
825f1dd2cd1SMichal Hocko {
826f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = zone->zone_pgdat;
827f1dd2cd1SMichal Hocko 	int nid = pgdat->node_id;
828f1dd2cd1SMichal Hocko 	unsigned long flags;
829f1dd2cd1SMichal Hocko 
830f1dd2cd1SMichal Hocko 	if (zone_is_empty(zone))
831f1dd2cd1SMichal Hocko 		init_currently_empty_zone(zone, start_pfn, nr_pages);
832f1dd2cd1SMichal Hocko 
833f1dd2cd1SMichal Hocko 	clear_zone_contiguous(zone);
834f1dd2cd1SMichal Hocko 
835f1dd2cd1SMichal Hocko 	/* TODO: pgdat is irqsave while the zone is not; it used to be like that before. */
836f1dd2cd1SMichal Hocko 	pgdat_resize_lock(pgdat, &flags);
837f1dd2cd1SMichal Hocko 	zone_span_writelock(zone);
838f1dd2cd1SMichal Hocko 	resize_zone_range(zone, start_pfn, nr_pages);
839f1dd2cd1SMichal Hocko 	zone_span_writeunlock(zone);
840f1dd2cd1SMichal Hocko 	resize_pgdat_range(pgdat, start_pfn, nr_pages);
841f1dd2cd1SMichal Hocko 	pgdat_resize_unlock(pgdat, &flags);
842f1dd2cd1SMichal Hocko 
843f1dd2cd1SMichal Hocko 	/*
844f1dd2cd1SMichal Hocko 	 * TODO: now we have a visible range of pages which are not properly
845f1dd2cd1SMichal Hocko 	 * associated with their zone. Not nice, but set_pfnblock_flags_mask
846f1dd2cd1SMichal Hocko 	 * expects the zone to span the pfn range. All the pages in the range
847f1dd2cd1SMichal Hocko 	 * are reserved, so nobody should be touching them and we should be safe.
848f1dd2cd1SMichal Hocko 	 */
849f1dd2cd1SMichal Hocko 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
850f1dd2cd1SMichal Hocko 
851f1dd2cd1SMichal Hocko 	set_zone_contiguous(zone);
852f1dd2cd1SMichal Hocko }
853f1dd2cd1SMichal Hocko 
854f1dd2cd1SMichal Hocko /*
855c246a213SMichal Hocko  * Returns a default kernel memory zone for the given pfn range.
856c246a213SMichal Hocko  * If no kernel zone covers this pfn range it will automatically go
857c246a213SMichal Hocko  * to the ZONE_NORMAL.
858c246a213SMichal Hocko  */
859c246a213SMichal Hocko struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
860c246a213SMichal Hocko 		unsigned long nr_pages)
861c246a213SMichal Hocko {
862c246a213SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
863c246a213SMichal Hocko 	int zid;
864c246a213SMichal Hocko 
865c246a213SMichal Hocko 	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
866c246a213SMichal Hocko 		struct zone *zone = &pgdat->node_zones[zid];
867c246a213SMichal Hocko 
868c246a213SMichal Hocko 		if (zone_intersects(zone, start_pfn, nr_pages))
869c246a213SMichal Hocko 			return zone;
870c246a213SMichal Hocko 	}
871c246a213SMichal Hocko 
872c246a213SMichal Hocko 	return &pgdat->node_zones[ZONE_NORMAL];
873c246a213SMichal Hocko }
874c246a213SMichal Hocko 
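/*
 * Helper for MMOP_ONLINE_KEEP: return true if this pfn range should go to
 * ZONE_MOVABLE, either because onlining it as kernel memory is not allowed or
 * because movable_node is enabled and the range does not intersect the default
 * kernel zone.
 */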
8759f123ab5SMichal Hocko static inline bool movable_pfn_range(int nid, struct zone *default_zone,
8769f123ab5SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
8779f123ab5SMichal Hocko {
8789f123ab5SMichal Hocko 	if (!allow_online_pfn_range(nid, start_pfn, nr_pages,
8799f123ab5SMichal Hocko 				MMOP_ONLINE_KERNEL))
8809f123ab5SMichal Hocko 		return true;
8819f123ab5SMichal Hocko 
8829f123ab5SMichal Hocko 	if (!movable_node_is_enabled())
8839f123ab5SMichal Hocko 		return false;
8849f123ab5SMichal Hocko 
8859f123ab5SMichal Hocko 	return !zone_intersects(default_zone, start_pfn, nr_pages);
8869f123ab5SMichal Hocko }
8879f123ab5SMichal Hocko 
888c246a213SMichal Hocko /*
889f1dd2cd1SMichal Hocko  * Associates the given pfn range with the given node and the zone appropriate
890f1dd2cd1SMichal Hocko  * for the given online type.
891f1dd2cd1SMichal Hocko  */
892f1dd2cd1SMichal Hocko static struct zone * __meminit move_pfn_range(int online_type, int nid,
893f1dd2cd1SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
894f1dd2cd1SMichal Hocko {
895f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
896c246a213SMichal Hocko 	struct zone *zone = default_zone_for_pfn(nid, start_pfn, nr_pages);
897f1dd2cd1SMichal Hocko 
898f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_KEEP) {
899f1dd2cd1SMichal Hocko 		struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
900f1dd2cd1SMichal Hocko 		/*
901a69578a1SMichal Hocko 		 * MMOP_ONLINE_KEEP defaults to MMOP_ONLINE_KERNEL but uses the
902a69578a1SMichal Hocko 		 * movable zone if that is not possible (e.g. we are within
9039f123ab5SMichal Hocko 		 * or past the existing movable zone). movable_node overrides
9049f123ab5SMichal Hocko 		 * this default and always chooses the movable zone.
905f1dd2cd1SMichal Hocko 		 */
9069f123ab5SMichal Hocko 		if (movable_pfn_range(nid, zone, start_pfn, nr_pages))
907f1dd2cd1SMichal Hocko 			zone = movable_zone;
908f1dd2cd1SMichal Hocko 	} else if (online_type == MMOP_ONLINE_MOVABLE) {
909f1dd2cd1SMichal Hocko 		zone = &pgdat->node_zones[ZONE_MOVABLE];
910f1dd2cd1SMichal Hocko 	}
911f1dd2cd1SMichal Hocko 
912f1dd2cd1SMichal Hocko 	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
913f1dd2cd1SMichal Hocko 	return zone;
914df429ac0SReza Arbab }
91575884fb1SKAMEZAWA Hiroyuki 
91630467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
917511c2abaSLai Jiangshan int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
91875884fb1SKAMEZAWA Hiroyuki {
919aa47228aSCody P Schafer 	unsigned long flags;
9203947be19SDave Hansen 	unsigned long onlined_pages = 0;
9213947be19SDave Hansen 	struct zone *zone;
9226811378eSYasunori Goto 	int need_zonelists_rebuild = 0;
9237b78d335SYasunori Goto 	int nid;
9247b78d335SYasunori Goto 	int ret;
9257b78d335SYasunori Goto 	struct memory_notify arg;
9263947be19SDave Hansen 
927f1dd2cd1SMichal Hocko 	nid = pfn_to_nid(pfn);
928f1dd2cd1SMichal Hocko 	if (!allow_online_pfn_range(nid, pfn, nr_pages, online_type))
92930467e0bSDavid Rientjes 		return -EINVAL;
93074d42d8fSLai Jiangshan 
931f1dd2cd1SMichal Hocko 	/* associate pfn range with the zone */
932f1dd2cd1SMichal Hocko 	zone = move_pfn_range(online_type, nid, pfn, nr_pages);
933511c2abaSLai Jiangshan 
9347b78d335SYasunori Goto 	arg.start_pfn = pfn;
9357b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
936d9713679SLai Jiangshan 	node_states_check_changes_online(nr_pages, zone, &arg);
9377b78d335SYasunori Goto 
9387b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
9397b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
940e33e33b4SChen Yucong 	if (ret)
941e33e33b4SChen Yucong 		goto failed_addition;
942e33e33b4SChen Yucong 
9433947be19SDave Hansen 	/*
9446811378eSYasunori Goto 	 * If this zone is not populated, then it is not in the zonelist.
9456811378eSYasunori Goto 	 * This means the page allocator ignores this zone.
9466811378eSYasunori Goto 	 * So, the zonelist must be updated after onlining.
9476811378eSYasunori Goto 	 */
9484eaf3f64SHaicheng Li 	mutex_lock(&zonelists_mutex);
9496dcd73d7SWen Congyang 	if (!populated_zone(zone)) {
9506811378eSYasunori Goto 		need_zonelists_rebuild = 1;
9516dcd73d7SWen Congyang 		build_all_zonelists(NULL, zone);
9526dcd73d7SWen Congyang 	}
9536811378eSYasunori Goto 
954908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
95575884fb1SKAMEZAWA Hiroyuki 		online_pages_range);
956fd8a4221SGeoff Levand 	if (ret) {
9576dcd73d7SWen Congyang 		if (need_zonelists_rebuild)
9586dcd73d7SWen Congyang 			zone_pcp_reset(zone);
9594eaf3f64SHaicheng Li 		mutex_unlock(&zonelists_mutex);
960e33e33b4SChen Yucong 		goto failed_addition;
961fd8a4221SGeoff Levand 	}
962fd8a4221SGeoff Levand 
9633947be19SDave Hansen 	zone->present_pages += onlined_pages;
964aa47228aSCody P Schafer 
965aa47228aSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
966f2937be5SYasunori Goto 	zone->zone_pgdat->node_present_pages += onlined_pages;
967aa47228aSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
968aa47228aSCody P Schafer 
96908dff7b7SJiang Liu 	if (onlined_pages) {
970e888ca35SVlastimil Babka 		node_states_set_node(nid, &arg);
9711f522509SHaicheng Li 		if (need_zonelists_rebuild)
9726dcd73d7SWen Congyang 			build_all_zonelists(NULL, NULL);
9731f522509SHaicheng Li 		else
974112067f0SShaohua Li 			zone_pcp_update(zone);
97508dff7b7SJiang Liu 	}
9761f522509SHaicheng Li 
9774eaf3f64SHaicheng Li 	mutex_unlock(&zonelists_mutex);
9781b79acc9SKOSAKI Motohiro 
9791b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
9801b79acc9SKOSAKI Motohiro 
981698b1b30SVlastimil Babka 	if (onlined_pages) {
982e888ca35SVlastimil Babka 		kswapd_run(nid);
983698b1b30SVlastimil Babka 		kcompactd_run(nid);
984698b1b30SVlastimil Babka 	}
98561b13993SDave Hansen 
9865a4d4361SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
9872f7f24ecSKent Liu 
9882d1d43f6SChandra Seetharaman 	writeback_set_ratelimit();
9897b78d335SYasunori Goto 
9907b78d335SYasunori Goto 	if (onlined_pages)
9917b78d335SYasunori Goto 		memory_notify(MEM_ONLINE, &arg);
99230467e0bSDavid Rientjes 	return 0;
993e33e33b4SChen Yucong 
994e33e33b4SChen Yucong failed_addition:
995e33e33b4SChen Yucong 	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
996e33e33b4SChen Yucong 		 (unsigned long long) pfn << PAGE_SHIFT,
997e33e33b4SChen Yucong 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
998e33e33b4SChen Yucong 	memory_notify(MEM_CANCEL_ONLINE, &arg);
999e33e33b4SChen Yucong 	return ret;
10003947be19SDave Hansen }
100153947027SKeith Mannthey #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1002bc02af93SYasunori Goto 
10030bd85420STang Chen static void reset_node_present_pages(pg_data_t *pgdat)
10040bd85420STang Chen {
10050bd85420STang Chen 	struct zone *z;
10060bd85420STang Chen 
10070bd85420STang Chen 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
10080bd85420STang Chen 		z->present_pages = 0;
10090bd85420STang Chen 
10100bd85420STang Chen 	pgdat->node_present_pages = 0;
10110bd85420STang Chen }
10120bd85420STang Chen 
1013e1319331SHidetoshi Seto /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1014e1319331SHidetoshi Seto static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
10159af3c2deSYasunori Goto {
10169af3c2deSYasunori Goto 	struct pglist_data *pgdat;
10179af3c2deSYasunori Goto 	unsigned long zones_size[MAX_NR_ZONES] = {0};
10189af3c2deSYasunori Goto 	unsigned long zholes_size[MAX_NR_ZONES] = {0};
1019c8e861a5SFabian Frederick 	unsigned long start_pfn = PFN_DOWN(start);
10209af3c2deSYasunori Goto 
1021a1e565aaSTang Chen 	pgdat = NODE_DATA(nid);
1022a1e565aaSTang Chen 	if (!pgdat) {
10239af3c2deSYasunori Goto 		pgdat = arch_alloc_nodedata(nid);
10249af3c2deSYasunori Goto 		if (!pgdat)
10259af3c2deSYasunori Goto 			return NULL;
10269af3c2deSYasunori Goto 
10279af3c2deSYasunori Goto 		arch_refresh_nodedata(nid, pgdat);
1028b0dc3a34SGu Zheng 	} else {
1029e716f2ebSMel Gorman 		/*
1030e716f2ebSMel Gorman 		 * Reset the nr_zones, order and classzone_idx before reuse.
1031e716f2ebSMel Gorman 		 * Note that kswapd will init kswapd_classzone_idx properly
1032e716f2ebSMel Gorman 		 * when it starts in the near future.
1033e716f2ebSMel Gorman 		 */
1034b0dc3a34SGu Zheng 		pgdat->nr_zones = 0;
103538087d9bSMel Gorman 		pgdat->kswapd_order = 0;
103638087d9bSMel Gorman 		pgdat->kswapd_classzone_idx = 0;
1037a1e565aaSTang Chen 	}
10389af3c2deSYasunori Goto 
10399af3c2deSYasunori Goto 	/* we can use NODE_DATA(nid) from here */
10409af3c2deSYasunori Goto 
10419af3c2deSYasunori Goto 	/* init node's zones as empty zones, we don't have any present pages.*/
10429109fb7bSJohannes Weiner 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
10435830169fSReza Arbab 	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
10449af3c2deSYasunori Goto 
1045959ecc48SKAMEZAWA Hiroyuki 	/*
1046959ecc48SKAMEZAWA Hiroyuki 	 * The node we allocated has no zone fallback lists. To avoid
1047959ecc48SKAMEZAWA Hiroyuki 	 * accessing an uninitialized zonelist, build it here.
1048959ecc48SKAMEZAWA Hiroyuki 	 */
1049f957db4fSDavid Rientjes 	mutex_lock(&zonelists_mutex);
10509adb62a5SJiang Liu 	build_all_zonelists(pgdat, NULL);
1051f957db4fSDavid Rientjes 	mutex_unlock(&zonelists_mutex);
1052959ecc48SKAMEZAWA Hiroyuki 
1053f784a3f1STang Chen 	/*
1054f784a3f1STang Chen 	 * zone->managed_pages is set to an approximate value in
1055f784a3f1STang Chen 	 * free_area_init_core(), which would cause
1056f784a3f1STang Chen 	 * /sys/devices/system/node/nodeX/meminfo to report wrong data.
1057f784a3f1STang Chen 	 * So reset it to 0 before any memory is onlined.
1058f784a3f1STang Chen 	 */
1059f784a3f1STang Chen 	reset_node_managed_pages(pgdat);
1060f784a3f1STang Chen 
10610bd85420STang Chen 	/*
10620bd85420STang Chen 	 * When memory is hot-added, all the memory is in offline state. So
10630bd85420STang Chen 	 * clear all zones' present_pages because they will be updated in
10640bd85420STang Chen 	 * online_pages() and offline_pages().
10650bd85420STang Chen 	 */
10660bd85420STang Chen 	reset_node_present_pages(pgdat);
10670bd85420STang Chen 
10689af3c2deSYasunori Goto 	return pgdat;
10699af3c2deSYasunori Goto }
10709af3c2deSYasunori Goto 
10719af3c2deSYasunori Goto static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
10729af3c2deSYasunori Goto {
10739af3c2deSYasunori Goto 	arch_refresh_nodedata(nid, NULL);
10745830169fSReza Arbab 	free_percpu(pgdat->per_cpu_nodestats);
10759af3c2deSYasunori Goto 	arch_free_nodedata(pgdat);
10769af3c2deSYasunori Goto 	return;
10779af3c2deSYasunori Goto }
10789af3c2deSYasunori Goto 
10790a547039SKAMEZAWA Hiroyuki 
108001b0f197SToshi Kani /**
108101b0f197SToshi Kani  * try_online_node - online a node if offlined
108201b0f197SToshi Kani  *
1083cf23422bSminskey guo  * called by cpu_up() to online a node without onlined memory.
1084cf23422bSminskey guo  */
108501b0f197SToshi Kani int try_online_node(int nid)
1086cf23422bSminskey guo {
1087cf23422bSminskey guo 	pg_data_t	*pgdat;
1088cf23422bSminskey guo 	int	ret;
1089cf23422bSminskey guo 
109001b0f197SToshi Kani 	if (node_online(nid))
109101b0f197SToshi Kani 		return 0;
109201b0f197SToshi Kani 
1093bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1094cf23422bSminskey guo 	pgdat = hotadd_new_pgdat(nid, 0);
10957553e8f2SDavid Rientjes 	if (!pgdat) {
109601b0f197SToshi Kani 		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1097cf23422bSminskey guo 		ret = -ENOMEM;
1098cf23422bSminskey guo 		goto out;
1099cf23422bSminskey guo 	}
1100cf23422bSminskey guo 	node_set_online(nid);
1101cf23422bSminskey guo 	ret = register_one_node(nid);
1102cf23422bSminskey guo 	BUG_ON(ret);
1103cf23422bSminskey guo 
110401b0f197SToshi Kani 	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
110501b0f197SToshi Kani 		mutex_lock(&zonelists_mutex);
110601b0f197SToshi Kani 		build_all_zonelists(NULL, NULL);
110701b0f197SToshi Kani 		mutex_unlock(&zonelists_mutex);
110801b0f197SToshi Kani 	}
110901b0f197SToshi Kani 
1110cf23422bSminskey guo out:
1111bfc8c901SVladimir Davydov 	mem_hotplug_done();
1112cf23422bSminskey guo 	return ret;
1113cf23422bSminskey guo }
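
/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * sketch of how a caller such as the cpu_up() path might use
 * try_online_node() before bringing up a CPU that sits on a memoryless,
 * still-offline node.  The variable names are assumptions for the example.
 */
#if 0	/* illustrative only, never compiled */
	int cpu_nid = cpu_to_node(cpu);
	int err = try_online_node(cpu_nid);	/* no-op if the node is already online */

	if (err)
		return err;
#endif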
1114cf23422bSminskey guo 
111527356f54SToshi Kani static int check_hotplug_memory_range(u64 start, u64 size)
111627356f54SToshi Kani {
1117c8e861a5SFabian Frederick 	u64 start_pfn = PFN_DOWN(start);
111827356f54SToshi Kani 	u64 nr_pages = size >> PAGE_SHIFT;
111927356f54SToshi Kani 
112027356f54SToshi Kani 	/* Memory range must be aligned with section */
112127356f54SToshi Kani 	if ((start_pfn & ~PAGE_SECTION_MASK) ||
112227356f54SToshi Kani 	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
112327356f54SToshi Kani 		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
112427356f54SToshi Kani 				(unsigned long long)start,
112527356f54SToshi Kani 				(unsigned long long)size);
112627356f54SToshi Kani 		return -EINVAL;
112727356f54SToshi Kani 	}
112827356f54SToshi Kani 
112927356f54SToshi Kani 	return 0;
113027356f54SToshi Kani }
113127356f54SToshi Kani 
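
/*
 * Editor's note (illustrative, not part of the original file): a worked
 * example of the alignment rule enforced above, assuming the common x86-64
 * configuration of 128 MiB sections (SECTION_SIZE_BITS == 27, i.e.
 * PAGES_PER_SECTION == 32768 with 4 KiB pages).  A hot-added range must start
 * and end on a section boundary:
 *
 *	start = 0x100000000, size = 0x08000000  ->  accepted (one section)
 *	start = 0x100000000, size = 0x04000000  ->  rejected with -EINVAL
 *	start = 0x104000000, size = 0x08000000  ->  rejected with -EINVAL
 */
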
113231bc3858SVitaly Kuznetsov static int online_memory_block(struct memory_block *mem, void *arg)
113331bc3858SVitaly Kuznetsov {
1134dc18d706SNathan Fontenot 	return device_online(&mem->dev);
113531bc3858SVitaly Kuznetsov }
113631bc3858SVitaly Kuznetsov 
113731168481SAl Viro /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
113831bc3858SVitaly Kuznetsov int __ref add_memory_resource(int nid, struct resource *res, bool online)
1139bc02af93SYasunori Goto {
114062cedb9fSDavid Vrabel 	u64 start, size;
11419af3c2deSYasunori Goto 	pg_data_t *pgdat = NULL;
1142a1e565aaSTang Chen 	bool new_pgdat;
1143a1e565aaSTang Chen 	bool new_node;
1144bc02af93SYasunori Goto 	int ret;
1145bc02af93SYasunori Goto 
114662cedb9fSDavid Vrabel 	start = res->start;
114762cedb9fSDavid Vrabel 	size = resource_size(res);
114862cedb9fSDavid Vrabel 
114927356f54SToshi Kani 	ret = check_hotplug_memory_range(start, size);
115027356f54SToshi Kani 	if (ret)
115127356f54SToshi Kani 		return ret;
115227356f54SToshi Kani 
1153a1e565aaSTang Chen 	{	/* Stupid hack to suppress address-never-null warning */
1154a1e565aaSTang Chen 		void *p = NODE_DATA(nid);
1155a1e565aaSTang Chen 		new_pgdat = !p;
1156a1e565aaSTang Chen 	}
1157ac13c462SNathan Zimmer 
1158bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1159ac13c462SNathan Zimmer 
11607f36e3e5STang Chen 	/*
11617f36e3e5STang Chen 	 * Add new range to memblock so that when hotadd_new_pgdat() is called
11627f36e3e5STang Chen 	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
11637f36e3e5STang Chen 	 * this new range and calculate total pages correctly.  The range will
11647f36e3e5STang Chen 	 * be removed at hot-remove time.
11657f36e3e5STang Chen 	 */
11667f36e3e5STang Chen 	memblock_add_node(start, size, nid);
11677f36e3e5STang Chen 
1168a1e565aaSTang Chen 	new_node = !node_online(nid);
1169a1e565aaSTang Chen 	if (new_node) {
11709af3c2deSYasunori Goto 		pgdat = hotadd_new_pgdat(nid, start);
11716ad696d2SAndi Kleen 		ret = -ENOMEM;
11729af3c2deSYasunori Goto 		if (!pgdat)
117341b9e2d7SWen Congyang 			goto error;
11749af3c2deSYasunori Goto 	}
11759af3c2deSYasunori Goto 
1176bc02af93SYasunori Goto 	/* call arch's memory hotadd */
11773d79a728SMichal Hocko 	ret = arch_add_memory(nid, start, size, true);
1178bc02af93SYasunori Goto 
11799af3c2deSYasunori Goto 	if (ret < 0)
11809af3c2deSYasunori Goto 		goto error;
11819af3c2deSYasunori Goto 
11820fc44159SYasunori Goto 	/* we online node here. we can't roll back from here. */
11839af3c2deSYasunori Goto 	node_set_online(nid);
11849af3c2deSYasunori Goto 
1185a1e565aaSTang Chen 	if (new_node) {
11869037a993SMichal Hocko 		unsigned long start_pfn = start >> PAGE_SHIFT;
11879037a993SMichal Hocko 		unsigned long nr_pages = size >> PAGE_SHIFT;
11889037a993SMichal Hocko 
11899037a993SMichal Hocko 		ret = __register_one_node(nid);
11909037a993SMichal Hocko 		if (ret)
11919037a993SMichal Hocko 			goto register_fail;
11929037a993SMichal Hocko 
11939037a993SMichal Hocko 		/*
11949037a993SMichal Hocko 		 * Link memory sections under this node. This is already
11959037a993SMichal Hocko 		 * done when creating memory sections in register_new_memory
11969037a993SMichal Hocko 		 * but that depends on having the node registered, so offline
11979037a993SMichal Hocko 		 * nodes have to go through register_node.
11989037a993SMichal Hocko 		 * TODO clean up this mess.
11999037a993SMichal Hocko 		 */
12009037a993SMichal Hocko 		ret = link_mem_sections(nid, start_pfn, nr_pages);
12019037a993SMichal Hocko register_fail:
12020fc44159SYasunori Goto 		/*
12030fc44159SYasunori Goto 		 * If the sysfs file of the new node can't be created, CPUs on
12040fc44159SYasunori Goto 		 * the node can't be hot-added. There is no way to roll back now,
12050fc44159SYasunori Goto 		 * so catch it reluctantly with BUG_ON().
12060fc44159SYasunori Goto 		 */
12070fc44159SYasunori Goto 		BUG_ON(ret);
12080fc44159SYasunori Goto 	}
12090fc44159SYasunori Goto 
1210d96ae530Sakpm@linux-foundation.org 	/* create new memmap entry */
1211d96ae530Sakpm@linux-foundation.org 	firmware_map_add_hotplug(start, start + size, "System RAM");
1212d96ae530Sakpm@linux-foundation.org 
121331bc3858SVitaly Kuznetsov 	/* online pages if requested */
121431bc3858SVitaly Kuznetsov 	if (online)
121531bc3858SVitaly Kuznetsov 		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
121631bc3858SVitaly Kuznetsov 				  NULL, online_memory_block);
121731bc3858SVitaly Kuznetsov 
12186ad696d2SAndi Kleen 	goto out;
12196ad696d2SAndi Kleen 
12209af3c2deSYasunori Goto error:
12219af3c2deSYasunori Goto 	/* rollback pgdat allocation and others */
1222dbac61a3SGustavo A. R. Silva 	if (new_pgdat && pgdat)
12239af3c2deSYasunori Goto 		rollback_node_hotadd(nid, pgdat);
12247f36e3e5STang Chen 	memblock_remove(start, size);
12259af3c2deSYasunori Goto 
12266ad696d2SAndi Kleen out:
1227bfc8c901SVladimir Davydov 	mem_hotplug_done();
1228bc02af93SYasunori Goto 	return ret;
1229bc02af93SYasunori Goto }
123062cedb9fSDavid Vrabel EXPORT_SYMBOL_GPL(add_memory_resource);
123162cedb9fSDavid Vrabel 
123262cedb9fSDavid Vrabel int __ref add_memory(int nid, u64 start, u64 size)
123362cedb9fSDavid Vrabel {
123462cedb9fSDavid Vrabel 	struct resource *res;
123562cedb9fSDavid Vrabel 	int ret;
123662cedb9fSDavid Vrabel 
123762cedb9fSDavid Vrabel 	res = register_memory_resource(start, size);
12386f754ba4SVitaly Kuznetsov 	if (IS_ERR(res))
12396f754ba4SVitaly Kuznetsov 		return PTR_ERR(res);
124062cedb9fSDavid Vrabel 
124131bc3858SVitaly Kuznetsov 	ret = add_memory_resource(nid, res, memhp_auto_online);
124262cedb9fSDavid Vrabel 	if (ret < 0)
124362cedb9fSDavid Vrabel 		release_memory_resource(res);
124462cedb9fSDavid Vrabel 	return ret;
124562cedb9fSDavid Vrabel }
1246bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory);
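
/*
 * Editor's note (illustrative, not part of the original file): a hedged
 * sketch of how a hypothetical hot-add driver might use the interface above.
 * The node id and physical range are made up for the example; real callers
 * (e.g. ACPI memory hotplug) derive them from firmware.
 */
#if 0	/* illustrative only, never compiled */
static int example_hotadd(void)
{
	u64 start = 0x100000000ULL;	/* assumed 4 GiB base     */
	u64 size  = 128ULL << 20;	/* one 128 MiB section    */

	/*
	 * Adds the range as System RAM on node 0.  Pages stay offline unless
	 * memhp_auto_online is set or userspace onlines the memory blocks.
	 */
	return add_memory(0, start, size);
}
#endif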
12470c0e6195SKAMEZAWA Hiroyuki 
12480c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE
12490c0e6195SKAMEZAWA Hiroyuki /*
12505c755e9fSBadari Pulavarty  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
12515c755e9fSBadari Pulavarty  * set and the size of the free page is given by page_order(). Using this,
12525c755e9fSBadari Pulavarty  * the function determines if the pageblock contains only free pages.
12535c755e9fSBadari Pulavarty  * Due to buddy constraints, a free page at least the size of a pageblock will
12545c755e9fSBadari Pulavarty  * be located at the start of the pageblock.
12555c755e9fSBadari Pulavarty  */
12565c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page)
12575c755e9fSBadari Pulavarty {
12585c755e9fSBadari Pulavarty 	return PageBuddy(page) && page_order(page) >= pageblock_order;
12595c755e9fSBadari Pulavarty }
12605c755e9fSBadari Pulavarty 
12615c755e9fSBadari Pulavarty /* Return the start of the next active pageblock after a given page */
12625c755e9fSBadari Pulavarty static struct page *next_active_pageblock(struct page *page)
12635c755e9fSBadari Pulavarty {
12645c755e9fSBadari Pulavarty 	/* Ensure the starting page is pageblock-aligned */
12655c755e9fSBadari Pulavarty 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
12665c755e9fSBadari Pulavarty 
12675c755e9fSBadari Pulavarty 	/* If the entire pageblock is free, move to the end of the free page */
12680dcc48c1SKAMEZAWA Hiroyuki 	if (pageblock_free(page)) {
12690dcc48c1SKAMEZAWA Hiroyuki 		int order;
12700dcc48c1SKAMEZAWA Hiroyuki 		/* be careful: we don't hold locks, page_order can change. */
12710dcc48c1SKAMEZAWA Hiroyuki 		order = page_order(page);
12720dcc48c1SKAMEZAWA Hiroyuki 		if ((order < MAX_ORDER) && (order >= pageblock_order))
12730dcc48c1SKAMEZAWA Hiroyuki 			return page + (1 << order);
12740dcc48c1SKAMEZAWA Hiroyuki 	}
12755c755e9fSBadari Pulavarty 
12760dcc48c1SKAMEZAWA Hiroyuki 	return page + pageblock_nr_pages;
12775c755e9fSBadari Pulavarty }
12785c755e9fSBadari Pulavarty 
12795c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable. */
1280c98940f6SYaowei Bai bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
12815c755e9fSBadari Pulavarty {
12825c755e9fSBadari Pulavarty 	struct page *page = pfn_to_page(start_pfn);
12835c755e9fSBadari Pulavarty 	struct page *end_page = page + nr_pages;
12845c755e9fSBadari Pulavarty 
12855c755e9fSBadari Pulavarty 	/* Check the starting page of each pageblock within the range */
12865c755e9fSBadari Pulavarty 	for (; page < end_page; page = next_active_pageblock(page)) {
128749ac8255SKAMEZAWA Hiroyuki 		if (!is_pageblock_removable_nolock(page))
1288c98940f6SYaowei Bai 			return false;
128949ac8255SKAMEZAWA Hiroyuki 		cond_resched();
12905c755e9fSBadari Pulavarty 	}
12915c755e9fSBadari Pulavarty 
12925c755e9fSBadari Pulavarty 	/* All pageblocks in the memory block are likely to be hot-removable */
1293c98940f6SYaowei Bai 	return true;
12945c755e9fSBadari Pulavarty }
12955c755e9fSBadari Pulavarty 
12965c755e9fSBadari Pulavarty /*
1297deb88a2aSToshi Kani  * Confirm all pages in a range [start, end) belong to the same zone.
1298a96dfddbSToshi Kani  * When true, return the valid range through *valid_start and *valid_end.
12990c0e6195SKAMEZAWA Hiroyuki  */
1300a96dfddbSToshi Kani int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1301a96dfddbSToshi Kani 			 unsigned long *valid_start, unsigned long *valid_end)
13020c0e6195SKAMEZAWA Hiroyuki {
13035f0f2887SAndrew Banman 	unsigned long pfn, sec_end_pfn;
1304a96dfddbSToshi Kani 	unsigned long start, end;
13050c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone = NULL;
13060c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
13070c0e6195SKAMEZAWA Hiroyuki 	int i;
1308deb88a2aSToshi Kani 	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
13090c0e6195SKAMEZAWA Hiroyuki 	     pfn < end_pfn;
1310deb88a2aSToshi Kani 	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
13115f0f2887SAndrew Banman 		/* Make sure the memory section is present first */
13125f0f2887SAndrew Banman 		if (!present_section_nr(pfn_to_section_nr(pfn)))
13135f0f2887SAndrew Banman 			continue;
13145f0f2887SAndrew Banman 		for (; pfn < sec_end_pfn && pfn < end_pfn;
13150c0e6195SKAMEZAWA Hiroyuki 		     pfn += MAX_ORDER_NR_PAGES) {
13160c0e6195SKAMEZAWA Hiroyuki 			i = 0;
13170c0e6195SKAMEZAWA Hiroyuki 			/* This is just a CONFIG_HOLES_IN_ZONE check. */
13185f0f2887SAndrew Banman 			while ((i < MAX_ORDER_NR_PAGES) &&
13195f0f2887SAndrew Banman 				!pfn_valid_within(pfn + i))
13200c0e6195SKAMEZAWA Hiroyuki 				i++;
1321d6d8c8a4Szhong jiang 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
13220c0e6195SKAMEZAWA Hiroyuki 				continue;
13230c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn + i);
13240c0e6195SKAMEZAWA Hiroyuki 			if (zone && page_zone(page) != zone)
13250c0e6195SKAMEZAWA Hiroyuki 				return 0;
1326a96dfddbSToshi Kani 			if (!zone)
1327a96dfddbSToshi Kani 				start = pfn + i;
13280c0e6195SKAMEZAWA Hiroyuki 			zone = page_zone(page);
1329a96dfddbSToshi Kani 			end = pfn + MAX_ORDER_NR_PAGES;
13300c0e6195SKAMEZAWA Hiroyuki 		}
13315f0f2887SAndrew Banman 	}
1332deb88a2aSToshi Kani 
1333a96dfddbSToshi Kani 	if (zone) {
1334a96dfddbSToshi Kani 		*valid_start = start;
1335d6d8c8a4Szhong jiang 		*valid_end = min(end, end_pfn);
13360c0e6195SKAMEZAWA Hiroyuki 		return 1;
1337a96dfddbSToshi Kani 	} else {
1338deb88a2aSToshi Kani 		return 0;
13390c0e6195SKAMEZAWA Hiroyuki 	}
1340a96dfddbSToshi Kani }
13410c0e6195SKAMEZAWA Hiroyuki 
13420c0e6195SKAMEZAWA Hiroyuki /*
13430efadf48SYisheng Xie  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
13440efadf48SYisheng Xie  * non-lru movable pages and hugepages). We scan by pfn because it's much
13450efadf48SYisheng Xie  * easier than walking a linked list. This function returns the pfn
13460efadf48SYisheng Xie  * of the first movable page found, otherwise 0.
13470c0e6195SKAMEZAWA Hiroyuki  */
1348c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
13490c0e6195SKAMEZAWA Hiroyuki {
13500c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
13510c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
13520c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start; pfn < end; pfn++) {
13530c0e6195SKAMEZAWA Hiroyuki 		if (pfn_valid(pfn)) {
13540c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn);
13550c0e6195SKAMEZAWA Hiroyuki 			if (PageLRU(page))
13560c0e6195SKAMEZAWA Hiroyuki 				return pfn;
13570efadf48SYisheng Xie 			if (__PageMovable(page))
13580efadf48SYisheng Xie 				return pfn;
1359c8721bbbSNaoya Horiguchi 			if (PageHuge(page)) {
13607e1f049eSNaoya Horiguchi 				if (page_huge_active(page))
1361c8721bbbSNaoya Horiguchi 					return pfn;
1362c8721bbbSNaoya Horiguchi 				else
1363c8721bbbSNaoya Horiguchi 					pfn = round_up(pfn + 1,
1364c8721bbbSNaoya Horiguchi 						1 << compound_order(page)) - 1;
1365c8721bbbSNaoya Horiguchi 			}
13660c0e6195SKAMEZAWA Hiroyuki 		}
13670c0e6195SKAMEZAWA Hiroyuki 	}
13680c0e6195SKAMEZAWA Hiroyuki 	return 0;
13690c0e6195SKAMEZAWA Hiroyuki }
13700c0e6195SKAMEZAWA Hiroyuki 
1371394e31d2SXishi Qiu static struct page *new_node_page(struct page *page, unsigned long private,
1372394e31d2SXishi Qiu 		int **result)
1373394e31d2SXishi Qiu {
1374394e31d2SXishi Qiu 	int nid = page_to_nid(page);
1375231e97e2SLi Zhong 	nodemask_t nmask = node_states[N_MEMORY];
13767f252f27SMichal Hocko 
13777f252f27SMichal Hocko 	/*
13787f252f27SMichal Hocko 	 * try to allocate from a different node but reuse this node if there
13797f252f27SMichal Hocko 	 * are no other online nodes to be used (e.g. we are offlining a part
13807f252f27SMichal Hocko 	 * of the only existing node)
13817f252f27SMichal Hocko 	 */
13827f252f27SMichal Hocko 	node_clear(nid, nmask);
13837f252f27SMichal Hocko 	if (nodes_empty(nmask))
13847f252f27SMichal Hocko 		node_set(nid, nmask);
1385394e31d2SXishi Qiu 
13868b913238SMichal Hocko 	return new_page_nodemask(page, nid, &nmask);
1387394e31d2SXishi Qiu }
1388394e31d2SXishi Qiu 
13890c0e6195SKAMEZAWA Hiroyuki #define NR_OFFLINE_AT_ONCE_PAGES	(256)
13900c0e6195SKAMEZAWA Hiroyuki static int
13910c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
13920c0e6195SKAMEZAWA Hiroyuki {
13930c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
13940c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
13950c0e6195SKAMEZAWA Hiroyuki 	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
13960c0e6195SKAMEZAWA Hiroyuki 	int not_managed = 0;
13970c0e6195SKAMEZAWA Hiroyuki 	int ret = 0;
13980c0e6195SKAMEZAWA Hiroyuki 	LIST_HEAD(source);
13990c0e6195SKAMEZAWA Hiroyuki 
14000c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
14010c0e6195SKAMEZAWA Hiroyuki 		if (!pfn_valid(pfn))
14020c0e6195SKAMEZAWA Hiroyuki 			continue;
14030c0e6195SKAMEZAWA Hiroyuki 		page = pfn_to_page(pfn);
1404c8721bbbSNaoya Horiguchi 
1405c8721bbbSNaoya Horiguchi 		if (PageHuge(page)) {
1406c8721bbbSNaoya Horiguchi 			struct page *head = compound_head(page);
1407c8721bbbSNaoya Horiguchi 			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1408c8721bbbSNaoya Horiguchi 			if (compound_order(head) > PFN_SECTION_SHIFT) {
1409c8721bbbSNaoya Horiguchi 				ret = -EBUSY;
1410c8721bbbSNaoya Horiguchi 				break;
1411c8721bbbSNaoya Horiguchi 			}
1412c8721bbbSNaoya Horiguchi 			if (isolate_huge_page(page, &source))
1413c8721bbbSNaoya Horiguchi 				move_pages -= 1 << compound_order(head);
1414c8721bbbSNaoya Horiguchi 			continue;
1415c8721bbbSNaoya Horiguchi 		}
1416c8721bbbSNaoya Horiguchi 
1417700c2a46SKonstantin Khlebnikov 		if (!get_page_unless_zero(page))
14180c0e6195SKAMEZAWA Hiroyuki 			continue;
14190c0e6195SKAMEZAWA Hiroyuki 		/*
14200efadf48SYisheng Xie 		 * We can skip free pages. We can deal with pages on the
14210efadf48SYisheng Xie 		 * LRU as well as non-lru movable pages.
14220c0e6195SKAMEZAWA Hiroyuki 		 */
14230efadf48SYisheng Xie 		if (PageLRU(page))
142462695a84SNick Piggin 			ret = isolate_lru_page(page);
14250efadf48SYisheng Xie 		else
14260efadf48SYisheng Xie 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
14270c0e6195SKAMEZAWA Hiroyuki 		if (!ret) { /* Success */
1428700c2a46SKonstantin Khlebnikov 			put_page(page);
142962695a84SNick Piggin 			list_add_tail(&page->lru, &source);
14300c0e6195SKAMEZAWA Hiroyuki 			move_pages--;
14310efadf48SYisheng Xie 			if (!__PageMovable(page))
1432599d0c95SMel Gorman 				inc_node_page_state(page, NR_ISOLATED_ANON +
14336d9c285aSKOSAKI Motohiro 						    page_is_file_cache(page));
14346d9c285aSKOSAKI Motohiro 
14350c0e6195SKAMEZAWA Hiroyuki 		} else {
14360c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_DEBUG_VM
14370efadf48SYisheng Xie 			pr_alert("failed to isolate pfn %lx\n", pfn);
14380efadf48SYisheng Xie 			dump_page(page, "isolation failed");
14390c0e6195SKAMEZAWA Hiroyuki #endif
1440700c2a46SKonstantin Khlebnikov 			put_page(page);
144125985edcSLucas De Marchi 			/* Because we don't hold the big zone->lock, we should
1442809c4449SBob Liu 			   check this again here. */
1443809c4449SBob Liu 			if (page_count(page)) {
1444809c4449SBob Liu 				not_managed++;
1445f3ab2636SBob Liu 				ret = -EBUSY;
1446809c4449SBob Liu 				break;
1447809c4449SBob Liu 			}
14480c0e6195SKAMEZAWA Hiroyuki 		}
14490c0e6195SKAMEZAWA Hiroyuki 	}
1450f3ab2636SBob Liu 	if (!list_empty(&source)) {
14510c0e6195SKAMEZAWA Hiroyuki 		if (not_managed) {
1452c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
14530c0e6195SKAMEZAWA Hiroyuki 			goto out;
14540c0e6195SKAMEZAWA Hiroyuki 		}
145574c08f98SMinchan Kim 
1456394e31d2SXishi Qiu 		/* Allocate a new page from the nearest neighbor node */
1457394e31d2SXishi Qiu 		ret = migrate_pages(&source, new_node_page, NULL, 0,
14589c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1459cf608ac1SMinchan Kim 		if (ret)
1460c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
1461f3ab2636SBob Liu 	}
14620c0e6195SKAMEZAWA Hiroyuki out:
14630c0e6195SKAMEZAWA Hiroyuki 	return ret;
14640c0e6195SKAMEZAWA Hiroyuki }
14650c0e6195SKAMEZAWA Hiroyuki 
14660c0e6195SKAMEZAWA Hiroyuki /*
14670c0e6195SKAMEZAWA Hiroyuki  * remove from free_area[] and mark all as Reserved.
14680c0e6195SKAMEZAWA Hiroyuki  */
14690c0e6195SKAMEZAWA Hiroyuki static int
14700c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
14710c0e6195SKAMEZAWA Hiroyuki 			void *data)
14720c0e6195SKAMEZAWA Hiroyuki {
14730c0e6195SKAMEZAWA Hiroyuki 	__offline_isolated_pages(start, start + nr_pages);
14740c0e6195SKAMEZAWA Hiroyuki 	return 0;
14750c0e6195SKAMEZAWA Hiroyuki }
14760c0e6195SKAMEZAWA Hiroyuki 
14770c0e6195SKAMEZAWA Hiroyuki static void
14780c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
14790c0e6195SKAMEZAWA Hiroyuki {
1480908eedc6SKAMEZAWA Hiroyuki 	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
14810c0e6195SKAMEZAWA Hiroyuki 				offline_isolated_pages_cb);
14820c0e6195SKAMEZAWA Hiroyuki }
14830c0e6195SKAMEZAWA Hiroyuki 
14840c0e6195SKAMEZAWA Hiroyuki /*
14850c0e6195SKAMEZAWA Hiroyuki  * Check that all pages in the range, recorded as a memory resource, are isolated.
14860c0e6195SKAMEZAWA Hiroyuki  */
14870c0e6195SKAMEZAWA Hiroyuki static int
14880c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
14890c0e6195SKAMEZAWA Hiroyuki 			void *data)
14900c0e6195SKAMEZAWA Hiroyuki {
14910c0e6195SKAMEZAWA Hiroyuki 	int ret;
14920c0e6195SKAMEZAWA Hiroyuki 	long offlined = *(long *)data;
1493b023f468SWen Congyang 	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
14940c0e6195SKAMEZAWA Hiroyuki 	offlined = nr_pages;
14950c0e6195SKAMEZAWA Hiroyuki 	if (!ret)
14960c0e6195SKAMEZAWA Hiroyuki 		*(long *)data += offlined;
14970c0e6195SKAMEZAWA Hiroyuki 	return ret;
14980c0e6195SKAMEZAWA Hiroyuki }
14990c0e6195SKAMEZAWA Hiroyuki 
15000c0e6195SKAMEZAWA Hiroyuki static long
15010c0e6195SKAMEZAWA Hiroyuki check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
15020c0e6195SKAMEZAWA Hiroyuki {
15030c0e6195SKAMEZAWA Hiroyuki 	long offlined = 0;
15040c0e6195SKAMEZAWA Hiroyuki 	int ret;
15050c0e6195SKAMEZAWA Hiroyuki 
1506908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
15070c0e6195SKAMEZAWA Hiroyuki 			check_pages_isolated_cb);
15080c0e6195SKAMEZAWA Hiroyuki 	if (ret < 0)
15090c0e6195SKAMEZAWA Hiroyuki 		offlined = (long)ret;
15100c0e6195SKAMEZAWA Hiroyuki 	return offlined;
15110c0e6195SKAMEZAWA Hiroyuki }
15120c0e6195SKAMEZAWA Hiroyuki 
1513c5320926STang Chen static int __init cmdline_parse_movable_node(char *p)
1514c5320926STang Chen {
15154932381eSMichal Hocko #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
151655ac590cSTang Chen 	movable_node_enabled = true;
15174932381eSMichal Hocko #else
15184932381eSMichal Hocko 	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
15194932381eSMichal Hocko #endif
1520c5320926STang Chen 	return 0;
1521c5320926STang Chen }
1522c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node);
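
/*
 * Editor's note (illustrative, not part of the original file): this early
 * parameter is enabled by adding "movable_node" to the kernel command line,
 * e.g.:
 *
 *	linux ... movable_node
 *
 * which simply sets movable_node_enabled above (only effective when
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP is available).
 */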
1523c5320926STang Chen 
1524d9713679SLai Jiangshan /* check which state of node_states will be changed when memory is offlined */
1525d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages,
1526d9713679SLai Jiangshan 		struct zone *zone, struct memory_notify *arg)
1527d9713679SLai Jiangshan {
1528d9713679SLai Jiangshan 	struct pglist_data *pgdat = zone->zone_pgdat;
1529d9713679SLai Jiangshan 	unsigned long present_pages = 0;
1530d9713679SLai Jiangshan 	enum zone_type zt, zone_last = ZONE_NORMAL;
1531d9713679SLai Jiangshan 
1532d9713679SLai Jiangshan 	/*
15336715ddf9SLai Jiangshan 	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
15346715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
15356715ddf9SLai Jiangshan 	 * set zone_last to ZONE_NORMAL.
1536d9713679SLai Jiangshan 	 *
15376715ddf9SLai Jiangshan 	 * If we don't have HIGHMEM nor movable node,
15386715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
15396715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
1540d9713679SLai Jiangshan 	 */
15416715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
1542d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
1543d9713679SLai Jiangshan 
1544d9713679SLai Jiangshan 	/*
1545d9713679SLai Jiangshan 	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
1546d9713679SLai Jiangshan 	 * If the memory to be offlined is in a zone of 0...zone_last,
1547d9713679SLai Jiangshan 	 * and it is the last present memory, 0...zone_last will
1548d9713679SLai Jiangshan 	 * become empty after offlining, thus we can determine we will
1549d9713679SLai Jiangshan 	 * need to clear the node from node_states[N_NORMAL_MEMORY].
1550d9713679SLai Jiangshan 	 */
1551d9713679SLai Jiangshan 	for (zt = 0; zt <= zone_last; zt++)
1552d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1553d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1554d9713679SLai Jiangshan 		arg->status_change_nid_normal = zone_to_nid(zone);
1555d9713679SLai Jiangshan 	else
1556d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
1557d9713679SLai Jiangshan 
15586715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
15596715ddf9SLai Jiangshan 	/*
15606715ddf9SLai Jiangshan 	 * If we have movable node, node_states[N_HIGH_MEMORY]
15616715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
15626715ddf9SLai Jiangshan 	 * set zone_last to ZONE_HIGHMEM.
15636715ddf9SLai Jiangshan 	 *
15646715ddf9SLai Jiangshan 	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
15656715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
15666715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
15676715ddf9SLai Jiangshan 	 */
15686715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
15696715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
15706715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
15716715ddf9SLai Jiangshan 
15726715ddf9SLai Jiangshan 	for (; zt <= zone_last; zt++)
15736715ddf9SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
15746715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
15756715ddf9SLai Jiangshan 		arg->status_change_nid_high = zone_to_nid(zone);
15766715ddf9SLai Jiangshan 	else
15776715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
15786715ddf9SLai Jiangshan #else
15796715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
15806715ddf9SLai Jiangshan #endif
15816715ddf9SLai Jiangshan 
1582d9713679SLai Jiangshan 	/*
1583d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1584d9713679SLai Jiangshan 	 */
1585d9713679SLai Jiangshan 	zone_last = ZONE_MOVABLE;
1586d9713679SLai Jiangshan 
1587d9713679SLai Jiangshan 	/*
1588d9713679SLai Jiangshan 	 * check whether node_states[N_HIGH_MEMORY] will be changed.
1589d9713679SLai Jiangshan 	 * If we try to offline the last present @nr_pages from the node,
1590d9713679SLai Jiangshan 	 * we can determine we will need to clear the node from
1591d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY].
1592d9713679SLai Jiangshan 	 */
1593d9713679SLai Jiangshan 	for (; zt <= zone_last; zt++)
1594d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1595d9713679SLai Jiangshan 	if (nr_pages >= present_pages)
1596d9713679SLai Jiangshan 		arg->status_change_nid = zone_to_nid(zone);
1597d9713679SLai Jiangshan 	else
1598d9713679SLai Jiangshan 		arg->status_change_nid = -1;
1599d9713679SLai Jiangshan }
1600d9713679SLai Jiangshan 
1601d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1602d9713679SLai Jiangshan {
1603d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
1604d9713679SLai Jiangshan 		node_clear_state(node, N_NORMAL_MEMORY);
1605d9713679SLai Jiangshan 
16066715ddf9SLai Jiangshan 	if ((N_MEMORY != N_NORMAL_MEMORY) &&
16076715ddf9SLai Jiangshan 	    (arg->status_change_nid_high >= 0))
1608d9713679SLai Jiangshan 		node_clear_state(node, N_HIGH_MEMORY);
16096715ddf9SLai Jiangshan 
16106715ddf9SLai Jiangshan 	if ((N_MEMORY != N_HIGH_MEMORY) &&
16116715ddf9SLai Jiangshan 	    (arg->status_change_nid >= 0))
16126715ddf9SLai Jiangshan 		node_clear_state(node, N_MEMORY);
1613d9713679SLai Jiangshan }
1614d9713679SLai Jiangshan 
1615a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn,
16160c0e6195SKAMEZAWA Hiroyuki 		  unsigned long end_pfn, unsigned long timeout)
16170c0e6195SKAMEZAWA Hiroyuki {
16180c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn, nr_pages, expire;
16190c0e6195SKAMEZAWA Hiroyuki 	long offlined_pages;
16207b78d335SYasunori Goto 	int ret, drain, retry_max, node;
1621d702909fSCody P Schafer 	unsigned long flags;
1622a96dfddbSToshi Kani 	unsigned long valid_start, valid_end;
16230c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone;
16247b78d335SYasunori Goto 	struct memory_notify arg;
16250c0e6195SKAMEZAWA Hiroyuki 
16260c0e6195SKAMEZAWA Hiroyuki 	/* at least, alignment against pageblock is necessary */
16270c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
16280c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
16290c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
16300c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
16310c0e6195SKAMEZAWA Hiroyuki 	/* This makes hotplug much easier... and readable.
16320c0e6195SKAMEZAWA Hiroyuki 	   We assume this for now. */
1633a96dfddbSToshi Kani 	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
16340c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
16357b78d335SYasunori Goto 
1636a96dfddbSToshi Kani 	zone = page_zone(pfn_to_page(valid_start));
16377b78d335SYasunori Goto 	node = zone_to_nid(zone);
16387b78d335SYasunori Goto 	nr_pages = end_pfn - start_pfn;
16397b78d335SYasunori Goto 
16400c0e6195SKAMEZAWA Hiroyuki 	/* set above range as isolated */
1641b023f468SWen Congyang 	ret = start_isolate_page_range(start_pfn, end_pfn,
1642b023f468SWen Congyang 				       MIGRATE_MOVABLE, true);
16430c0e6195SKAMEZAWA Hiroyuki 	if (ret)
164430467e0bSDavid Rientjes 		return ret;
16457b78d335SYasunori Goto 
16467b78d335SYasunori Goto 	arg.start_pfn = start_pfn;
16477b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
1648d9713679SLai Jiangshan 	node_states_check_changes_offline(nr_pages, zone, &arg);
16497b78d335SYasunori Goto 
16507b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
16517b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
16527b78d335SYasunori Goto 	if (ret)
16537b78d335SYasunori Goto 		goto failed_removal;
16547b78d335SYasunori Goto 
16550c0e6195SKAMEZAWA Hiroyuki 	pfn = start_pfn;
16560c0e6195SKAMEZAWA Hiroyuki 	expire = jiffies + timeout;
16570c0e6195SKAMEZAWA Hiroyuki 	drain = 0;
16580c0e6195SKAMEZAWA Hiroyuki 	retry_max = 5;
16590c0e6195SKAMEZAWA Hiroyuki repeat:
16600c0e6195SKAMEZAWA Hiroyuki 	/* start memory hot removal */
16610c0e6195SKAMEZAWA Hiroyuki 	ret = -EAGAIN;
16620c0e6195SKAMEZAWA Hiroyuki 	if (time_after(jiffies, expire))
16630c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
16640c0e6195SKAMEZAWA Hiroyuki 	ret = -EINTR;
16650c0e6195SKAMEZAWA Hiroyuki 	if (signal_pending(current))
16660c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
16670c0e6195SKAMEZAWA Hiroyuki 	ret = 0;
16680c0e6195SKAMEZAWA Hiroyuki 	if (drain) {
1669*3f906ba2SThomas Gleixner 		lru_add_drain_all_cpuslocked();
16700c0e6195SKAMEZAWA Hiroyuki 		cond_resched();
1671c0554329SVlastimil Babka 		drain_all_pages(zone);
16720c0e6195SKAMEZAWA Hiroyuki 	}
16730c0e6195SKAMEZAWA Hiroyuki 
1674c8721bbbSNaoya Horiguchi 	pfn = scan_movable_pages(start_pfn, end_pfn);
1675c8721bbbSNaoya Horiguchi 	if (pfn) { /* We have movable pages */
16760c0e6195SKAMEZAWA Hiroyuki 		ret = do_migrate_range(pfn, end_pfn);
16770c0e6195SKAMEZAWA Hiroyuki 		if (!ret) {
16780c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
16790c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
16800c0e6195SKAMEZAWA Hiroyuki 		} else {
16810c0e6195SKAMEZAWA Hiroyuki 			if (ret < 0)
16820c0e6195SKAMEZAWA Hiroyuki 				if (--retry_max == 0)
16830c0e6195SKAMEZAWA Hiroyuki 					goto failed_removal;
16840c0e6195SKAMEZAWA Hiroyuki 			yield();
16850c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
16860c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
16870c0e6195SKAMEZAWA Hiroyuki 		}
16880c0e6195SKAMEZAWA Hiroyuki 	}
1689b3834be5SAdam Buchbinder 	/* drain all zones' lru pagevecs, this is asynchronous... */
1690*3f906ba2SThomas Gleixner 	lru_add_drain_all_cpuslocked();
16910c0e6195SKAMEZAWA Hiroyuki 	yield();
1692b3834be5SAdam Buchbinder 	/* drain pcp pages, this is synchronous. */
1693c0554329SVlastimil Babka 	drain_all_pages(zone);
1694c8721bbbSNaoya Horiguchi 	/*
1695c8721bbbSNaoya Horiguchi 	 * dissolve free hugepages in the memory block before actually doing
1696c8721bbbSNaoya Horiguchi 	 * the offlining, in order to keep hugetlbfs's object counting consistent.
1697c8721bbbSNaoya Horiguchi 	 */
1698082d5b6bSGerald Schaefer 	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1699082d5b6bSGerald Schaefer 	if (ret)
1700082d5b6bSGerald Schaefer 		goto failed_removal;
17010c0e6195SKAMEZAWA Hiroyuki 	/* check again */
17020c0e6195SKAMEZAWA Hiroyuki 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
17030c0e6195SKAMEZAWA Hiroyuki 	if (offlined_pages < 0) {
17040c0e6195SKAMEZAWA Hiroyuki 		ret = -EBUSY;
17050c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
17060c0e6195SKAMEZAWA Hiroyuki 	}
1707e33e33b4SChen Yucong 	pr_info("Offlined Pages %ld\n", offlined_pages);
1708b3834be5SAdam Buchbinder 	/* Ok, all of our target range is isolated.
17090c0e6195SKAMEZAWA Hiroyuki 	   We cannot roll back from this point. */
17100c0e6195SKAMEZAWA Hiroyuki 	offline_isolated_pages(start_pfn, end_pfn);
1711dbc0e4ceSKAMEZAWA Hiroyuki 	/* reset pagetype flags and make the migrate type MOVABLE */
17120815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
17130c0e6195SKAMEZAWA Hiroyuki 	/* removal success */
17143dcc0571SJiang Liu 	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
17150c0e6195SKAMEZAWA Hiroyuki 	zone->present_pages -= offlined_pages;
1716d702909fSCody P Schafer 
1717d702909fSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
17180c0e6195SKAMEZAWA Hiroyuki 	zone->zone_pgdat->node_present_pages -= offlined_pages;
1719d702909fSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
17207b78d335SYasunori Goto 
17211b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
17221b79acc9SKOSAKI Motohiro 
17231e8537baSXishi Qiu 	if (!populated_zone(zone)) {
1724340175b7SJiang Liu 		zone_pcp_reset(zone);
17251e8537baSXishi Qiu 		mutex_lock(&zonelists_mutex);
17261e8537baSXishi Qiu 		build_all_zonelists(NULL, NULL);
17271e8537baSXishi Qiu 		mutex_unlock(&zonelists_mutex);
17281e8537baSXishi Qiu 	} else
17291e8537baSXishi Qiu 		zone_pcp_update(zone);
1730340175b7SJiang Liu 
1731d9713679SLai Jiangshan 	node_states_clear_node(node, &arg);
1732698b1b30SVlastimil Babka 	if (arg.status_change_nid >= 0) {
17338fe23e05SDavid Rientjes 		kswapd_stop(node);
1734698b1b30SVlastimil Babka 		kcompactd_stop(node);
1735698b1b30SVlastimil Babka 	}
1736bce7394aSMinchan Kim 
17370c0e6195SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
17380c0e6195SKAMEZAWA Hiroyuki 	writeback_set_ratelimit();
17397b78d335SYasunori Goto 
17407b78d335SYasunori Goto 	memory_notify(MEM_OFFLINE, &arg);
17410c0e6195SKAMEZAWA Hiroyuki 	return 0;
17420c0e6195SKAMEZAWA Hiroyuki 
17430c0e6195SKAMEZAWA Hiroyuki failed_removal:
1744e33e33b4SChen Yucong 	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1745a62e2f4fSBjorn Helgaas 		 (unsigned long long) start_pfn << PAGE_SHIFT,
1746a62e2f4fSBjorn Helgaas 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
17477b78d335SYasunori Goto 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
17480c0e6195SKAMEZAWA Hiroyuki 	/* pushback to free area */
17490815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
17500c0e6195SKAMEZAWA Hiroyuki 	return ret;
17510c0e6195SKAMEZAWA Hiroyuki }
175271088785SBadari Pulavarty 
175330467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
1754a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1755a16cee10SWen Congyang {
1756a16cee10SWen Congyang 	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
1757a16cee10SWen Congyang }
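
/*
 * Editor's note (illustrative, not part of the original file): a minimal
 * sketch of the calling convention documented above -- the caller holds the
 * hotplug lock via mem_hotplug_begin()/mem_hotplug_done().  The pfn range is
 * an assumption; real callers offline one memory block at a time.
 */
#if 0	/* illustrative only, never compiled */
static int example_offline(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	mem_hotplug_begin();
	ret = offline_pages(start_pfn, nr_pages);
	mem_hotplug_done();

	return ret;
}
#endif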
1758e2ff3940SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
1759a16cee10SWen Congyang 
1760bbc76be6SWen Congyang /**
1761bbc76be6SWen Congyang  * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1762bbc76be6SWen Congyang  * @start_pfn: start pfn of the memory range
1763e05c4bbfSToshi Kani  * @end_pfn: end pfn of the memory range
1764bbc76be6SWen Congyang  * @arg: argument passed to func
1765bbc76be6SWen Congyang  * @func: callback for each memory section walked
1766bbc76be6SWen Congyang  *
1767bbc76be6SWen Congyang  * This function walks through all present mem sections in range
1768bbc76be6SWen Congyang  * [start_pfn, end_pfn) and calls func on each mem section.
1769bbc76be6SWen Congyang  *
1770bbc76be6SWen Congyang  * Returns the return value of func.
1771bbc76be6SWen Congyang  */
1772e2ff3940SRafael J. Wysocki int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
1773bbc76be6SWen Congyang 		void *arg, int (*func)(struct memory_block *, void *))
177471088785SBadari Pulavarty {
1775e90bdb7fSWen Congyang 	struct memory_block *mem = NULL;
1776e90bdb7fSWen Congyang 	struct mem_section *section;
1777e90bdb7fSWen Congyang 	unsigned long pfn, section_nr;
1778e90bdb7fSWen Congyang 	int ret;
177971088785SBadari Pulavarty 
1780e90bdb7fSWen Congyang 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1781e90bdb7fSWen Congyang 		section_nr = pfn_to_section_nr(pfn);
1782e90bdb7fSWen Congyang 		if (!present_section_nr(section_nr))
1783e90bdb7fSWen Congyang 			continue;
1784e90bdb7fSWen Congyang 
1785e90bdb7fSWen Congyang 		section = __nr_to_section(section_nr);
1786e90bdb7fSWen Congyang 		/* same memblock? */
1787e90bdb7fSWen Congyang 		if (mem)
1788e90bdb7fSWen Congyang 			if ((section_nr >= mem->start_section_nr) &&
1789e90bdb7fSWen Congyang 			    (section_nr <= mem->end_section_nr))
1790e90bdb7fSWen Congyang 				continue;
1791e90bdb7fSWen Congyang 
1792e90bdb7fSWen Congyang 		mem = find_memory_block_hinted(section, mem);
1793e90bdb7fSWen Congyang 		if (!mem)
1794e90bdb7fSWen Congyang 			continue;
1795e90bdb7fSWen Congyang 
1796bbc76be6SWen Congyang 		ret = func(mem, arg);
1797e90bdb7fSWen Congyang 		if (ret) {
1798e90bdb7fSWen Congyang 			kobject_put(&mem->dev.kobj);
1799e90bdb7fSWen Congyang 			return ret;
1800e90bdb7fSWen Congyang 		}
1801e90bdb7fSWen Congyang 	}
1802e90bdb7fSWen Congyang 
1803e90bdb7fSWen Congyang 	if (mem)
1804e90bdb7fSWen Congyang 		kobject_put(&mem->dev.kobj);
1805e90bdb7fSWen Congyang 
1806bbc76be6SWen Congyang 	return 0;
1807bbc76be6SWen Congyang }
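
/*
 * Editor's note (illustrative, not part of the original file): the callback
 * pattern expected by walk_memory_range(), modelled on online_memory_block()
 * earlier in this file.  The callback name and counter are assumptions.
 */
#if 0	/* illustrative only, never compiled */
static int example_count_block(struct memory_block *mem, void *arg)
{
	(*(unsigned long *)arg)++;	/* visit each memory block once */
	return 0;			/* non-zero aborts the walk and is returned */
}

	/* caller: */
	unsigned long nr_blocks = 0;

	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
			  &nr_blocks, example_count_block);
#endif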
1808bbc76be6SWen Congyang 
1809e2ff3940SRafael J. Wysocki #ifdef CONFIG_MEMORY_HOTREMOVE
1810d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1811bbc76be6SWen Congyang {
1812bbc76be6SWen Congyang 	int ret = !is_memblock_offlined(mem);
1813bbc76be6SWen Congyang 
1814349daa0fSRandy Dunlap 	if (unlikely(ret)) {
1815349daa0fSRandy Dunlap 		phys_addr_t beginpa, endpa;
1816349daa0fSRandy Dunlap 
1817349daa0fSRandy Dunlap 		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1818349daa0fSRandy Dunlap 		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1819756a025fSJoe Perches 		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
1820349daa0fSRandy Dunlap 			&beginpa, &endpa);
1821349daa0fSRandy Dunlap 	}
1822bbc76be6SWen Congyang 
1823bbc76be6SWen Congyang 	return ret;
1824bbc76be6SWen Congyang }
1825bbc76be6SWen Congyang 
18260f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat)
182760a5a19eSTang Chen {
182860a5a19eSTang Chen 	int cpu;
182960a5a19eSTang Chen 
183060a5a19eSTang Chen 	for_each_present_cpu(cpu) {
183160a5a19eSTang Chen 		if (cpu_to_node(cpu) == pgdat->node_id)
183260a5a19eSTang Chen 			/*
183360a5a19eSTang Chen 			 * the cpu on this node isn't removed, and we can't
183460a5a19eSTang Chen 			 * offline this node.
183560a5a19eSTang Chen 			 */
183660a5a19eSTang Chen 			return -EBUSY;
183760a5a19eSTang Chen 	}
183860a5a19eSTang Chen 
183960a5a19eSTang Chen 	return 0;
184060a5a19eSTang Chen }
184160a5a19eSTang Chen 
18420f1cfe9dSToshi Kani static void unmap_cpu_on_node(pg_data_t *pgdat)
1843e13fe869SWen Congyang {
1844e13fe869SWen Congyang #ifdef CONFIG_ACPI_NUMA
1845e13fe869SWen Congyang 	int cpu;
1846e13fe869SWen Congyang 
1847e13fe869SWen Congyang 	for_each_possible_cpu(cpu)
1848e13fe869SWen Congyang 		if (cpu_to_node(cpu) == pgdat->node_id)
1849e13fe869SWen Congyang 			numa_clear_node(cpu);
1850e13fe869SWen Congyang #endif
1851e13fe869SWen Congyang }
1852e13fe869SWen Congyang 
18530f1cfe9dSToshi Kani static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
1854e13fe869SWen Congyang {
18550f1cfe9dSToshi Kani 	int ret;
1856e13fe869SWen Congyang 
18570f1cfe9dSToshi Kani 	ret = check_cpu_on_node(pgdat);
1858e13fe869SWen Congyang 	if (ret)
1859e13fe869SWen Congyang 		return ret;
1860e13fe869SWen Congyang 
1861e13fe869SWen Congyang 	/*
1862e13fe869SWen Congyang 	 * the node will be offlined when we come here, so we can clear
1863e13fe869SWen Congyang 	 * the cpu_to_node() now.
1864e13fe869SWen Congyang 	 */
1865e13fe869SWen Congyang 
18660f1cfe9dSToshi Kani 	unmap_cpu_on_node(pgdat);
1867e13fe869SWen Congyang 	return 0;
1868e13fe869SWen Congyang }
1869e13fe869SWen Congyang 
18700f1cfe9dSToshi Kani /**
18710f1cfe9dSToshi Kani  * try_offline_node
18720f1cfe9dSToshi Kani  *
18730f1cfe9dSToshi Kani  * Offline a node if all memory sections and cpus of the node are removed.
18740f1cfe9dSToshi Kani  *
18750f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
18760f1cfe9dSToshi Kani  * and online/offline operations before this call.
18770f1cfe9dSToshi Kani  */
187890b30cdcSWen Congyang void try_offline_node(int nid)
187960a5a19eSTang Chen {
1880d822b86aSWen Congyang 	pg_data_t *pgdat = NODE_DATA(nid);
1881d822b86aSWen Congyang 	unsigned long start_pfn = pgdat->node_start_pfn;
1882d822b86aSWen Congyang 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
188360a5a19eSTang Chen 	unsigned long pfn;
188460a5a19eSTang Chen 
188560a5a19eSTang Chen 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
188660a5a19eSTang Chen 		unsigned long section_nr = pfn_to_section_nr(pfn);
188760a5a19eSTang Chen 
188860a5a19eSTang Chen 		if (!present_section_nr(section_nr))
188960a5a19eSTang Chen 			continue;
189060a5a19eSTang Chen 
189160a5a19eSTang Chen 		if (pfn_to_nid(pfn) != nid)
189260a5a19eSTang Chen 			continue;
189360a5a19eSTang Chen 
189460a5a19eSTang Chen 		/*
189560a5a19eSTang Chen 		 * some memory sections of this node are not removed, and we
189660a5a19eSTang Chen 		 * can't offline the node now.
189760a5a19eSTang Chen 		 */
189860a5a19eSTang Chen 		return;
189960a5a19eSTang Chen 	}
190060a5a19eSTang Chen 
19010f1cfe9dSToshi Kani 	if (check_and_unmap_cpu_on_node(pgdat))
190260a5a19eSTang Chen 		return;
190360a5a19eSTang Chen 
190460a5a19eSTang Chen 	/*
190560a5a19eSTang Chen 	 * all memory/cpus of this node have been removed, so we can offline this
190660a5a19eSTang Chen 	 * node now.
190760a5a19eSTang Chen 	 */
190860a5a19eSTang Chen 	node_set_offline(nid);
190960a5a19eSTang Chen 	unregister_one_node(nid);
191060a5a19eSTang Chen }
191190b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
191260a5a19eSTang Chen 
19130f1cfe9dSToshi Kani /**
19140f1cfe9dSToshi Kani  * remove_memory
19150f1cfe9dSToshi Kani  *
19160f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
19170f1cfe9dSToshi Kani  * and online/offline operations before this call, as required by
19180f1cfe9dSToshi Kani  * try_offline_node().
19190f1cfe9dSToshi Kani  */
1920242831ebSRafael J. Wysocki void __ref remove_memory(int nid, u64 start, u64 size)
1921bbc76be6SWen Congyang {
1922242831ebSRafael J. Wysocki 	int ret;
1923993c1aadSWen Congyang 
192427356f54SToshi Kani 	BUG_ON(check_hotplug_memory_range(start, size));
192527356f54SToshi Kani 
1926bfc8c901SVladimir Davydov 	mem_hotplug_begin();
19276677e3eaSYasuaki Ishimatsu 
19286677e3eaSYasuaki Ishimatsu 	/*
1929242831ebSRafael J. Wysocki 	 * All memory blocks must be offlined before removing memory.  Check
1930242831ebSRafael J. Wysocki 	 * whether all memory blocks in question are offline and trigger a BUG()
1931242831ebSRafael J. Wysocki 	 * if this is not the case.
19326677e3eaSYasuaki Ishimatsu 	 */
1933242831ebSRafael J. Wysocki 	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
1934d6de9d53SXishi Qiu 				check_memblock_offlined_cb);
1935bfc8c901SVladimir Davydov 	if (ret)
1936242831ebSRafael J. Wysocki 		BUG();
19376677e3eaSYasuaki Ishimatsu 
193846c66c4bSYasuaki Ishimatsu 	/* remove memmap entry */
193946c66c4bSYasuaki Ishimatsu 	firmware_map_remove(start, start + size, "System RAM");
1940f9126ab9SXishi Qiu 	memblock_free(start, size);
1941f9126ab9SXishi Qiu 	memblock_remove(start, size);
194246c66c4bSYasuaki Ishimatsu 
194324d335caSWen Congyang 	arch_remove_memory(start, size);
194424d335caSWen Congyang 
194560a5a19eSTang Chen 	try_offline_node(nid);
194660a5a19eSTang Chen 
1947bfc8c901SVladimir Davydov 	mem_hotplug_done();
194871088785SBadari Pulavarty }
194971088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
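
/*
 * Editor's note (illustrative, not part of the original file): a hedged
 * sketch of the locking rule documented above for remove_memory().  The nid,
 * start and size are assumptions, and all memory blocks in the range must
 * already have been offlined.
 */
#if 0	/* illustrative only, never compiled */
	lock_device_hotplug();
	remove_memory(nid, start, size);
	unlock_device_hotplug();
#endif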
1950aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
1951