// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

enum {
	MEMMAP_ON_MEMORY_DISABLE = 0,
	MEMMAP_ON_MEMORY_ENABLE,
	MEMMAP_ON_MEMORY_FORCE,
};

static int memmap_mode __read_mostly = MEMMAP_ON_MEMORY_DISABLE;

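/*
 * Example (illustrative, typical x86-64 defaults): a 128 MiB memory block
 * spans 32768 4 KiB pages; with a 64-byte struct page, the memmap size
 * computed below works out to 32768 * 64 bytes = 2 MiB.
 */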
static inline unsigned long memory_block_memmap_size(void)
{
	return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
}

static inline unsigned long memory_block_memmap_on_memory_pages(void)
{
	unsigned long nr_pages = PFN_UP(memory_block_memmap_size());

	/*
	 * In "forced" memmap_on_memory mode, we add extra pages to align the
	 * vmemmap size to cover full pageblocks. That way, we can add memory
	 * even if the vmemmap size is not properly aligned; however, we
	 * might waste memory.
	 */
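	/*
	 * For illustration (assuming 4 KiB pages and 512-page pageblocks):
	 * a memmap needing 513 pages would be padded to 1024 pages in
	 * "force" mode, wasting 511 pages per memory block.
	 */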
	if (memmap_mode == MEMMAP_ON_MEMORY_FORCE)
		return pageblock_align(nr_pages);
	return nr_pages;
}

#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
/*
 * memory_hotplug.memmap_on_memory parameter
 */
static int set_memmap_mode(const char *val, const struct kernel_param *kp)
{
	int ret, mode;
	bool enabled;

	if (sysfs_streq(val, "force") || sysfs_streq(val, "FORCE")) {
		mode = MEMMAP_ON_MEMORY_FORCE;
	} else {
		ret = kstrtobool(val, &enabled);
		if (ret < 0)
			return ret;
		if (enabled)
			mode = MEMMAP_ON_MEMORY_ENABLE;
		else
			mode = MEMMAP_ON_MEMORY_DISABLE;
	}
	*((int *)kp->arg) = mode;
	if (mode == MEMMAP_ON_MEMORY_FORCE) {
		unsigned long memmap_pages = memory_block_memmap_on_memory_pages();

		pr_info_once("Memory hotplug will waste %ld pages in each memory block\n",
			     memmap_pages - PFN_UP(memory_block_memmap_size()));
	}
	return 0;
}

static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
{
	int mode = *((int *)kp->arg);

	if (mode == MEMMAP_ON_MEMORY_FORCE)
		return sprintf(buffer, "force\n");
	return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
}

static const struct kernel_param_ops memmap_mode_ops = {
	.set = set_memmap_mode,
	.get = get_memmap_mode,
};
module_param_cb(memmap_on_memory, &memmap_mode_ops, &memmap_mode, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug\n"
		 "With value \"force\" it could result in memory wastage due "
		 "to memmap size limitations (Y/N/force)");
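/*
 * Usage note: as a built-in parameter this is set on the kernel command
 * line, e.g.:
 *   memory_hotplug.memmap_on_memory=Y
 *   memory_hotplug.memmap_on_memory=force
 */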

static inline bool mhp_memmap_on_memory(void)
{
	return memmap_mode != MEMMAP_ON_MEMORY_DISABLE;
}
#else
static inline bool mhp_memmap_on_memory(void)
{
	return false;
}
#endif

enum {
	ONLINE_POLICY_CONTIG_ZONES = 0,
	ONLINE_POLICY_AUTO_MOVABLE,
};

static const char * const online_policy_to_str[] = {
	[ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
	[ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
};

static int set_online_policy(const char *val, const struct kernel_param *kp)
{
	int ret = sysfs_match_string(online_policy_to_str, val);

	if (ret < 0)
		return ret;
	*((int *)kp->arg) = ret;
	return 0;
}

static int get_online_policy(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
}

/*
 * memory_hotplug.online_policy: configure online behavior when onlining without
 * specifying a zone (MMOP_ONLINE)
 *
 * "contig-zones": keep zone contiguous
 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
 *		   (auto_movable_ratio, auto_movable_numa_aware) allows for it
 */
static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
static const struct kernel_param_ops online_policy_ops = {
	.set = set_online_policy,
	.get = get_online_policy,
};
module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
MODULE_PARM_DESC(online_policy,
		 "Set the online policy (\"contig-zones\", \"auto-movable\") "
		 "Default: \"contig-zones\"");
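/*
 * Usage note (illustrative): the 0644 mode makes this writable at runtime,
 * e.g.:
 *   echo auto-movable > /sys/module/memory_hotplug/parameters/online_policy
 */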

/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represents an upper limit and the kernel might decide to not
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
static unsigned int auto_movable_ratio __read_mostly = 301;
module_param(auto_movable_ratio, uint, 0644);
MODULE_PARM_DESC(auto_movable_ratio,
		 "Set the maximum ratio of MOVABLE:KERNEL memory in the system "
		 "in percent for \"auto-movable\" online policy. Default: 301");
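/*
 * Worked example (illustrative): the default ratio of 301 permits roughly
 * 3.01 pages of ZONE_MOVABLE memory per page of early KERNEL memory; with
 * 4 GiB of boot-time kernel-zone memory, up to ~12.04 GiB may be onlined
 * to ZONE_MOVABLE under the "auto-movable" policy.
 */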

/*
 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
 */
#ifdef CONFIG_NUMA
static bool auto_movable_numa_aware __read_mostly = true;
module_param(auto_movable_numa_aware, bool, 0644);
MODULE_PARM_DESC(auto_movable_numa_aware,
		 "Consider numa node stats in addition to global stats in "
		 "\"auto-movable\" online policy. Default: true");
#endif /* CONFIG_NUMA */

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);
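/* Boot-time usage, e.g.: memhp_default_state=online (see kernel-parameters.txt) */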

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure the value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to the documentation of 'mem=' in kernel-parameters.txt for
	 * more details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn | nr_pages, min_align))
		return -EINVAL;
	return 0;
}
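
/*
 * Illustration (assuming 4 KiB pages and 2 MiB sub-sections, i.e. 512
 * pfns): pfn == 0x80200 with nr_pages == 0x200 passes; pfn == 0x80100 or
 * nr_pages == 0x100 would fail with -EINVAL under SPARSEMEM_VMEMMAP.
 */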

/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers that rely on the fully initialized page->flags (and other
 * fields) should use this rather than pfn_valid && pfn_to_page.
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
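
/*
 * Typical caller pattern (a sketch, not code from this file):
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;	-- offline or invalid pfn, skip
 *		...
 *	}
 */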

int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
					 params->pgmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
}

static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void __ref remove_pfn_range_from_zone(struct zone *zone,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_is_zone_device(zone))
		return;

	clear_zone_contiguous(zone);

	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);

	set_zone_contiguous(zone);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;

	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		sparse_remove_section(pfn, cur_nr_pages, altmap);
	}
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void generic_online_page(struct page *page, unsigned int order)
{
	/*
	 * Freeing the page with debug_pagealloc enabled will try to unmap it,
	 * so we should map it first. This is better than introducing a special
	 * case in page freeing fast path.
	 */
	debug_pagealloc_map_pages(page, 1 << order);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
}
EXPORT_SYMBOL_GPL(generic_online_page);

static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_ORDER aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be aligned to
	 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
	 * this and the first chunk to online will be pageblock_nr_pages.
	 */
	for (pfn = start_pfn; pfn < end_pfn;) {
		int order;

		/*
		 * Free to online pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_ORDER-aligned; set order to MAX_ORDER in that case.
		 */
		if (pfn)
			order = min_t(int, MAX_ORDER, __ffs(pfn));
		else
			order = MAX_ORDER;

		(*online_page_callback)(pfn_to_page(pfn), order);
		pfn += (1UL << order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);
}
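
/*
 * Illustration of the chunking above: for pfn == 0x2600, __ffs() == 9, so
 * a single callback invocation frees 2^9 == 512 pages; a MAX_ORDER-aligned
 * pfn proceeds in MAX_ORDER-sized chunks.
 */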

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
					 unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

#ifdef CONFIG_ZONE_DEVICE
static void section_taint_zone_device(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}
#else
static inline void section_taint_zone_device(unsigned long pfn)
{
}
#endif

/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				  unsigned long nr_pages,
				  struct vmem_altmap *altmap, int migratetype)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;

	clear_zone_contiguous(zone);

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);

	/*
	 * Subsection population requires care in pfn_to_online_page().
	 * Set the taint to enable the slow path detection of
	 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
	 * section.
	 */
	if (zone_is_zone_device(zone)) {
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
	}

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			  MEMINIT_HOTPLUG, altmap, migratetype);

	set_zone_contiguous(zone);
}

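/*
 * Context (simplified sketch): during onlining, online_pages() -- defined
 * later in this file -- first associates the range with a zone via
 * move_pfn_range_to_zone() above, and then hands the pages to the buddy
 * allocator via online_pages_range(), which also marks the sections online.
 */
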
struct auto_movable_stats {
	unsigned long kernel_early_pages;
	unsigned long movable_pages;
};

static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
					    struct zone *zone)
{
	if (zone_idx(zone) == ZONE_MOVABLE) {
		stats->movable_pages += zone->present_pages;
	} else {
		stats->kernel_early_pages += zone->present_early_pages;
#ifdef CONFIG_CMA
		/*
		 * CMA pages (never on hotplugged memory) behave like
		 * ZONE_MOVABLE.
		 */
		stats->movable_pages += zone->cma_pages;
		stats->kernel_early_pages -= zone->cma_pages;
#endif /* CONFIG_CMA */
	}
}

struct auto_movable_group_stats {
	unsigned long movable_pages;
	unsigned long req_kernel_early_pages;
};

static int auto_movable_stats_account_group(struct memory_group *group,
					    void *arg)
{
	const int ratio = READ_ONCE(auto_movable_ratio);
	struct auto_movable_group_stats *stats = arg;
	long pages;

	/*
	 * We don't support modifying the config while the auto-movable online
	 * policy is already enabled. Just avoid the division by zero below.
	 */
	if (!ratio)
		return 0;

	/*
	 * Calculate how many early kernel pages this group requires to
	 * satisfy the configured zone ratio.
	 */
	pages = group->present_movable_pages * 100 / ratio;
	pages -= group->present_kernel_pages;

	if (pages > 0)
		stats->req_kernel_early_pages += pages;
	stats->movable_pages += group->present_movable_pages;
	return 0;
}

static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
					    unsigned long nr_pages)
{
	unsigned long kernel_early_pages, movable_pages;
	struct auto_movable_group_stats group_stats = {};
	struct auto_movable_stats stats = {};
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *zone;
	int i;

	/* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
	if (nid == NUMA_NO_NODE) {
		/* TODO: cache values */
		for_each_populated_zone(zone)
			auto_movable_stats_account_zone(&stats, zone);
	} else {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			zone = pgdat->node_zones + i;
			if (populated_zone(zone))
				auto_movable_stats_account_zone(&stats, zone);
		}
	}

	kernel_early_pages = stats.kernel_early_pages;
	movable_pages = stats.movable_pages;

	/*
	 * Kernel memory inside dynamic memory group allows for more MOVABLE
	 * memory within the same group. Remove the effect of all but the
	 * current group from the stats.
	 */
	walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
				   group, &group_stats);
	if (kernel_early_pages <= group_stats.req_kernel_early_pages)
		return false;
	kernel_early_pages -= group_stats.req_kernel_early_pages;
	movable_pages -= group_stats.movable_pages;

	if (group && group->is_dynamic)
		kernel_early_pages += group->present_kernel_pages;

	/*
	 * Test if we could online the given number of pages to ZONE_MOVABLE
	 * and still stay in the configured ratio.
	 */
	movable_pages += nr_pages;
	return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid < ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

/*
 * Determine to which zone to online memory dynamically based on user
 * configuration and system stats. We care about the following ratio:
 *
 *   MOVABLE : KERNEL
 *
 * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
 * one of the kernel zones. CMA pages inside one of the kernel zones really
 * behave like ZONE_MOVABLE, so we treat them accordingly.
 *
 * We don't allow for hotplugged memory in a KERNEL zone to increase the
 * amount of MOVABLE memory we can have, so we end up with:
 *
 *   MOVABLE : KERNEL_EARLY
 *
 * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
 * boot. We base our calculation on KERNEL_EARLY internally, because:
 *
 * a) Hotplugged memory in one of the kernel zones can sometimes still get
 *    hotunplugged, especially when hot(un)plugging individual memory blocks.
 *    There is no coordination across memory devices, therefore "automatic"
 *    hotunplugging, as implemented in hypervisors, could result in zone
 *    imbalances.
 * b) Early/boot memory in one of the kernel zones can usually not get
 *    hotunplugged again (e.g., no firmware interface to unplug, fragmented
 *    with unmovable allocations). While there are corner cases where it might
 *    still work, it is barely relevant in practice.
 *
 * Exceptions are dynamic memory groups, which allow for more MOVABLE
 * memory within the same memory group -- because in that case, there is
 * coordination within the single memory device managed by a single driver.
 *
 * We rely on "present pages" instead of "managed pages", as the latter is
 * highly unreliable and dynamic in virtualized environments, and does not
 * consider boot time allocations. For example, memory ballooning adjusts the
 * managed pages when inflating/deflating the balloon, and balloon compaction
 * can even migrate inflated pages between zones.
 *
 * Using "present pages" is better but some things to keep in mind are:
 *
 * a) Some memblock allocations, such as for the crashkernel area, are
 *    effectively unused by the kernel, yet they account to "present pages".
 *    Fortunately, these allocations are comparatively small in relevant setups
 *    (e.g., fraction of system memory).
 * b) Some hotplugged memory blocks in virtualized environments, especially
 *    hotplugged by virtio-mem, look like they are completely present, however,
 *    only parts of the memory block are actually currently usable.
 *    "present pages" is an upper limit that can get reached at runtime. As
 *    we base our calculations on KERNEL_EARLY, this is not an issue.
 */
auto_movable_zone_for_pfn(int nid,struct memory_group * group,unsigned long pfn,unsigned long nr_pages)962445fcf7cSDavid Hildenbrand static struct zone *auto_movable_zone_for_pfn(int nid,
963445fcf7cSDavid Hildenbrand struct memory_group *group,
964445fcf7cSDavid Hildenbrand unsigned long pfn,
965e83a437fSDavid Hildenbrand unsigned long nr_pages)
966e83a437fSDavid Hildenbrand {
967445fcf7cSDavid Hildenbrand unsigned long online_pages = 0, max_pages, end_pfn;
968445fcf7cSDavid Hildenbrand struct page *page;
969445fcf7cSDavid Hildenbrand
970e83a437fSDavid Hildenbrand if (!auto_movable_ratio)
971e83a437fSDavid Hildenbrand goto kernel_zone;
972e83a437fSDavid Hildenbrand
973445fcf7cSDavid Hildenbrand if (group && !group->is_dynamic) {
974445fcf7cSDavid Hildenbrand max_pages = group->s.max_pages;
975445fcf7cSDavid Hildenbrand online_pages = group->present_movable_pages;
976445fcf7cSDavid Hildenbrand
977445fcf7cSDavid Hildenbrand /* If anything is !MOVABLE online the rest !MOVABLE. */
978445fcf7cSDavid Hildenbrand if (group->present_kernel_pages)
979445fcf7cSDavid Hildenbrand goto kernel_zone;
980445fcf7cSDavid Hildenbrand } else if (!group || group->d.unit_pages == nr_pages) {
981445fcf7cSDavid Hildenbrand max_pages = nr_pages;
982445fcf7cSDavid Hildenbrand } else {
983445fcf7cSDavid Hildenbrand max_pages = group->d.unit_pages;
984445fcf7cSDavid Hildenbrand /*
985445fcf7cSDavid Hildenbrand * Take a look at all online sections in the current unit.
986445fcf7cSDavid Hildenbrand * We can safely assume that all pages within a section belong
987445fcf7cSDavid Hildenbrand * to the same zone, because dynamic memory groups only deal
988445fcf7cSDavid Hildenbrand * with hotplugged memory.
989445fcf7cSDavid Hildenbrand */
990445fcf7cSDavid Hildenbrand pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
991445fcf7cSDavid Hildenbrand end_pfn = pfn + group->d.unit_pages;
992445fcf7cSDavid Hildenbrand for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
993445fcf7cSDavid Hildenbrand page = pfn_to_online_page(pfn);
994445fcf7cSDavid Hildenbrand if (!page)
995445fcf7cSDavid Hildenbrand continue;
996445fcf7cSDavid Hildenbrand /* If anything is !MOVABLE, online the rest !MOVABLE. */
99707252dfeSKefeng Wang if (!is_zone_movable_page(page))
998445fcf7cSDavid Hildenbrand goto kernel_zone;
999445fcf7cSDavid Hildenbrand online_pages += PAGES_PER_SECTION;
1000445fcf7cSDavid Hildenbrand }
1001445fcf7cSDavid Hildenbrand }
1002445fcf7cSDavid Hildenbrand
1003445fcf7cSDavid Hildenbrand /*
1004445fcf7cSDavid Hildenbrand * Online MOVABLE if we could *currently* online all remaining parts
1005445fcf7cSDavid Hildenbrand * MOVABLE. We expect to (add+) online them immediately next, so if
1006445fcf7cSDavid Hildenbrand * nobody interferes, all will be MOVABLE if possible.
1007445fcf7cSDavid Hildenbrand */
1008445fcf7cSDavid Hildenbrand nr_pages = max_pages - online_pages;
10093fcebf90SDavid Hildenbrand if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
1010e83a437fSDavid Hildenbrand goto kernel_zone;
1011e83a437fSDavid Hildenbrand
1012e83a437fSDavid Hildenbrand #ifdef CONFIG_NUMA
1013e83a437fSDavid Hildenbrand if (auto_movable_numa_aware &&
10143fcebf90SDavid Hildenbrand !auto_movable_can_online_movable(nid, group, nr_pages))
1015e83a437fSDavid Hildenbrand goto kernel_zone;
1016e83a437fSDavid Hildenbrand #endif /* CONFIG_NUMA */
1017e83a437fSDavid Hildenbrand
1018e83a437fSDavid Hildenbrand return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
1019e83a437fSDavid Hildenbrand kernel_zone:
1020e83a437fSDavid Hildenbrand return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
1021e83a437fSDavid Hildenbrand }
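/*
 * Illustrative sketch (not compiled): the auto_movable_can_online_movable()
 * checks above boil down, roughly, to staying within the configured
 * MOVABLE:KERNEL ratio. Variable names below are illustrative only:
 */
#if 0
	/* Would onlining nr_pages more MOVABLE pages still respect the ratio? */
	movable_pages += nr_pages;
	ok = movable_pages <= auto_movable_ratio * kernel_early_pages / 100;
#endif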
1022e83a437fSDavid Hildenbrand
default_zone_for_pfn(int nid,unsigned long start_pfn,unsigned long nr_pages)1023c6f03e29SMichal Hocko static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
1024c6f03e29SMichal Hocko unsigned long nr_pages)
1025e5e68930SMichal Hocko {
1026c6f03e29SMichal Hocko struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
1027c6f03e29SMichal Hocko nr_pages);
1028c6f03e29SMichal Hocko struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
1029c6f03e29SMichal Hocko bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
1030c6f03e29SMichal Hocko bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
1031e5e68930SMichal Hocko
1032e5e68930SMichal Hocko /*
1033c6f03e29SMichal Hocko * We inherit the existing zone in a simple case where zones do not
1034c6f03e29SMichal Hocko * overlap in the given range.
1035e5e68930SMichal Hocko */
1036c6f03e29SMichal Hocko if (in_kernel ^ in_movable)
1037c6f03e29SMichal Hocko return (in_kernel) ? kernel_zone : movable_zone;
1038e5e68930SMichal Hocko
1039c6f03e29SMichal Hocko /*
1040c6f03e29SMichal Hocko * If the range doesn't belong to any zone or two zones overlap in the
1041c6f03e29SMichal Hocko * given range then we use movable zone only if movable_node is
1042c6f03e29SMichal Hocko * enabled because we always online to a kernel zone by default.
1043c6f03e29SMichal Hocko */
1044c6f03e29SMichal Hocko return movable_node_enabled ? movable_zone : kernel_zone;
10459f123ab5SMichal Hocko }
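/*
 * Decision summary of default_zone_for_pfn() above:
 *
 *	in_kernel  in_movable  resulting zone
 *	yes        no          kernel_zone
 *	no         yes         movable_zone
 *	no         no          movable_node_enabled ? movable : kernel
 *	yes        yes         movable_node_enabled ? movable : kernel
 */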
10469f123ab5SMichal Hocko
zone_for_pfn_range(int online_type,int nid,struct memory_group * group,unsigned long start_pfn,unsigned long nr_pages)10477cf209baSDavid Hildenbrand struct zone *zone_for_pfn_range(int online_type, int nid,
1048445fcf7cSDavid Hildenbrand struct memory_group *group, unsigned long start_pfn,
1049e5e68930SMichal Hocko unsigned long nr_pages)
1050f1dd2cd1SMichal Hocko {
1051c6f03e29SMichal Hocko if (online_type == MMOP_ONLINE_KERNEL)
1052c6f03e29SMichal Hocko return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
1053f1dd2cd1SMichal Hocko
1054c6f03e29SMichal Hocko if (online_type == MMOP_ONLINE_MOVABLE)
1055c6f03e29SMichal Hocko return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
1056f1dd2cd1SMichal Hocko
1057e83a437fSDavid Hildenbrand if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
1058445fcf7cSDavid Hildenbrand return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
1059e83a437fSDavid Hildenbrand
1060c6f03e29SMichal Hocko return default_zone_for_pfn(nid, start_pfn, nr_pages);
1061e5e68930SMichal Hocko }
1062e5e68930SMichal Hocko
1063a08a2ae3SOscar Salvador /*
1064a08a2ae3SOscar Salvador * This function should only be called by memory_block_{online,offline},
1065a08a2ae3SOscar Salvador * and {online,offline}_pages.
1066a08a2ae3SOscar Salvador */
adjust_present_page_count(struct page * page,struct memory_group * group,long nr_pages)1067836809ecSDavid Hildenbrand void adjust_present_page_count(struct page *page, struct memory_group *group,
1068836809ecSDavid Hildenbrand long nr_pages)
1069f9901144SDavid Hildenbrand {
10704b097002SDavid Hildenbrand struct zone *zone = page_zone(page);
1071836809ecSDavid Hildenbrand const bool movable = zone_idx(zone) == ZONE_MOVABLE;
10724b097002SDavid Hildenbrand
10734b097002SDavid Hildenbrand /*
10744b097002SDavid Hildenbrand * We only support onlining/offlining/adding/removing of complete
10754b097002SDavid Hildenbrand * memory blocks; therefore, all of it is either early or hotplugged.
10764b097002SDavid Hildenbrand */
10774b097002SDavid Hildenbrand if (early_section(__pfn_to_section(page_to_pfn(page))))
10784b097002SDavid Hildenbrand zone->present_early_pages += nr_pages;
1079f9901144SDavid Hildenbrand zone->present_pages += nr_pages;
1080f9901144SDavid Hildenbrand zone->zone_pgdat->node_present_pages += nr_pages;
1081836809ecSDavid Hildenbrand
1082836809ecSDavid Hildenbrand if (group && movable)
1083836809ecSDavid Hildenbrand group->present_movable_pages += nr_pages;
1084836809ecSDavid Hildenbrand else if (group && !movable)
1085836809ecSDavid Hildenbrand group->present_kernel_pages += nr_pages;
1086f9901144SDavid Hildenbrand }
1087f9901144SDavid Hildenbrand
mhp_init_memmap_on_memory(unsigned long pfn,unsigned long nr_pages,struct zone * zone)1088a08a2ae3SOscar Salvador int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
1089a08a2ae3SOscar Salvador struct zone *zone)
1090a08a2ae3SOscar Salvador {
1091a08a2ae3SOscar Salvador unsigned long end_pfn = pfn + nr_pages;
109266361095SMuchun Song int ret, i;
1093a08a2ae3SOscar Salvador
1094a08a2ae3SOscar Salvador ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1095a08a2ae3SOscar Salvador if (ret)
1096a08a2ae3SOscar Salvador return ret;
1097a08a2ae3SOscar Salvador
1098a08a2ae3SOscar Salvador move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
1099a08a2ae3SOscar Salvador
110066361095SMuchun Song for (i = 0; i < nr_pages; i++)
110166361095SMuchun Song SetPageVmemmapSelfHosted(pfn_to_page(pfn + i));
110266361095SMuchun Song
1103a08a2ae3SOscar Salvador /*
1104a08a2ae3SOscar Salvador * It might be that the vmemmap_pages fully span sections. If that is
1105a08a2ae3SOscar Salvador * the case, mark those sections online here as otherwise they will be
1106a08a2ae3SOscar Salvador * left offline.
1107a08a2ae3SOscar Salvador */
1108a08a2ae3SOscar Salvador if (nr_pages >= PAGES_PER_SECTION)
1109a08a2ae3SOscar Salvador online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1110a08a2ae3SOscar Salvador
1111a08a2ae3SOscar Salvador return ret;
1112a08a2ae3SOscar Salvador }
1113a08a2ae3SOscar Salvador
mhp_deinit_memmap_on_memory(unsigned long pfn,unsigned long nr_pages)1114a08a2ae3SOscar Salvador void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
1115a08a2ae3SOscar Salvador {
1116a08a2ae3SOscar Salvador unsigned long end_pfn = pfn + nr_pages;
1117a08a2ae3SOscar Salvador
1118a08a2ae3SOscar Salvador /*
1119a08a2ae3SOscar Salvador * It might be that the vmemmap_pages fully span sections. If that is
1120a08a2ae3SOscar Salvador * the case, mark those sections offline here as otherwise they will be
1121a08a2ae3SOscar Salvador * left online.
1122a08a2ae3SOscar Salvador */
1123a08a2ae3SOscar Salvador if (nr_pages >= PAGES_PER_SECTION)
1124a08a2ae3SOscar Salvador offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1125a08a2ae3SOscar Salvador
1126a08a2ae3SOscar Salvador /*
1127a08a2ae3SOscar Salvador * The pages associated with this vmemmap have been offlined, so
1128a08a2ae3SOscar Salvador * we can reset their state here.
1129a08a2ae3SOscar Salvador */
1130a08a2ae3SOscar Salvador remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
1131a08a2ae3SOscar Salvador kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1132a08a2ae3SOscar Salvador }
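/*
 * Roughly how a caller like memory_block_online() (drivers/base/memory.c)
 * is expected to pair these helpers with online_pages(): a simplified
 * sketch (not compiled; error handling omitted; names illustrative):
 */
#if 0
	if (nr_vmemmap_pages)
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
						zone);
	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, group);
#endif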
1133a08a2ae3SOscar Salvador
1134e0270ffaSSumanth Korikkar /*
1135e0270ffaSSumanth Korikkar * Must be called with mem_hotplug_lock in write mode.
1136e0270ffaSSumanth Korikkar */
online_pages(unsigned long pfn,unsigned long nr_pages,struct zone * zone,struct memory_group * group)1137836809ecSDavid Hildenbrand int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
1138836809ecSDavid Hildenbrand struct zone *zone, struct memory_group *group)
113975884fb1SKAMEZAWA Hiroyuki {
1140aa47228aSCody P Schafer unsigned long flags;
11416811378eSYasunori Goto int need_zonelists_rebuild = 0;
1142a08a2ae3SOscar Salvador const int nid = zone_to_nid(zone);
11437b78d335SYasunori Goto int ret;
11447b78d335SYasunori Goto struct memory_notify arg;
11453947be19SDave Hansen
1146dd8e2f23SOscar Salvador /*
1147dd8e2f23SOscar Salvador * {on,off}lining is constrained to full memory sections (or more
1148041711ceSZhen Lei * precisely to memory blocks from the user space POV).
1149dd8e2f23SOscar Salvador * memmap_on_memory is an exception because it reserves the initial part
1150dd8e2f23SOscar Salvador * of the physical memory space for vmemmaps. That space is pageblock
1151dd8e2f23SOscar Salvador * aligned.
1152dd8e2f23SOscar Salvador */
1153ee0913c4SKefeng Wang if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
1154dd8e2f23SOscar Salvador !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
11554986fac1SDavid Hildenbrand return -EINVAL;
11564986fac1SDavid Hildenbrand
1157381eab4aSDavid Hildenbrand
1158f1dd2cd1SMichal Hocko /* associate pfn range with the zone */
1159b30c5927SDavid Hildenbrand move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
1160511c2abaSLai Jiangshan
11617b78d335SYasunori Goto arg.start_pfn = pfn;
11627b78d335SYasunori Goto arg.nr_pages = nr_pages;
1163d9713679SLai Jiangshan node_states_check_changes_online(nr_pages, zone, &arg);
11647b78d335SYasunori Goto
11657b78d335SYasunori Goto ret = memory_notify(MEM_GOING_ONLINE, &arg);
11667b78d335SYasunori Goto ret = notifier_to_errno(ret);
1167e33e33b4SChen Yucong if (ret)
1168e33e33b4SChen Yucong goto failed_addition;
1169e33e33b4SChen Yucong
11703947be19SDave Hansen /*
1171b30c5927SDavid Hildenbrand * Fixup the number of isolated pageblocks before marking the sections
1172b30c5927SDavid Hildenbrand * online, such that undo_isolate_page_range() works correctly.
1173b30c5927SDavid Hildenbrand */
1174b30c5927SDavid Hildenbrand spin_lock_irqsave(&zone->lock, flags);
1175b30c5927SDavid Hildenbrand zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
1176b30c5927SDavid Hildenbrand spin_unlock_irqrestore(&zone->lock, flags);
1177b30c5927SDavid Hildenbrand
1178b30c5927SDavid Hildenbrand /*
11796811378eSYasunori Goto * If this zone is not populated, then it is not in zonelist.
11806811378eSYasunori Goto * This means the page allocator ignores this zone.
11816811378eSYasunori Goto * So, zonelist must be updated after online.
11826811378eSYasunori Goto */
11836dcd73d7SWen Congyang if (!populated_zone(zone)) {
11846811378eSYasunori Goto need_zonelists_rebuild = 1;
118572675e13SMichal Hocko setup_zone_pageset(zone);
11866dcd73d7SWen Congyang }
11876811378eSYasunori Goto
1188aac65321SDavid Hildenbrand online_pages_range(pfn, nr_pages);
1189836809ecSDavid Hildenbrand adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
1190aa47228aSCody P Schafer
1191b30c5927SDavid Hildenbrand node_states_set_node(nid, &arg);
1192b30c5927SDavid Hildenbrand if (need_zonelists_rebuild)
1193b30c5927SDavid Hildenbrand build_all_zonelists(NULL);
1194b30c5927SDavid Hildenbrand
1195b30c5927SDavid Hildenbrand /* Basic onlining is complete, allow allocation of onlined pages. */
1196b30c5927SDavid Hildenbrand undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
1197b30c5927SDavid Hildenbrand
119893146d98SDavid Hildenbrand /*
1199b86c5fc4SDavid Hildenbrand * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
1200b86c5fc4SDavid Hildenbrand * the tail of the freelist when undoing isolation). Shuffle the whole
1201b86c5fc4SDavid Hildenbrand * zone to make sure the just onlined pages are properly distributed
1202b86c5fc4SDavid Hildenbrand * across the whole freelist - to create an initial shuffle.
120393146d98SDavid Hildenbrand */
1204e900a918SDan Williams shuffle_zone(zone);
1205e900a918SDan Williams
1206b92ca18eSMel Gorman /* reinitialise watermarks and update pcp limits */
12071b79acc9SKOSAKI Motohiro init_per_zone_wmark_min();
12081b79acc9SKOSAKI Motohiro
1209e888ca35SVlastimil Babka kswapd_run(nid);
1210698b1b30SVlastimil Babka kcompactd_run(nid);
121161b13993SDave Hansen
12122d1d43f6SChandra Seetharaman writeback_set_ratelimit();
12137b78d335SYasunori Goto
12147b78d335SYasunori Goto memory_notify(MEM_ONLINE, &arg);
121530467e0bSDavid Rientjes return 0;
1216e33e33b4SChen Yucong
1217e33e33b4SChen Yucong failed_addition:
1218e33e33b4SChen Yucong pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1219e33e33b4SChen Yucong (unsigned long long) pfn << PAGE_SHIFT,
1220e33e33b4SChen Yucong (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1221e33e33b4SChen Yucong memory_notify(MEM_CANCEL_ONLINE, &arg);
1222feee6b29SDavid Hildenbrand remove_pfn_range_from_zone(zone, pfn, nr_pages);
1223e33e33b4SChen Yucong return ret;
12243947be19SDave Hansen }
1225bc02af93SYasunori Goto
1226e1319331SHidetoshi Seto /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
hotadd_init_pgdat(int nid)122709f49dcaSMichal Hocko static pg_data_t __ref *hotadd_init_pgdat(int nid)
12289af3c2deSYasunori Goto {
12299af3c2deSYasunori Goto struct pglist_data *pgdat;
12309af3c2deSYasunori Goto
123109f49dcaSMichal Hocko /*
123209f49dcaSMichal Hocko * NODE_DATA is preallocated (free_area_init) but its internal
123309f49dcaSMichal Hocko * state is not allocated completely. Add missing pieces.
123409f49dcaSMichal Hocko * Completely offline nodes stay around and they just need
123509f49dcaSMichal Hocko * reinitialization.
123609f49dcaSMichal Hocko */
123770b5b46aSMichal Hocko pgdat = NODE_DATA(nid);
123803e85f9dSOscar Salvador
12399af3c2deSYasunori Goto /* init node's zones as empty zones, we don't have any present pages.*/
124070b5b46aSMichal Hocko free_area_init_core_hotplug(pgdat);
12419af3c2deSYasunori Goto
1242959ecc48SKAMEZAWA Hiroyuki /*
1243959ecc48SKAMEZAWA Hiroyuki * The node we allocated has no zone fallback lists. Build the
1244959ecc48SKAMEZAWA Hiroyuki * zonelists here to avoid accessing an uninitialized zonelist.
1245959ecc48SKAMEZAWA Hiroyuki */
124672675e13SMichal Hocko build_all_zonelists(pgdat);
1247959ecc48SKAMEZAWA Hiroyuki
12489af3c2deSYasunori Goto return pgdat;
12499af3c2deSYasunori Goto }
12509af3c2deSYasunori Goto
1251ba2d2666SMel Gorman /*
1252ba2d2666SMel Gorman * __try_online_node - online a node if offlined
1253e8b098fcSMike Rapoport * @nid: the node ID
1254b9ff0360SOscar Salvador * @set_node_online: Whether we want to online the node
1255cf23422bSminskey guo * called by cpu_up() to online a node without onlined memory.
1256b9ff0360SOscar Salvador *
1257b9ff0360SOscar Salvador * Returns:
1258b9ff0360SOscar Salvador * 1 -> a new node has been allocated
1259b9ff0360SOscar Salvador * 0 -> the node is already online
1260b9ff0360SOscar Salvador * -ENOMEM -> the node could not be allocated
1261cf23422bSminskey guo */
__try_online_node(int nid,bool set_node_online)1262c68ab18cSDavid Hildenbrand static int __try_online_node(int nid, bool set_node_online)
1263cf23422bSminskey guo {
1264cf23422bSminskey guo pg_data_t *pgdat;
1265b9ff0360SOscar Salvador int ret = 1;
1266cf23422bSminskey guo
126701b0f197SToshi Kani if (node_online(nid))
126801b0f197SToshi Kani return 0;
126901b0f197SToshi Kani
127009f49dcaSMichal Hocko pgdat = hotadd_init_pgdat(nid);
12717553e8f2SDavid Rientjes if (!pgdat) {
127201b0f197SToshi Kani pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1273cf23422bSminskey guo ret = -ENOMEM;
1274cf23422bSminskey guo goto out;
1275cf23422bSminskey guo }
1276b9ff0360SOscar Salvador
1277b9ff0360SOscar Salvador if (set_node_online) {
1278cf23422bSminskey guo node_set_online(nid);
1279cf23422bSminskey guo ret = register_one_node(nid);
1280cf23422bSminskey guo BUG_ON(ret);
1281b9ff0360SOscar Salvador }
1282cf23422bSminskey guo out:
1283b9ff0360SOscar Salvador return ret;
1284b9ff0360SOscar Salvador }
1285b9ff0360SOscar Salvador
1286b9ff0360SOscar Salvador /*
1287b9ff0360SOscar Salvador * Users of this function always want to online/register the node
1288b9ff0360SOscar Salvador */
try_online_node(int nid)1289b9ff0360SOscar Salvador int try_online_node(int nid)
1290b9ff0360SOscar Salvador {
1291b9ff0360SOscar Salvador int ret;
1292b9ff0360SOscar Salvador
1293b9ff0360SOscar Salvador mem_hotplug_begin();
1294c68ab18cSDavid Hildenbrand ret = __try_online_node(nid, true);
1295bfc8c901SVladimir Davydov mem_hotplug_done();
1296cf23422bSminskey guo return ret;
1297cf23422bSminskey guo }
1298cf23422bSminskey guo
check_hotplug_memory_range(u64 start,u64 size)129927356f54SToshi Kani static int check_hotplug_memory_range(u64 start, u64 size)
130027356f54SToshi Kani {
1301ba325585SPavel Tatashin /* memory range must be block size aligned */
1302cec3ebd0SDavid Hildenbrand if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
1303cec3ebd0SDavid Hildenbrand !IS_ALIGNED(size, memory_block_size_bytes())) {
1304ba325585SPavel Tatashin pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
1305cec3ebd0SDavid Hildenbrand memory_block_size_bytes(), start, size);
130627356f54SToshi Kani return -EINVAL;
130727356f54SToshi Kani }
130827356f54SToshi Kani
130927356f54SToshi Kani return 0;
131027356f54SToshi Kani }
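/*
 * Example (illustrative): with a 128 MiB memory block size, adding 256 MiB
 * at 4 GiB (start = 0x100000000, size = 0x10000000) passes this check; any
 * start or size that is not a multiple of 128 MiB is rejected.
 */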
131127356f54SToshi Kani
online_memory_block(struct memory_block * mem,void * arg)131231bc3858SVitaly Kuznetsov static int online_memory_block(struct memory_block *mem, void *arg)
131331bc3858SVitaly Kuznetsov {
13141adf8b46SAnshuman Khandual mem->online_type = mhp_default_online_type;
1315dc18d706SNathan Fontenot return device_online(&mem->dev);
131631bc3858SVitaly Kuznetsov }
131731bc3858SVitaly Kuznetsov
131885a2b4b0SAneesh Kumar K.V #ifndef arch_supports_memmap_on_memory
arch_supports_memmap_on_memory(unsigned long vmemmap_size)131985a2b4b0SAneesh Kumar K.V static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
132085a2b4b0SAneesh Kumar K.V {
132185a2b4b0SAneesh Kumar K.V /*
132285a2b4b0SAneesh Kumar K.V * As default, we want the vmemmap to span a complete PMD such that we
132385a2b4b0SAneesh Kumar K.V * can map the vmemmap using a single PMD if supported by the
132485a2b4b0SAneesh Kumar K.V * architecture.
132585a2b4b0SAneesh Kumar K.V */
132685a2b4b0SAneesh Kumar K.V return IS_ALIGNED(vmemmap_size, PMD_SIZE);
132785a2b4b0SAneesh Kumar K.V }
132885a2b4b0SAneesh Kumar K.V #endif
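/*
 * Worked example (illustrative, x86-64-style parameters): with a 128 MiB
 * memory block, 4 KiB base pages and a 64 byte struct page, the vmemmap
 * needs (128 MiB / 4 KiB) * 64 bytes = 2 MiB, which is exactly PMD_SIZE,
 * so the default check above passes.
 */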
132985a2b4b0SAneesh Kumar K.V
mhp_supports_memmap_on_memory(unsigned long size)1330e3c2bfddSAneesh Kumar K.V static bool mhp_supports_memmap_on_memory(unsigned long size)
1331a08a2ae3SOscar Salvador {
133285a2b4b0SAneesh Kumar K.V unsigned long vmemmap_size = memory_block_memmap_size();
13332d1f649cSAneesh Kumar K.V unsigned long memmap_pages = memory_block_memmap_on_memory_pages();
1334a08a2ae3SOscar Salvador
1335a08a2ae3SOscar Salvador /*
1336a08a2ae3SOscar Salvador * Besides having arch support and the feature enabled at runtime, we
1337a08a2ae3SOscar Salvador * need a few more assumptions to hold true:
1338a08a2ae3SOscar Salvador *
1339a08a2ae3SOscar Salvador * a) We span a single memory block: memory onlining/offlining happens
1340a08a2ae3SOscar Salvador * in memory block granularity. We don't want the vmemmap of online
1341a08a2ae3SOscar Salvador * memory blocks to reside on offline memory blocks. In the future,
1342a08a2ae3SOscar Salvador * we might want to support variable-sized memory blocks to make the
1343a08a2ae3SOscar Salvador * feature more versatile.
1344a08a2ae3SOscar Salvador *
1345a08a2ae3SOscar Salvador * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
1346a08a2ae3SOscar Salvador * to populate memory from the altmap for unrelated parts (i.e.,
1347a08a2ae3SOscar Salvador * other memory blocks)
1348a08a2ae3SOscar Salvador *
1349a08a2ae3SOscar Salvador * c) The vmemmap pages (and thereby the pages that will be exposed to
1350a08a2ae3SOscar Salvador * the buddy) have to cover full pageblocks: memory onlining/offlining
1351a08a2ae3SOscar Salvador * code requires applicable ranges to be page-aligned, for example, to
1352a08a2ae3SOscar Salvador * set the migratetypes properly.
1353a08a2ae3SOscar Salvador *
1354a08a2ae3SOscar Salvador * TODO: Although we have a check here to make sure that vmemmap pages
1355a08a2ae3SOscar Salvador * fully populate a PMD, it is not the right place to check for
1356a08a2ae3SOscar Salvador * this. A much better solution involves improving vmemmap code
1357a08a2ae3SOscar Salvador * to fallback to base pages when trying to populate vmemmap using
1358a08a2ae3SOscar Salvador * altmap as an alternative source of memory, and we do not exactly
1359a08a2ae3SOscar Salvador * populate a single PMD.
1360a08a2ae3SOscar Salvador */
13612d1f649cSAneesh Kumar K.V if (!mhp_memmap_on_memory() || size != memory_block_size_bytes())
13622d1f649cSAneesh Kumar K.V return false;
13632d1f649cSAneesh Kumar K.V
13642d1f649cSAneesh Kumar K.V /*
13652d1f649cSAneesh Kumar K.V * Make sure the vmemmap allocation is fully contained
13662d1f649cSAneesh Kumar K.V * so that we always allocate vmemmap memory from the altmap area.
13672d1f649cSAneesh Kumar K.V */
13682d1f649cSAneesh Kumar K.V if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE))
13692d1f649cSAneesh Kumar K.V return false;
13702d1f649cSAneesh Kumar K.V
13712d1f649cSAneesh Kumar K.V /*
13722d1f649cSAneesh Kumar K.V * The start pfn must be pageblock_nr_pages aligned so that migrate
13732d1f649cSAneesh Kumar K.V * types can be set correctly.
13742d1f649cSAneesh Kumar K.V */
13752d1f649cSAneesh Kumar K.V if (!pageblock_aligned(memmap_pages))
13762d1f649cSAneesh Kumar K.V return false;
13772d1f649cSAneesh Kumar K.V
13782d1f649cSAneesh Kumar K.V if (memmap_pages == PHYS_PFN(memory_block_size_bytes()))
13792d1f649cSAneesh Kumar K.V /* Hotplugging memory that is fully consumed by its memmap makes no sense. */
13802d1f649cSAneesh Kumar K.V return false;
13812d1f649cSAneesh Kumar K.V
13822d1f649cSAneesh Kumar K.V return arch_supports_memmap_on_memory(vmemmap_size);
1383a08a2ae3SOscar Salvador }
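/*
 * Continuing the worked example above (illustrative): a 2 MiB vmemmap is
 * 512 pages, i.e. one full 2 MiB pageblock and far less than the 32768
 * pages of the memory block, so the pageblock alignment and "effective
 * hotplugged memory" checks pass as well.
 */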
1384a08a2ae3SOscar Salvador
13858df1d0e4SDavid Hildenbrand /*
13868df1d0e4SDavid Hildenbrand * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
13878df1d0e4SDavid Hildenbrand * and online/offline operations (triggered e.g. by sysfs).
13888df1d0e4SDavid Hildenbrand *
13898df1d0e4SDavid Hildenbrand * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
13908df1d0e4SDavid Hildenbrand */
add_memory_resource(int nid,struct resource * res,mhp_t mhp_flags)1391b6117199SDavid Hildenbrand int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
1392bc02af93SYasunori Goto {
1393d15dfd31SCatalin Marinas struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
139432befe9eSDavid Hildenbrand enum memblock_flags memblock_flags = MEMBLOCK_NONE;
13952d1f649cSAneesh Kumar K.V struct vmem_altmap mhp_altmap = {
13962d1f649cSAneesh Kumar K.V .base_pfn = PHYS_PFN(res->start),
13972d1f649cSAneesh Kumar K.V .end_pfn = PHYS_PFN(res->end),
13982d1f649cSAneesh Kumar K.V };
1399028fc57aSDavid Hildenbrand struct memory_group *group = NULL;
140062cedb9fSDavid Vrabel u64 start, size;
1401b9ff0360SOscar Salvador bool new_node = false;
1402bc02af93SYasunori Goto int ret;
1403bc02af93SYasunori Goto
140462cedb9fSDavid Vrabel start = res->start;
140562cedb9fSDavid Vrabel size = resource_size(res);
140662cedb9fSDavid Vrabel
140727356f54SToshi Kani ret = check_hotplug_memory_range(start, size);
140827356f54SToshi Kani if (ret)
140927356f54SToshi Kani return ret;
141027356f54SToshi Kani
1411028fc57aSDavid Hildenbrand if (mhp_flags & MHP_NID_IS_MGID) {
1412028fc57aSDavid Hildenbrand group = memory_group_find_by_id(nid);
1413028fc57aSDavid Hildenbrand if (!group)
1414028fc57aSDavid Hildenbrand return -EINVAL;
1415028fc57aSDavid Hildenbrand nid = group->nid;
1416028fc57aSDavid Hildenbrand }
1417028fc57aSDavid Hildenbrand
1418fa6d9ec7SVishal Verma if (!node_possible(nid)) {
1419fa6d9ec7SVishal Verma WARN(1, "node %d was absent from the node_possible_map\n", nid);
1420fa6d9ec7SVishal Verma return -EINVAL;
1421fa6d9ec7SVishal Verma }
1422fa6d9ec7SVishal Verma
1423bfc8c901SVladimir Davydov mem_hotplug_begin();
1424ac13c462SNathan Zimmer
142553d38316SDavid Hildenbrand if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
142632befe9eSDavid Hildenbrand if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
142732befe9eSDavid Hildenbrand memblock_flags = MEMBLOCK_DRIVER_MANAGED;
142832befe9eSDavid Hildenbrand ret = memblock_add_node(start, size, nid, memblock_flags);
142953d38316SDavid Hildenbrand if (ret)
143053d38316SDavid Hildenbrand goto error_mem_hotplug_end;
143153d38316SDavid Hildenbrand }
14327f36e3e5STang Chen
1433c68ab18cSDavid Hildenbrand ret = __try_online_node(nid, false);
1434b9ff0360SOscar Salvador if (ret < 0)
143541b9e2d7SWen Congyang goto error;
1436b9ff0360SOscar Salvador new_node = ret;
14379af3c2deSYasunori Goto
1438a08a2ae3SOscar Salvador /*
1439a08a2ae3SOscar Salvador * Self hosted memmap array
1440a08a2ae3SOscar Salvador */
1441a08a2ae3SOscar Salvador if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
1442e3c2bfddSAneesh Kumar K.V if (mhp_supports_memmap_on_memory(size)) {
14432d1f649cSAneesh Kumar K.V mhp_altmap.free = memory_block_memmap_on_memory_pages();
14441a8c64e1SAneesh Kumar K.V params.altmap = kmalloc(sizeof(struct vmem_altmap), GFP_KERNEL);
14451a8c64e1SAneesh Kumar K.V if (!params.altmap) {
14461a8c64e1SAneesh Kumar K.V ret = -ENOMEM;
14471a8c64e1SAneesh Kumar K.V goto error;
14481a8c64e1SAneesh Kumar K.V }
14491a8c64e1SAneesh Kumar K.V
14501a8c64e1SAneesh Kumar K.V memcpy(params.altmap, &mhp_altmap, sizeof(mhp_altmap));
1451a08a2ae3SOscar Salvador }
1452e3c2bfddSAneesh Kumar K.V /* fallback to not using altmap */
1453e3c2bfddSAneesh Kumar K.V }
1454a08a2ae3SOscar Salvador
1455bc02af93SYasunori Goto /* call arch's memory hotadd */
1456f5637d3bSLogan Gunthorpe ret = arch_add_memory(nid, start, size, ¶ms);
14579af3c2deSYasunori Goto if (ret < 0)
14581a8c64e1SAneesh Kumar K.V goto error_free;
14599af3c2deSYasunori Goto
1460db051a0dSDavid Hildenbrand /* create memory block devices after memory was added */
14611a8c64e1SAneesh Kumar K.V ret = create_memory_block_devices(start, size, params.altmap, group);
1462db051a0dSDavid Hildenbrand if (ret) {
14639e5d3096SSumanth Korikkar arch_remove_memory(start, size, params.altmap);
14641a8c64e1SAneesh Kumar K.V goto error_free;
1465db051a0dSDavid Hildenbrand }
1466db051a0dSDavid Hildenbrand
1467a1e565aaSTang Chen if (new_node) {
1468d5b6f6a3SOscar Salvador /* If the sysfs file of the new node can't be created, CPUs on
14690fc44159SYasunori Goto * the node can't be hot-added. There is no way to roll back
14700fc44159SYasunori Goto * from here, so catch that case reluctantly with BUG_ON().
1471d5b6f6a3SOscar Salvador * We online the node here.
14720fc44159SYasunori Goto */
1473d5b6f6a3SOscar Salvador node_set_online(nid);
1474d5b6f6a3SOscar Salvador ret = __register_one_node(nid);
14750fc44159SYasunori Goto BUG_ON(ret);
14760fc44159SYasunori Goto }
14770fc44159SYasunori Goto
1478cc651559SDavid Hildenbrand register_memory_blocks_under_node(nid, PFN_DOWN(start),
1479cc651559SDavid Hildenbrand PFN_UP(start + size - 1),
1480f85086f9SLaurent Dufour MEMINIT_HOTPLUG);
1481d5b6f6a3SOscar Salvador
1482d96ae530Sakpm@linux-foundation.org /* create new memmap entry */
14837b7b2721SDavid Hildenbrand if (!strcmp(res->name, "System RAM"))
1484d96ae530Sakpm@linux-foundation.org firmware_map_add_hotplug(start, start + size, "System RAM");
1485d96ae530Sakpm@linux-foundation.org
1486381eab4aSDavid Hildenbrand /* device_online() will take the lock when calling online_pages() */
1487381eab4aSDavid Hildenbrand mem_hotplug_done();
1488381eab4aSDavid Hildenbrand
14899ca6551eSDavid Hildenbrand /*
14909ca6551eSDavid Hildenbrand * In case we're allowed to merge the resource, flag it and trigger
14919ca6551eSDavid Hildenbrand * merging now that adding succeeded.
14929ca6551eSDavid Hildenbrand */
149326011267SDavid Hildenbrand if (mhp_flags & MHP_MERGE_RESOURCE)
14949ca6551eSDavid Hildenbrand merge_system_ram_resource(res);
14959ca6551eSDavid Hildenbrand
149631bc3858SVitaly Kuznetsov /* online pages if requested */
14971adf8b46SAnshuman Khandual if (mhp_default_online_type != MMOP_OFFLINE)
1498fbcf73ceSDavid Hildenbrand walk_memory_blocks(start, size, NULL, online_memory_block);
149931bc3858SVitaly Kuznetsov
1500381eab4aSDavid Hildenbrand return ret;
15011a8c64e1SAneesh Kumar K.V error_free:
15021a8c64e1SAneesh Kumar K.V kfree(params.altmap);
15039af3c2deSYasunori Goto error:
150452219aeaSDavid Hildenbrand if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
15057f36e3e5STang Chen memblock_remove(start, size);
150653d38316SDavid Hildenbrand error_mem_hotplug_end:
1507bfc8c901SVladimir Davydov mem_hotplug_done();
1508bc02af93SYasunori Goto return ret;
1509bc02af93SYasunori Goto }
151062cedb9fSDavid Vrabel
15118df1d0e4SDavid Hildenbrand /* requires device_hotplug_lock, see add_memory_resource() */
__add_memory(int nid,u64 start,u64 size,mhp_t mhp_flags)1512b6117199SDavid Hildenbrand int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
151362cedb9fSDavid Vrabel {
151462cedb9fSDavid Vrabel struct resource *res;
151562cedb9fSDavid Vrabel int ret;
151662cedb9fSDavid Vrabel
15177b7b2721SDavid Hildenbrand res = register_memory_resource(start, size, "System RAM");
15186f754ba4SVitaly Kuznetsov if (IS_ERR(res))
15196f754ba4SVitaly Kuznetsov return PTR_ERR(res);
152062cedb9fSDavid Vrabel
1521b6117199SDavid Hildenbrand ret = add_memory_resource(nid, res, mhp_flags);
152262cedb9fSDavid Vrabel if (ret < 0)
152362cedb9fSDavid Vrabel release_memory_resource(res);
152462cedb9fSDavid Vrabel return ret;
152562cedb9fSDavid Vrabel }
15268df1d0e4SDavid Hildenbrand
add_memory(int nid,u64 start,u64 size,mhp_t mhp_flags)1527b6117199SDavid Hildenbrand int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
15288df1d0e4SDavid Hildenbrand {
15298df1d0e4SDavid Hildenbrand int rc;
15308df1d0e4SDavid Hildenbrand
15318df1d0e4SDavid Hildenbrand lock_device_hotplug();
1532b6117199SDavid Hildenbrand rc = __add_memory(nid, start, size, mhp_flags);
15338df1d0e4SDavid Hildenbrand unlock_device_hotplug();
15348df1d0e4SDavid Hildenbrand
15358df1d0e4SDavid Hildenbrand return rc;
15368df1d0e4SDavid Hildenbrand }
1537bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory);
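/*
 * Illustrative use only (hypothetical nid, address and size):
 */
#if 0
	rc = add_memory(nid, 0x100000000ULL, 2 * memory_block_size_bytes(),
			MHP_MERGE_RESOURCE);
#endif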
15380c0e6195SKAMEZAWA Hiroyuki
15397b7b2721SDavid Hildenbrand /*
15407b7b2721SDavid Hildenbrand * Add special, driver-managed memory to the system as system RAM. Such
15417b7b2721SDavid Hildenbrand * memory is not exposed via the raw firmware-provided memmap as system
15427b7b2721SDavid Hildenbrand * RAM, instead, it is detected and added by a driver - during cold boot,
15437b7b2721SDavid Hildenbrand * after a reboot, and after kexec.
15447b7b2721SDavid Hildenbrand *
15457b7b2721SDavid Hildenbrand * Reasons why this memory should not be used for the initial memmap of a
15467b7b2721SDavid Hildenbrand * kexec kernel or for placing kexec images:
15477b7b2721SDavid Hildenbrand * - The booting kernel is in charge of determining how this memory will be
15487b7b2721SDavid Hildenbrand * used (e.g., use persistent memory as system RAM)
15497b7b2721SDavid Hildenbrand * - Coordination with a hypervisor is required before this memory
15507b7b2721SDavid Hildenbrand * can be used (e.g., inaccessible parts).
15517b7b2721SDavid Hildenbrand *
15527b7b2721SDavid Hildenbrand * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
15537b7b2721SDavid Hildenbrand * memory map") are created. Also, the created memory resource is flagged
15547cf603d1SDavid Hildenbrand * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
15557b7b2721SDavid Hildenbrand * this memory as well (esp., not place kexec images onto it).
15567b7b2721SDavid Hildenbrand *
15577b7b2721SDavid Hildenbrand * The resource_name (visible via /proc/iomem) has to have the format
15587b7b2721SDavid Hildenbrand * "System RAM ($DRIVER)".
15597b7b2721SDavid Hildenbrand */
add_memory_driver_managed(int nid,u64 start,u64 size,const char * resource_name,mhp_t mhp_flags)15607b7b2721SDavid Hildenbrand int add_memory_driver_managed(int nid, u64 start, u64 size,
1561b6117199SDavid Hildenbrand const char *resource_name, mhp_t mhp_flags)
15627b7b2721SDavid Hildenbrand {
15637b7b2721SDavid Hildenbrand struct resource *res;
15647b7b2721SDavid Hildenbrand int rc;
15657b7b2721SDavid Hildenbrand
15667b7b2721SDavid Hildenbrand if (!resource_name ||
15677b7b2721SDavid Hildenbrand strstr(resource_name, "System RAM (") != resource_name ||
15687b7b2721SDavid Hildenbrand resource_name[strlen(resource_name) - 1] != ')')
15697b7b2721SDavid Hildenbrand return -EINVAL;
15707b7b2721SDavid Hildenbrand
15717b7b2721SDavid Hildenbrand lock_device_hotplug();
15727b7b2721SDavid Hildenbrand
15737b7b2721SDavid Hildenbrand res = register_memory_resource(start, size, resource_name);
15747b7b2721SDavid Hildenbrand if (IS_ERR(res)) {
15757b7b2721SDavid Hildenbrand rc = PTR_ERR(res);
15767b7b2721SDavid Hildenbrand goto out_unlock;
15777b7b2721SDavid Hildenbrand }
15787b7b2721SDavid Hildenbrand
1579b6117199SDavid Hildenbrand rc = add_memory_resource(nid, res, mhp_flags);
15807b7b2721SDavid Hildenbrand if (rc < 0)
15817b7b2721SDavid Hildenbrand release_memory_resource(res);
15827b7b2721SDavid Hildenbrand
15837b7b2721SDavid Hildenbrand out_unlock:
15847b7b2721SDavid Hildenbrand unlock_device_hotplug();
15857b7b2721SDavid Hildenbrand return rc;
15867b7b2721SDavid Hildenbrand }
15877b7b2721SDavid Hildenbrand EXPORT_SYMBOL_GPL(add_memory_driver_managed);
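/*
 * Illustrative use only: a driver exposing detected memory as
 * driver-managed system RAM, following the "System RAM ($DRIVER)" rule
 * above ("example" is a hypothetical driver name):
 */
#if 0
	rc = add_memory_driver_managed(nid, start, size,
				       "System RAM (example)", MHP_NONE);
#endif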
15887b7b2721SDavid Hildenbrand
1589bca3feaaSAnshuman Khandual /*
1590bca3feaaSAnshuman Khandual * Platforms should define arch_get_mappable_range() that provides
1591bca3feaaSAnshuman Khandual * maximum possible addressable physical memory range for which the
1592bca3feaaSAnshuman Khandual * linear mapping could be created. The platform returned address
1593bca3feaaSAnshuman Khandual * range must adhere to these following semantics.
1594bca3feaaSAnshuman Khandual *
1595bca3feaaSAnshuman Khandual * - range.start <= range.end
1596bca3feaaSAnshuman Khandual * - Range includes both end points [range.start..range.end]
1597bca3feaaSAnshuman Khandual *
1598bca3feaaSAnshuman Khandual * There is also a fallback definition provided here, allowing the
1599bca3feaaSAnshuman Khandual * entire possible physical address range in case any platform does
1600bca3feaaSAnshuman Khandual * not define arch_get_mappable_range().
1601bca3feaaSAnshuman Khandual */
arch_get_mappable_range(void)1602bca3feaaSAnshuman Khandual struct range __weak arch_get_mappable_range(void)
1603bca3feaaSAnshuman Khandual {
1604bca3feaaSAnshuman Khandual struct range mhp_range = {
1605bca3feaaSAnshuman Khandual .start = 0UL,
1606bca3feaaSAnshuman Khandual .end = -1ULL,
1607bca3feaaSAnshuman Khandual };
1608bca3feaaSAnshuman Khandual return mhp_range;
1609bca3feaaSAnshuman Khandual }
1610bca3feaaSAnshuman Khandual
mhp_get_pluggable_range(bool need_mapping)1611bca3feaaSAnshuman Khandual struct range mhp_get_pluggable_range(bool need_mapping)
1612bca3feaaSAnshuman Khandual {
1613*0b46b4acSThomas Gleixner const u64 max_phys = PHYSMEM_END;
1614bca3feaaSAnshuman Khandual struct range mhp_range;
1615bca3feaaSAnshuman Khandual
1616bca3feaaSAnshuman Khandual if (need_mapping) {
1617bca3feaaSAnshuman Khandual mhp_range = arch_get_mappable_range();
1618bca3feaaSAnshuman Khandual if (mhp_range.start > max_phys) {
1619bca3feaaSAnshuman Khandual mhp_range.start = 0;
1620bca3feaaSAnshuman Khandual mhp_range.end = 0;
1621bca3feaaSAnshuman Khandual }
1622bca3feaaSAnshuman Khandual mhp_range.end = min_t(u64, mhp_range.end, max_phys);
1623bca3feaaSAnshuman Khandual } else {
1624bca3feaaSAnshuman Khandual mhp_range.start = 0;
1625bca3feaaSAnshuman Khandual mhp_range.end = max_phys;
1626bca3feaaSAnshuman Khandual }
1627bca3feaaSAnshuman Khandual return mhp_range;
1628bca3feaaSAnshuman Khandual }
1629bca3feaaSAnshuman Khandual EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);
1630bca3feaaSAnshuman Khandual
mhp_range_allowed(u64 start,u64 size,bool need_mapping)1631bca3feaaSAnshuman Khandual bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
1632bca3feaaSAnshuman Khandual {
1633bca3feaaSAnshuman Khandual struct range mhp_range = mhp_get_pluggable_range(need_mapping);
1634bca3feaaSAnshuman Khandual u64 end = start + size;
1635bca3feaaSAnshuman Khandual
1636bca3feaaSAnshuman Khandual if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
1637bca3feaaSAnshuman Khandual return true;
1638bca3feaaSAnshuman Khandual
1639bca3feaaSAnshuman Khandual pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
1640bca3feaaSAnshuman Khandual start, end, mhp_range.start, mhp_range.end);
1641bca3feaaSAnshuman Khandual return false;
1642bca3feaaSAnshuman Khandual }
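/*
 * Illustrative sketch: an architecture's hot-add path would typically gate
 * on this helper before creating the linear mapping, e.g.:
 */
#if 0
	if (!mhp_range_allowed(start, size, true))
		return -ERANGE;
#endif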
1643bca3feaaSAnshuman Khandual
16440c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE
16450c0e6195SKAMEZAWA Hiroyuki /*
16460efadf48SYisheng Xie * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1647aa218795SDavid Hildenbrand * non-lru movable pages and hugepages). Will skip over most unmovable
1648aa218795SDavid Hildenbrand * pages (esp., pages that can be skipped when offlining), but bail out on
1649aa218795SDavid Hildenbrand * definitely unmovable pages.
1650aa218795SDavid Hildenbrand *
1651aa218795SDavid Hildenbrand * Returns:
1652aa218795SDavid Hildenbrand * 0 in case a movable page is found and movable_pfn was updated.
1653aa218795SDavid Hildenbrand * -ENOENT in case no movable page was found.
1654aa218795SDavid Hildenbrand * -EBUSY in case a definitely unmovable page was found.
16550c0e6195SKAMEZAWA Hiroyuki */
scan_movable_pages(unsigned long start,unsigned long end,unsigned long * movable_pfn)1656aa218795SDavid Hildenbrand static int scan_movable_pages(unsigned long start, unsigned long end,
1657aa218795SDavid Hildenbrand unsigned long *movable_pfn)
16580c0e6195SKAMEZAWA Hiroyuki {
16590c0e6195SKAMEZAWA Hiroyuki unsigned long pfn;
1660eeb0efd0SOscar Salvador
16610c0e6195SKAMEZAWA Hiroyuki for (pfn = start; pfn < end; pfn++) {
1662eeb0efd0SOscar Salvador struct page *page, *head;
1663eeb0efd0SOscar Salvador unsigned long skip;
1664eeb0efd0SOscar Salvador
1665eeb0efd0SOscar Salvador if (!pfn_valid(pfn))
1666eeb0efd0SOscar Salvador continue;
16670c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn);
16680c0e6195SKAMEZAWA Hiroyuki if (PageLRU(page))
1669aa218795SDavid Hildenbrand goto found;
16700efadf48SYisheng Xie if (__PageMovable(page))
1671aa218795SDavid Hildenbrand goto found;
1672aa218795SDavid Hildenbrand
1673aa218795SDavid Hildenbrand /*
1674aa218795SDavid Hildenbrand * PageOffline() pages that are not marked __PageMovable() and
1675aa218795SDavid Hildenbrand * have a reference count > 0 (after MEM_GOING_OFFLINE) are
1676aa218795SDavid Hildenbrand * definitely unmovable. If their reference count would be 0,
1677aa218795SDavid Hildenbrand * they could at least be skipped when offlining memory.
1678aa218795SDavid Hildenbrand */
1679aa218795SDavid Hildenbrand if (PageOffline(page) && page_count(page))
1680aa218795SDavid Hildenbrand return -EBUSY;
1681eeb0efd0SOscar Salvador
1682eeb0efd0SOscar Salvador if (!PageHuge(page))
1683eeb0efd0SOscar Salvador continue;
1684eeb0efd0SOscar Salvador head = compound_head(page);
16858f251a3dSMike Kravetz /*
16868f251a3dSMike Kravetz * This test is racy as we hold no reference or lock. The
16878f251a3dSMike Kravetz * hugetlb page could have been freed and head is no longer
16888f251a3dSMike Kravetz * a hugetlb page before the following check. In such unlikely
16898f251a3dSMike Kravetz * cases false positives and negatives are possible. Calling
16908f251a3dSMike Kravetz * code must deal with these scenarios.
16918f251a3dSMike Kravetz */
16928f251a3dSMike Kravetz if (HPageMigratable(head))
1693aa218795SDavid Hildenbrand goto found;
16942c1589c7SZi Yan skip = compound_nr(head) - (pfn - page_to_pfn(head));
1695eeb0efd0SOscar Salvador pfn += skip - 1;
16960c0e6195SKAMEZAWA Hiroyuki }
1697aa218795SDavid Hildenbrand return -ENOENT;
1698aa218795SDavid Hildenbrand found:
1699aa218795SDavid Hildenbrand *movable_pfn = pfn;
17000c0e6195SKAMEZAWA Hiroyuki return 0;
17010c0e6195SKAMEZAWA Hiroyuki }
17020c0e6195SKAMEZAWA Hiroyuki
do_migrate_range(unsigned long start_pfn,unsigned long end_pfn)170332cf666eSSeongJae Park static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
17040c0e6195SKAMEZAWA Hiroyuki {
17050c0e6195SKAMEZAWA Hiroyuki unsigned long pfn;
17066c357848SMatthew Wilcox (Oracle) struct page *page, *head;
17070c0e6195SKAMEZAWA Hiroyuki LIST_HEAD(source);
1708786dee86SLiam Mark static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
1709786dee86SLiam Mark DEFAULT_RATELIMIT_BURST);
17100c0e6195SKAMEZAWA Hiroyuki
1711a85009c3SMichal Hocko for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1712869f7ee6SMatthew Wilcox (Oracle) struct folio *folio;
1713f7f9c00dSBaolin Wang bool isolated;
1714869f7ee6SMatthew Wilcox (Oracle)
17150c0e6195SKAMEZAWA Hiroyuki if (!pfn_valid(pfn))
17160c0e6195SKAMEZAWA Hiroyuki continue;
17170c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn);
1718869f7ee6SMatthew Wilcox (Oracle) folio = page_folio(page);
1719869f7ee6SMatthew Wilcox (Oracle) head = &folio->page;
1720c8721bbbSNaoya Horiguchi
1721c8721bbbSNaoya Horiguchi if (PageHuge(page)) {
1722d8c6546bSMatthew Wilcox (Oracle) pfn = page_to_pfn(head) + compound_nr(head) - 1;
17236aa3a920SSidhartha Kumar isolate_hugetlb(folio, &source);
1724c8721bbbSNaoya Horiguchi continue;
172594723aafSMichal Hocko } else if (PageTransHuge(page))
17266c357848SMatthew Wilcox (Oracle) pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
1727c8721bbbSNaoya Horiguchi
1728b15c8726SMichal Hocko /*
1729b15c8726SMichal Hocko * HWPoison pages have elevated reference counts so the migration would
1730b15c8726SMichal Hocko * fail on them. It also doesn't make any sense to migrate them in the
1731b15c8726SMichal Hocko * first place. Still try to unmap such a page in case it is still mapped
1732b15c8726SMichal Hocko * (e.g. the current hwpoison implementation doesn't unmap KSM pages;
1733b15c8726SMichal Hocko * keep the unmap as the catch-all safety net).
1734b15c8726SMichal Hocko */
1735b15c8726SMichal Hocko if (PageHWPoison(page)) {
1736869f7ee6SMatthew Wilcox (Oracle) if (WARN_ON(folio_test_lru(folio)))
1737869f7ee6SMatthew Wilcox (Oracle) folio_isolate_lru(folio);
1738869f7ee6SMatthew Wilcox (Oracle) if (folio_mapped(folio))
1739869f7ee6SMatthew Wilcox (Oracle) try_to_unmap(folio, TTU_IGNORE_MLOCK);
1740b15c8726SMichal Hocko continue;
1741b15c8726SMichal Hocko }
1742b15c8726SMichal Hocko
1743700c2a46SKonstantin Khlebnikov if (!get_page_unless_zero(page))
17440c0e6195SKAMEZAWA Hiroyuki continue;
17450c0e6195SKAMEZAWA Hiroyuki /*
17460efadf48SYisheng Xie * We can skip free pages. And we can handle pages on the LRU
17470efadf48SYisheng Xie * as well as non-lru movable pages.
17480c0e6195SKAMEZAWA Hiroyuki */
1749cd775580SBaolin Wang if (PageLRU(page))
1750f7f9c00dSBaolin Wang isolated = isolate_lru_page(page);
1751cd775580SBaolin Wang else
1752cd775580SBaolin Wang isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1753cd775580SBaolin Wang if (isolated) {
175462695a84SNick Piggin list_add_tail(&page->lru, &source);
17550efadf48SYisheng Xie if (!__PageMovable(page))
1756599d0c95SMel Gorman inc_node_page_state(page, NR_ISOLATED_ANON +
17579de4f22aSHuang Ying page_is_file_lru(page));
17586d9c285aSKOSAKI Motohiro
17590c0e6195SKAMEZAWA Hiroyuki } else {
1760786dee86SLiam Mark if (__ratelimit(&migrate_rs)) {
17612932c8b0SMichal Hocko pr_warn("failed to isolate pfn %lx\n", pfn);
17620efadf48SYisheng Xie dump_page(page, "isolation failed");
17631723058eSOscar Salvador }
1764786dee86SLiam Mark }
1765700c2a46SKonstantin Khlebnikov put_page(page);
17660c0e6195SKAMEZAWA Hiroyuki }
1767f3ab2636SBob Liu if (!list_empty(&source)) {
1768203e6e5cSJoonsoo Kim nodemask_t nmask = node_states[N_MEMORY];
1769203e6e5cSJoonsoo Kim struct migration_target_control mtc = {
1770203e6e5cSJoonsoo Kim .nmask = &nmask,
1771203e6e5cSJoonsoo Kim .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1772203e6e5cSJoonsoo Kim };
177332cf666eSSeongJae Park int ret;
1774203e6e5cSJoonsoo Kim
1775203e6e5cSJoonsoo Kim /*
1776203e6e5cSJoonsoo Kim * We have checked that migration range is on a single zone so
1777203e6e5cSJoonsoo Kim * we can use the nid of the first page to all the others.
1778203e6e5cSJoonsoo Kim */
1779203e6e5cSJoonsoo Kim mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
1780203e6e5cSJoonsoo Kim
1781203e6e5cSJoonsoo Kim /*
1782203e6e5cSJoonsoo Kim * try to allocate from a different node but reuse this node
1783203e6e5cSJoonsoo Kim * if there are no other online nodes to be used (e.g. we are
1784203e6e5cSJoonsoo Kim * offlining a part of the only existing node)
1785203e6e5cSJoonsoo Kim */
1786203e6e5cSJoonsoo Kim node_clear(mtc.nid, nmask);
1787203e6e5cSJoonsoo Kim if (nodes_empty(nmask))
1788203e6e5cSJoonsoo Kim node_set(mtc.nid, nmask);
1789203e6e5cSJoonsoo Kim ret = migrate_pages(&source, alloc_migration_target, NULL,
17905ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
17912932c8b0SMichal Hocko if (ret) {
17922932c8b0SMichal Hocko list_for_each_entry(page, &source, lru) {
1793786dee86SLiam Mark if (__ratelimit(&migrate_rs)) {
1794786dee86SLiam Mark pr_warn("migrating pfn %lx failed ret:%d\n",
17952932c8b0SMichal Hocko page_to_pfn(page), ret);
17962932c8b0SMichal Hocko dump_page(page, "migration failure");
17972932c8b0SMichal Hocko }
1798786dee86SLiam Mark }
1799c8721bbbSNaoya Horiguchi putback_movable_pages(&source);
1800f3ab2636SBob Liu }
18012932c8b0SMichal Hocko }
18020c0e6195SKAMEZAWA Hiroyuki }
18030c0e6195SKAMEZAWA Hiroyuki
cmdline_parse_movable_node(char * p)1804c5320926STang Chen static int __init cmdline_parse_movable_node(char *p)
1805c5320926STang Chen {
180655ac590cSTang Chen movable_node_enabled = true;
1807c5320926STang Chen return 0;
1808c5320926STang Chen }
1809c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node);
1810c5320926STang Chen
1811d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */
node_states_check_changes_offline(unsigned long nr_pages,struct zone * zone,struct memory_notify * arg)1812d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages,
1813d9713679SLai Jiangshan struct zone *zone, struct memory_notify *arg)
1814d9713679SLai Jiangshan {
1815d9713679SLai Jiangshan struct pglist_data *pgdat = zone->zone_pgdat;
1816d9713679SLai Jiangshan unsigned long present_pages = 0;
181786b27beaSOscar Salvador enum zone_type zt;
1818d9713679SLai Jiangshan
181998fa15f3SAnshuman Khandual arg->status_change_nid = NUMA_NO_NODE;
182098fa15f3SAnshuman Khandual arg->status_change_nid_normal = NUMA_NO_NODE;
182186b27beaSOscar Salvador
182286b27beaSOscar Salvador /*
182386b27beaSOscar Salvador * Check whether node_states[N_NORMAL_MEMORY] will be changed.
182486b27beaSOscar Salvador * If the memory to be offlined is within the range
182586b27beaSOscar Salvador * [0..ZONE_NORMAL], and it is the last present memory there,
182686b27beaSOscar Salvador * the zones in that range will become empty after the offlining,
182786b27beaSOscar Salvador * thus we can determine that we need to clear the node from
182886b27beaSOscar Salvador * node_states[N_NORMAL_MEMORY].
182986b27beaSOscar Salvador */
183086b27beaSOscar Salvador for (zt = 0; zt <= ZONE_NORMAL; zt++)
183186b27beaSOscar Salvador present_pages += pgdat->node_zones[zt].present_pages;
183286b27beaSOscar Salvador if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
183386b27beaSOscar Salvador arg->status_change_nid_normal = zone_to_nid(zone);
1834d9713679SLai Jiangshan
18356715ddf9SLai Jiangshan /*
18366b740c6cSDavid Hildenbrand * We have accounted the pages from [0..ZONE_NORMAL); ZONE_HIGHMEM
18376b740c6cSDavid Hildenbrand * does not apply as we don't support 32bit.
183886b27beaSOscar Salvador * Here we count the possible pages from ZONE_MOVABLE.
183986b27beaSOscar Salvador * If after having accounted all the pages, we see that the nr_pages
184086b27beaSOscar Salvador * to be offlined is over or equal to the accounted pages,
184186b27beaSOscar Salvador * we know that the node will become empty, and so, we can clear
184286b27beaSOscar Salvador * it for N_MEMORY as well.
1843d9713679SLai Jiangshan */
184486b27beaSOscar Salvador present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
1845d9713679SLai Jiangshan
1846d9713679SLai Jiangshan if (nr_pages >= present_pages)
1847d9713679SLai Jiangshan arg->status_change_nid = zone_to_nid(zone);
1848d9713679SLai Jiangshan }
1849d9713679SLai Jiangshan
node_states_clear_node(int node,struct memory_notify * arg)1850d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1851d9713679SLai Jiangshan {
1852d9713679SLai Jiangshan if (arg->status_change_nid_normal >= 0)
1853d9713679SLai Jiangshan node_clear_state(node, N_NORMAL_MEMORY);
1854d9713679SLai Jiangshan
1855cf01f6f5SOscar Salvador if (arg->status_change_nid >= 0)
18566715ddf9SLai Jiangshan node_clear_state(node, N_MEMORY);
1857d9713679SLai Jiangshan }
1858d9713679SLai Jiangshan
count_system_ram_pages_cb(unsigned long start_pfn,unsigned long nr_pages,void * data)1859c5e79ef5SDavid Hildenbrand static int count_system_ram_pages_cb(unsigned long start_pfn,
1860c5e79ef5SDavid Hildenbrand unsigned long nr_pages, void *data)
1861c5e79ef5SDavid Hildenbrand {
1862c5e79ef5SDavid Hildenbrand unsigned long *nr_system_ram_pages = data;
1863c5e79ef5SDavid Hildenbrand
1864c5e79ef5SDavid Hildenbrand *nr_system_ram_pages += nr_pages;
1865c5e79ef5SDavid Hildenbrand return 0;
1866c5e79ef5SDavid Hildenbrand }
1867c5e79ef5SDavid Hildenbrand
1868e0270ffaSSumanth Korikkar /*
1869e0270ffaSSumanth Korikkar * Must be called with mem_hotplug_lock in write mode.
1870e0270ffaSSumanth Korikkar */
offline_pages(unsigned long start_pfn,unsigned long nr_pages,struct zone * zone,struct memory_group * group)1871836809ecSDavid Hildenbrand int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
1872395f6081SDavid Hildenbrand struct zone *zone, struct memory_group *group)
18730c0e6195SKAMEZAWA Hiroyuki {
187473a11c96SDavid Hildenbrand const unsigned long end_pfn = start_pfn + nr_pages;
18750a1a9a00SDavid Hildenbrand unsigned long pfn, system_ram_pages = 0;
1876395f6081SDavid Hildenbrand const int node = zone_to_nid(zone);
1877d702909fSCody P Schafer unsigned long flags;
18787b78d335SYasunori Goto struct memory_notify arg;
187979605093SMichal Hocko char *reason;
1880395f6081SDavid Hildenbrand int ret;
18810c0e6195SKAMEZAWA Hiroyuki
1882dd8e2f23SOscar Salvador /*
1883dd8e2f23SOscar Salvador * {on,off}lining is constrained to full memory sections (or more
1884041711ceSZhen Lei * precisely to memory blocks from the user space POV).
1885dd8e2f23SOscar Salvador * memmap_on_memory is an exception because it reserves the initial part
1886dd8e2f23SOscar Salvador * of the physical memory space for vmemmaps. That space is pageblock
1887dd8e2f23SOscar Salvador * aligned.
1888dd8e2f23SOscar Salvador */
1889ee0913c4SKefeng Wang if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
1890dd8e2f23SOscar Salvador !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
18914986fac1SDavid Hildenbrand return -EINVAL;
18924986fac1SDavid Hildenbrand
1893c5e79ef5SDavid Hildenbrand /*
1894c5e79ef5SDavid Hildenbrand * Don't allow to offline memory blocks that contain holes.
1895c5e79ef5SDavid Hildenbrand * Consequently, memory blocks with holes can never get onlined
1896c5e79ef5SDavid Hildenbrand * via the hotplug path - online_pages() - as hotplugged memory has
1897c5e79ef5SDavid Hildenbrand * no holes. This way, we e.g., don't have to worry about marking
1898c5e79ef5SDavid Hildenbrand * memory holes PG_reserved, don't need pfn_valid() checks, and can
1899c5e79ef5SDavid Hildenbrand * avoid using walk_system_ram_range() later.
1900c5e79ef5SDavid Hildenbrand */
190173a11c96SDavid Hildenbrand walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
1902c5e79ef5SDavid Hildenbrand count_system_ram_pages_cb);
190373a11c96SDavid Hildenbrand if (system_ram_pages != nr_pages) {
1904c5e79ef5SDavid Hildenbrand ret = -EINVAL;
1905c5e79ef5SDavid Hildenbrand reason = "memory holes";
1906c5e79ef5SDavid Hildenbrand goto failed_removal;
1907c5e79ef5SDavid Hildenbrand }
1908c5e79ef5SDavid Hildenbrand
1909395f6081SDavid Hildenbrand /*
1910395f6081SDavid Hildenbrand * We only support offlining of memory blocks managed by a single zone,
1911395f6081SDavid Hildenbrand * checked by calling code. This is just a sanity check that we might
1912395f6081SDavid Hildenbrand * want to remove in the future.
1913395f6081SDavid Hildenbrand */
1914395f6081SDavid Hildenbrand if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||
1915395f6081SDavid Hildenbrand page_zone(pfn_to_page(end_pfn - 1)) != zone)) {
191679605093SMichal Hocko ret = -EINVAL;
191779605093SMichal Hocko reason = "multizone range";
191879605093SMichal Hocko goto failed_removal;
1919381eab4aSDavid Hildenbrand }
19207b78d335SYasunori Goto
1921ec6e8c7eSVlastimil Babka /*
1922ec6e8c7eSVlastimil Babka * Disable pcplists so that page isolation cannot race with freeing
1923ec6e8c7eSVlastimil Babka * in a way that pages from isolated pageblock are left on pcplists.
1924ec6e8c7eSVlastimil Babka */
1925ec6e8c7eSVlastimil Babka zone_pcp_disable(zone);
1926d479960eSMinchan Kim lru_cache_disable();
1927ec6e8c7eSVlastimil Babka
19280c0e6195SKAMEZAWA Hiroyuki /* set above range as isolated */
1929b023f468SWen Congyang ret = start_isolate_page_range(start_pfn, end_pfn,
1930d381c547SMichal Hocko MIGRATE_MOVABLE,
1931b2c9e2fbSZi Yan MEMORY_OFFLINE | REPORT_FAILURE,
1932b2c9e2fbSZi Yan GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL);
19333fa0c7c7SDavid Hildenbrand if (ret) {
193479605093SMichal Hocko reason = "failure to isolate range";
1935ec6e8c7eSVlastimil Babka goto failed_removal_pcplists_disabled;
1936381eab4aSDavid Hildenbrand }
19377b78d335SYasunori Goto
19387b78d335SYasunori Goto arg.start_pfn = start_pfn;
19397b78d335SYasunori Goto arg.nr_pages = nr_pages;
1940d9713679SLai Jiangshan node_states_check_changes_offline(nr_pages, zone, &arg);
19417b78d335SYasunori Goto
19427b78d335SYasunori Goto ret = memory_notify(MEM_GOING_OFFLINE, &arg);
19437b78d335SYasunori Goto ret = notifier_to_errno(ret);
194479605093SMichal Hocko if (ret) {
194579605093SMichal Hocko reason = "notifier failure";
194679605093SMichal Hocko goto failed_removal_isolated;
194779605093SMichal Hocko }
19487b78d335SYasunori Goto
1949bb8965bdSMichal Hocko do {
1950aa218795SDavid Hildenbrand pfn = start_pfn;
1951aa218795SDavid Hildenbrand do {
1952de7cb03dSDavid Hildenbrand /*
1953de7cb03dSDavid Hildenbrand * Historically we always checked for any signal and
1954de7cb03dSDavid Hildenbrand * can't limit it to fatal signals without eventually
1955de7cb03dSDavid Hildenbrand * breaking user space.
1956de7cb03dSDavid Hildenbrand */
195779605093SMichal Hocko if (signal_pending(current)) {
1958bb8965bdSMichal Hocko ret = -EINTR;
195979605093SMichal Hocko reason = "signal backoff";
196079605093SMichal Hocko goto failed_removal_isolated;
196179605093SMichal Hocko }
196272b39cfcSMichal Hocko
19630c0e6195SKAMEZAWA Hiroyuki cond_resched();
19640c0e6195SKAMEZAWA Hiroyuki
1965aa218795SDavid Hildenbrand ret = scan_movable_pages(pfn, end_pfn, &pfn);
1966aa218795SDavid Hildenbrand if (!ret) {
1967bb8965bdSMichal Hocko /*
1968bb8965bdSMichal Hocko * TODO: fatal migration failures should bail
1969bb8965bdSMichal Hocko * out
1970bb8965bdSMichal Hocko */
1971bb8965bdSMichal Hocko do_migrate_range(pfn, end_pfn);
1972bb8965bdSMichal Hocko }
1973aa218795SDavid Hildenbrand } while (!ret);
1974aa218795SDavid Hildenbrand
1975aa218795SDavid Hildenbrand if (ret != -ENOENT) {
1976aa218795SDavid Hildenbrand reason = "unmovable page";
1977aa218795SDavid Hildenbrand goto failed_removal_isolated;
19780c0e6195SKAMEZAWA Hiroyuki }
197972b39cfcSMichal Hocko
1980c8721bbbSNaoya Horiguchi /*
1981bb8965bdSMichal Hocko * Dissolve free huge pages in the memory block before actually
1982bb8965bdSMichal Hocko * offlining, in order to keep hugetlbfs's object counting
1983bb8965bdSMichal Hocko * consistent.
1984c8721bbbSNaoya Horiguchi */
1985082d5b6bSGerald Schaefer ret = dissolve_free_huge_pages(start_pfn, end_pfn);
198679605093SMichal Hocko if (ret) {
198779605093SMichal Hocko reason = "failure to dissolve huge pages";
198879605093SMichal Hocko goto failed_removal_isolated;
198979605093SMichal Hocko }
19900a1a9a00SDavid Hildenbrand
19910a1a9a00SDavid Hildenbrand ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
1992ec6e8c7eSVlastimil Babka
19935557c766SMichal Hocko } while (ret);
1994bb8965bdSMichal Hocko
19950a1a9a00SDavid Hildenbrand /* Mark all sections offline and remove free pages from the buddy. */
19960a1a9a00SDavid Hildenbrand __offline_isolated_pages(start_pfn, end_pfn);
19977c33023aSLaurent Dufour pr_debug("Offlined Pages %lu\n", nr_pages);
19980a1a9a00SDavid Hildenbrand
19999b7ea46aSQian Cai /*
2000b30c5927SDavid Hildenbrand * The memory sections are marked offline, and the pageblock flags are
2001b30c5927SDavid Hildenbrand * effectively stale; nobody should be touching them. Fix up the number
2002b30c5927SDavid Hildenbrand * of isolated pageblocks; memory onlining will properly revert this.
20039b7ea46aSQian Cai */
20049b7ea46aSQian Cai spin_lock_irqsave(&zone->lock, flags);
2005ea15153cSDavid Hildenbrand zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
20069b7ea46aSQian Cai spin_unlock_irqrestore(&zone->lock, flags);
20079b7ea46aSQian Cai
2008d479960eSMinchan Kim lru_cache_enable();
2009ec6e8c7eSVlastimil Babka zone_pcp_enable(zone);
2010ec6e8c7eSVlastimil Babka
20110c0e6195SKAMEZAWA Hiroyuki /* removal success */
20120a1a9a00SDavid Hildenbrand adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
2013836809ecSDavid Hildenbrand adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
20147b78d335SYasunori Goto
2015b92ca18eSMel Gorman /* reinitialise watermarks and update pcp limits */
20161b79acc9SKOSAKI Motohiro init_per_zone_wmark_min();
20171b79acc9SKOSAKI Motohiro
20181e8537baSXishi Qiu if (!populated_zone(zone)) {
2019340175b7SJiang Liu zone_pcp_reset(zone);
202072675e13SMichal Hocko build_all_zonelists(NULL);
2021b92ca18eSMel Gorman }
2022340175b7SJiang Liu
2023d9713679SLai Jiangshan node_states_clear_node(node, &arg);
2024698b1b30SVlastimil Babka if (arg.status_change_nid >= 0) {
2025698b1b30SVlastimil Babka kcompactd_stop(node);
2026b4a0215eSKefeng Wang kswapd_stop(node);
2027698b1b30SVlastimil Babka }
2028bce7394aSMinchan Kim
20290c0e6195SKAMEZAWA Hiroyuki writeback_set_ratelimit();
20307b78d335SYasunori Goto
20317b78d335SYasunori Goto memory_notify(MEM_OFFLINE, &arg);
2032feee6b29SDavid Hildenbrand remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
20330c0e6195SKAMEZAWA Hiroyuki return 0;
20340c0e6195SKAMEZAWA Hiroyuki
203579605093SMichal Hocko failed_removal_isolated:
203636ba30bcSMiaohe Lin /* push the isolated pages back to the free area */
203779605093SMichal Hocko undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
2038c4efe484SQian Cai memory_notify(MEM_CANCEL_OFFLINE, &arg);
2039ec6e8c7eSVlastimil Babka failed_removal_pcplists_disabled:
2040946746d1SMiaohe Lin lru_cache_enable();
2041ec6e8c7eSVlastimil Babka zone_pcp_enable(zone);
20420c0e6195SKAMEZAWA Hiroyuki failed_removal:
204379605093SMichal Hocko pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
2044a62e2f4fSBjorn Helgaas (unsigned long long) start_pfn << PAGE_SHIFT,
204579605093SMichal Hocko ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
204679605093SMichal Hocko reason);
20470c0e6195SKAMEZAWA Hiroyuki return ret;
20480c0e6195SKAMEZAWA Hiroyuki }
204971088785SBadari Pulavarty
2050d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
2051bbc76be6SWen Congyang {
2052e1c158e4SDavid Hildenbrand int *nid = arg;
2053bbc76be6SWen Congyang
2054e1c158e4SDavid Hildenbrand *nid = mem->nid;
2055639118d1SKefeng Wang if (unlikely(mem->state != MEM_OFFLINE)) {
2056349daa0fSRandy Dunlap phys_addr_t beginpa, endpa;
2057349daa0fSRandy Dunlap
2058349daa0fSRandy Dunlap beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
2059b6c88d3bSDavid Hildenbrand endpa = beginpa + memory_block_size_bytes() - 1;
2060756a025fSJoe Perches pr_warn("removing memory failed because memory [%pa-%pa] is still online\n",
2061349daa0fSRandy Dunlap &beginpa, &endpa);
2062bbc76be6SWen Congyang
2063eca499abSPavel Tatashin return -EBUSY;
2064eca499abSPavel Tatashin }
2065eca499abSPavel Tatashin return 0;
2066bbc76be6SWen Congyang }
2067bbc76be6SWen Congyang
20681a8c64e1SAneesh Kumar K.V static int test_has_altmap_cb(struct memory_block *mem, void *arg)
2069a08a2ae3SOscar Salvador {
20701a8c64e1SAneesh Kumar K.V struct memory_block **mem_ptr = (struct memory_block **)arg;
2071a08a2ae3SOscar Salvador /*
20721a8c64e1SAneesh Kumar K.V * Return the memory block if it has an altmap, and stop the
20731a8c64e1SAneesh Kumar K.V * walk by returning non-zero.
2074a08a2ae3SOscar Salvador */
20751a8c64e1SAneesh Kumar K.V if (mem->altmap) {
20761a8c64e1SAneesh Kumar K.V *mem_ptr = mem;
20771a8c64e1SAneesh Kumar K.V return 1;
20781a8c64e1SAneesh Kumar K.V }
20791a8c64e1SAneesh Kumar K.V return 0;
2080a08a2ae3SOscar Salvador }
2081a08a2ae3SOscar Salvador
2082b27340a5SMiaohe Lin static int check_cpu_on_node(int nid)
208360a5a19eSTang Chen {
208460a5a19eSTang Chen int cpu;
208560a5a19eSTang Chen
208660a5a19eSTang Chen for_each_present_cpu(cpu) {
2087b27340a5SMiaohe Lin if (cpu_to_node(cpu) == nid)
208860a5a19eSTang Chen /*
208960a5a19eSTang Chen * A CPU on this node has not been removed, so we
209060a5a19eSTang Chen * cannot offline this node.
209160a5a19eSTang Chen */
209260a5a19eSTang Chen return -EBUSY;
209360a5a19eSTang Chen }
209460a5a19eSTang Chen
209560a5a19eSTang Chen return 0;
209660a5a19eSTang Chen }
209760a5a19eSTang Chen
20982c91f8fcSDavid Hildenbrand static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
20992c91f8fcSDavid Hildenbrand {
21002c91f8fcSDavid Hildenbrand int nid = *(int *)arg;
21012c91f8fcSDavid Hildenbrand
21022c91f8fcSDavid Hildenbrand /*
21032c91f8fcSDavid Hildenbrand * If a memory block belongs to multiple nodes, the stored nid is not
21042c91f8fcSDavid Hildenbrand * reliable. However, such blocks are always online (i.e., they cannot
21052c91f8fcSDavid Hildenbrand * get offlined) and, therefore, are still spanned by the node.
21062c91f8fcSDavid Hildenbrand */
21072c91f8fcSDavid Hildenbrand return mem->nid == nid ? -EEXIST : 0;
21082c91f8fcSDavid Hildenbrand }
21092c91f8fcSDavid Hildenbrand
21100f1cfe9dSToshi Kani /**
21110f1cfe9dSToshi Kani * try_offline_node
2112e8b098fcSMike Rapoport * @nid: the node ID
21130f1cfe9dSToshi Kani *
21140f1cfe9dSToshi Kani * Offline a node if all memory sections and cpus of the node are removed.
21150f1cfe9dSToshi Kani *
21160f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
21170f1cfe9dSToshi Kani * and online/offline operations before this call.
21180f1cfe9dSToshi Kani */
211990b30cdcSWen Congyang void try_offline_node(int nid)
212060a5a19eSTang Chen {
21212c91f8fcSDavid Hildenbrand int rc;
212260a5a19eSTang Chen
212360a5a19eSTang Chen /*
21242c91f8fcSDavid Hildenbrand * If the node still spans pages (especially ZONE_DEVICE), don't
21252c91f8fcSDavid Hildenbrand * offline it. A node spans memory after move_pfn_range_to_zone(),
21262c91f8fcSDavid Hildenbrand * e.g., after the memory block was onlined.
212760a5a19eSTang Chen */
2128b27340a5SMiaohe Lin if (node_spanned_pages(nid))
212960a5a19eSTang Chen return;
21302c91f8fcSDavid Hildenbrand
21312c91f8fcSDavid Hildenbrand /*
21322c91f8fcSDavid Hildenbrand * Offline memory blocks, especially, might not be spanned by the
21332c91f8fcSDavid Hildenbrand * node; they only get spanned once they are onlined. However, they
21342c91f8fcSDavid Hildenbrand * link to the node in sysfs and can get onlined later.
21352c91f8fcSDavid Hildenbrand */
21362c91f8fcSDavid Hildenbrand rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
21372c91f8fcSDavid Hildenbrand if (rc)
21382c91f8fcSDavid Hildenbrand return;
213960a5a19eSTang Chen
2140b27340a5SMiaohe Lin if (check_cpu_on_node(nid))
214160a5a19eSTang Chen return;
214260a5a19eSTang Chen
214360a5a19eSTang Chen /*
214460a5a19eSTang Chen * All memory and CPUs of this node have been removed; we can
214560a5a19eSTang Chen * offline this node now.
214660a5a19eSTang Chen */
214760a5a19eSTang Chen node_set_offline(nid);
214860a5a19eSTang Chen unregister_one_node(nid);
214960a5a19eSTang Chen }
215090b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
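/*
 * Illustrative caller sketch (editor's example; assumes "nid" refers to
 * a node whose memory blocks and CPUs were already removed):
 *
 *	lock_device_hotplug();
 *	try_offline_node(nid);
 *	unlock_device_hotplug();
 *
 * matching the locking rule in the kernel-doc above.
 */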
215160a5a19eSTang Chen
2152e1c158e4SDavid Hildenbrand static int __ref try_remove_memory(u64 start, u64 size)
2153bbc76be6SWen Congyang {
21541a8c64e1SAneesh Kumar K.V struct memory_block *mem;
2155e1c158e4SDavid Hildenbrand int rc = 0, nid = NUMA_NO_NODE;
21561a8c64e1SAneesh Kumar K.V struct vmem_altmap *altmap = NULL;
2157993c1aadSWen Congyang
215827356f54SToshi Kani BUG_ON(check_hotplug_memory_range(start, size));
215927356f54SToshi Kani
21606677e3eaSYasuaki Ishimatsu /*
2161242831ebSRafael J. Wysocki * All memory blocks must be offlined before removing memory. Check
2162eca499abSPavel Tatashin * whether all memory blocks in question are offline and return error
2163242831ebSRafael J. Wysocki * if this is not the case.
2164e1c158e4SDavid Hildenbrand *
2165e1c158e4SDavid Hildenbrand * While at it, determine the nid. Note that if we'd have mixed nodes,
2166e1c158e4SDavid Hildenbrand * we'd only try to offline the last determined one -- which is good
2167e1c158e4SDavid Hildenbrand * enough for the cases we care about.
21686677e3eaSYasuaki Ishimatsu */
2169e1c158e4SDavid Hildenbrand rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
2170eca499abSPavel Tatashin if (rc)
2171b4223a51SJia He return rc;
21726677e3eaSYasuaki Ishimatsu
2173a08a2ae3SOscar Salvador /*
2174a08a2ae3SOscar Salvador * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
2175a08a2ae3SOscar Salvador * the same granularity it was added - a single memory block.
2176a08a2ae3SOscar Salvador */
21776e02c46bSMuchun Song if (mhp_memmap_on_memory()) {
21781a8c64e1SAneesh Kumar K.V rc = walk_memory_blocks(start, size, &mem, test_has_altmap_cb);
21791a8c64e1SAneesh Kumar K.V if (rc) {
2180a08a2ae3SOscar Salvador if (size != memory_block_size_bytes()) {
2181a08a2ae3SOscar Salvador pr_warn("Refuse to remove %#llx - %#llx, "
2182a08a2ae3SOscar Salvador "wrong granularity\n",
2183a08a2ae3SOscar Salvador start, start + size);
2184a08a2ae3SOscar Salvador return -EINVAL;
2185a08a2ae3SOscar Salvador }
21861a8c64e1SAneesh Kumar K.V altmap = mem->altmap;
2187a08a2ae3SOscar Salvador /*
21881a8c64e1SAneesh Kumar K.V * Clear mem->altmap so that freeing the memory block
21891a8c64e1SAneesh Kumar K.V * can assert that no stale altmap is left behind.
2190a08a2ae3SOscar Salvador */
21911a8c64e1SAneesh Kumar K.V mem->altmap = NULL;
2192a08a2ae3SOscar Salvador }
2193a08a2ae3SOscar Salvador }
2194a08a2ae3SOscar Salvador
219546c66c4bSYasuaki Ishimatsu /* remove memmap entry */
219646c66c4bSYasuaki Ishimatsu firmware_map_remove(start, start + size, "System RAM");
219746c66c4bSYasuaki Ishimatsu
2198f1037ec0SDan Williams /*
2199f1037ec0SDan Williams * Memory block device removal under the device_hotplug_lock is
2200f1037ec0SDan Williams * a barrier against racing online attempts.
2201f1037ec0SDan Williams */
22024c4b7f9bSDavid Hildenbrand remove_memory_block_devices(start, size);
22034c4b7f9bSDavid Hildenbrand
2204f1037ec0SDan Williams mem_hotplug_begin();
2205f1037ec0SDan Williams
220665a2aa5fSDavid Hildenbrand arch_remove_memory(start, size, altmap);
220752219aeaSDavid Hildenbrand
22081a8c64e1SAneesh Kumar K.V /* Verify that all vmemmap pages have actually been freed. */
22091a8c64e1SAneesh Kumar K.V if (altmap) {
22101a8c64e1SAneesh Kumar K.V WARN(altmap->alloc, "Altmap not fully unmapped");
22111a8c64e1SAneesh Kumar K.V kfree(altmap);
22121a8c64e1SAneesh Kumar K.V }
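/*
 * Note: altmap->alloc counts pages handed out from the altmap-backed
 * reservation and is decremented as vmemmap pages are freed, so a
 * non-zero value after arch_remove_memory() indicates a leak, hence
 * the WARN above.
 */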
22131a8c64e1SAneesh Kumar K.V
221452219aeaSDavid Hildenbrand if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
22153ecc6834SMike Rapoport memblock_phys_free(start, size);
221632d1fe8fSAnshuman Khandual memblock_remove(start, size);
221752219aeaSDavid Hildenbrand }
221852219aeaSDavid Hildenbrand
2219cb8e3c8bSDavid Hildenbrand release_mem_region_adjustable(start, size);
222024d335caSWen Congyang
2221e1c158e4SDavid Hildenbrand if (nid != NUMA_NO_NODE)
222260a5a19eSTang Chen try_offline_node(nid);
222360a5a19eSTang Chen
2224bfc8c901SVladimir Davydov mem_hotplug_done();
2225b4223a51SJia He return 0;
222671088785SBadari Pulavarty }
2227d15e5926SDavid Hildenbrand
2228eca499abSPavel Tatashin /**
22295640c9caSMel Gorman * __remove_memory - Remove memory if every memory block is offline
2230eca499abSPavel Tatashin * @start: physical address of the region to remove
2231eca499abSPavel Tatashin * @size: size of the region to remove
2232eca499abSPavel Tatashin *
2233eca499abSPavel Tatashin * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2234eca499abSPavel Tatashin * and online/offline operations before this call, as required by
2235eca499abSPavel Tatashin * try_offline_node().
2236eca499abSPavel Tatashin */
2237e1c158e4SDavid Hildenbrand void __remove_memory(u64 start, u64 size)
2238d15e5926SDavid Hildenbrand {
2239eca499abSPavel Tatashin
2240eca499abSPavel Tatashin /*
224129a90db9SSouptick Joarder * Trigger BUG() if some memory has not been offlined prior to calling
2242eca499abSPavel Tatashin * this function.
2243eca499abSPavel Tatashin */
2244e1c158e4SDavid Hildenbrand if (try_remove_memory(start, size))
2245eca499abSPavel Tatashin BUG();
2246eca499abSPavel Tatashin }
2247eca499abSPavel Tatashin
2248eca499abSPavel Tatashin /*
2249eca499abSPavel Tatashin * Remove memory if every memory block is offline; return -EBUSY if
2250eca499abSPavel Tatashin * some memory is still online.
2251eca499abSPavel Tatashin */
2252e1c158e4SDavid Hildenbrand int remove_memory(u64 start, u64 size)
2253eca499abSPavel Tatashin {
2254eca499abSPavel Tatashin int rc;
2255eca499abSPavel Tatashin
2256d15e5926SDavid Hildenbrand lock_device_hotplug();
2257e1c158e4SDavid Hildenbrand rc = try_remove_memory(start, size);
2258d15e5926SDavid Hildenbrand unlock_device_hotplug();
2259eca499abSPavel Tatashin
2260eca499abSPavel Tatashin return rc;
2261d15e5926SDavid Hildenbrand }
226271088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
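/*
 * Illustrative sketch (editor's example): a driver that earlier called
 * add_memory() tearing the same range down again. All identifiers
 * except remove_memory() are hypothetical:
 *
 *	static void example_driver_unplug(u64 start, u64 size)
 *	{
 *		int rc = remove_memory(start, size);
 *
 *		if (rc == -EBUSY)
 *			pr_warn("range is still online, offline it first\n");
 *	}
 *
 * Unlike __remove_memory(), this variant takes device_hotplug_lock
 * itself and returns -EBUSY instead of triggering BUG().
 */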
226308b3acd7SDavid Hildenbrand
22648dc4bb58SDavid Hildenbrand static int try_offline_memory_block(struct memory_block *mem, void *arg)
22658dc4bb58SDavid Hildenbrand {
22668dc4bb58SDavid Hildenbrand uint8_t online_type = MMOP_ONLINE_KERNEL;
22678dc4bb58SDavid Hildenbrand uint8_t **online_types = arg;
22688dc4bb58SDavid Hildenbrand struct page *page;
22698dc4bb58SDavid Hildenbrand int rc;
22708dc4bb58SDavid Hildenbrand
227108b3acd7SDavid Hildenbrand /*
22728dc4bb58SDavid Hildenbrand * Sense the online_type via the zone of the memory block. Offlining
22738dc4bb58SDavid Hildenbrand * with multiple zones within one memory block will be rejected
22748dc4bb58SDavid Hildenbrand * by offlining code ... so we don't care about that.
22758dc4bb58SDavid Hildenbrand */
22768dc4bb58SDavid Hildenbrand page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
22778dc4bb58SDavid Hildenbrand if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
22788dc4bb58SDavid Hildenbrand online_type = MMOP_ONLINE_MOVABLE;
22798dc4bb58SDavid Hildenbrand
22808dc4bb58SDavid Hildenbrand rc = device_offline(&mem->dev);
22818dc4bb58SDavid Hildenbrand /*
22828dc4bb58SDavid Hildenbrand * Default is MMOP_OFFLINE - change it only if offlining succeeded,
22838dc4bb58SDavid Hildenbrand * so try_reonline_memory_block() can do the right thing.
22848dc4bb58SDavid Hildenbrand */
22858dc4bb58SDavid Hildenbrand if (!rc)
22868dc4bb58SDavid Hildenbrand **online_types = online_type;
22878dc4bb58SDavid Hildenbrand
22888dc4bb58SDavid Hildenbrand (*online_types)++;
22898dc4bb58SDavid Hildenbrand /* Ignore if already offline. */
22908dc4bb58SDavid Hildenbrand return rc < 0 ? rc : 0;
22918dc4bb58SDavid Hildenbrand }
22928dc4bb58SDavid Hildenbrand
22938dc4bb58SDavid Hildenbrand static int try_reonline_memory_block(struct memory_block *mem, void *arg)
22948dc4bb58SDavid Hildenbrand {
22958dc4bb58SDavid Hildenbrand uint8_t **online_types = arg;
22968dc4bb58SDavid Hildenbrand int rc;
22978dc4bb58SDavid Hildenbrand
22988dc4bb58SDavid Hildenbrand if (**online_types != MMOP_OFFLINE) {
22998dc4bb58SDavid Hildenbrand mem->online_type = **online_types;
23008dc4bb58SDavid Hildenbrand rc = device_online(&mem->dev);
23018dc4bb58SDavid Hildenbrand if (rc < 0)
23028dc4bb58SDavid Hildenbrand pr_warn("%s: Failed to re-online memory: %d\n",
23038dc4bb58SDavid Hildenbrand __func__, rc);
23048dc4bb58SDavid Hildenbrand }
23058dc4bb58SDavid Hildenbrand
23068dc4bb58SDavid Hildenbrand /* Continue processing all remaining memory blocks. */
23078dc4bb58SDavid Hildenbrand (*online_types)++;
23088dc4bb58SDavid Hildenbrand return 0;
23098dc4bb58SDavid Hildenbrand }
23108dc4bb58SDavid Hildenbrand
23118dc4bb58SDavid Hildenbrand /*
23128dc4bb58SDavid Hildenbrand * Try to offline and remove memory. Might take a long time to finish if
23138dc4bb58SDavid Hildenbrand * memory is still in use. Primarily useful for memory devices that have
23148dc4bb58SDavid Hildenbrand * logically unplugged all memory (so it's no longer in use) and want to
23158dc4bb58SDavid Hildenbrand * offline + remove that memory.
231608b3acd7SDavid Hildenbrand */
2317e1c158e4SDavid Hildenbrand int offline_and_remove_memory(u64 start, u64 size)
231808b3acd7SDavid Hildenbrand {
23198dc4bb58SDavid Hildenbrand const unsigned long mb_count = size / memory_block_size_bytes();
23208dc4bb58SDavid Hildenbrand uint8_t *online_types, *tmp;
23218dc4bb58SDavid Hildenbrand int rc;
232208b3acd7SDavid Hildenbrand
232308b3acd7SDavid Hildenbrand if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
23248dc4bb58SDavid Hildenbrand !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
23258dc4bb58SDavid Hildenbrand return -EINVAL;
232608b3acd7SDavid Hildenbrand
232708b3acd7SDavid Hildenbrand /*
23288dc4bb58SDavid Hildenbrand * We'll remember the old online type of each memory block, so we can
23298dc4bb58SDavid Hildenbrand * try to revert whatever we did when offlining one memory block fails
23308dc4bb58SDavid Hildenbrand * after offlining some others succeeded.
23318dc4bb58SDavid Hildenbrand */
23328dc4bb58SDavid Hildenbrand online_types = kmalloc_array(mb_count, sizeof(*online_types),
23338dc4bb58SDavid Hildenbrand GFP_KERNEL);
23348dc4bb58SDavid Hildenbrand if (!online_types)
23358dc4bb58SDavid Hildenbrand return -ENOMEM;
23368dc4bb58SDavid Hildenbrand /*
23378dc4bb58SDavid Hildenbrand * Initialize all states to MMOP_OFFLINE, so when we abort processing in
23388dc4bb58SDavid Hildenbrand * try_offline_memory_block(), we'll skip all unprocessed blocks in
23398dc4bb58SDavid Hildenbrand * try_reonline_memory_block().
23408dc4bb58SDavid Hildenbrand */
23418dc4bb58SDavid Hildenbrand memset(online_types, MMOP_OFFLINE, mb_count);
23428dc4bb58SDavid Hildenbrand
23438dc4bb58SDavid Hildenbrand lock_device_hotplug();
23448dc4bb58SDavid Hildenbrand
23458dc4bb58SDavid Hildenbrand tmp = online_types;
23468dc4bb58SDavid Hildenbrand rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);
23478dc4bb58SDavid Hildenbrand
23488dc4bb58SDavid Hildenbrand /*
23498dc4bb58SDavid Hildenbrand * If we succeeded in offlining all memory, remove it.
235008b3acd7SDavid Hildenbrand * This cannot fail as it cannot get onlined in the meantime.
235108b3acd7SDavid Hildenbrand */
235208b3acd7SDavid Hildenbrand if (!rc) {
2353e1c158e4SDavid Hildenbrand rc = try_remove_memory(start, size);
23548dc4bb58SDavid Hildenbrand if (rc)
23558dc4bb58SDavid Hildenbrand pr_err("%s: Failed to remove memory: %d\n", __func__, rc);
23568dc4bb58SDavid Hildenbrand }
23578dc4bb58SDavid Hildenbrand
23588dc4bb58SDavid Hildenbrand /*
23598dc4bb58SDavid Hildenbrand * Roll back what we did. While memory onlining might theoretically fail
23608dc4bb58SDavid Hildenbrand * (nacked by a notifier), it hardly ever happens.
23618dc4bb58SDavid Hildenbrand */
23628dc4bb58SDavid Hildenbrand if (rc) {
23638dc4bb58SDavid Hildenbrand tmp = online_types;
23648dc4bb58SDavid Hildenbrand walk_memory_blocks(start, size, &tmp,
23658dc4bb58SDavid Hildenbrand try_reonline_memory_block);
236608b3acd7SDavid Hildenbrand }
236708b3acd7SDavid Hildenbrand unlock_device_hotplug();
236808b3acd7SDavid Hildenbrand
23698dc4bb58SDavid Hildenbrand kfree(online_types);
237008b3acd7SDavid Hildenbrand return rc;
237108b3acd7SDavid Hildenbrand }
237208b3acd7SDavid Hildenbrand EXPORT_SYMBOL_GPL(offline_and_remove_memory);
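/*
 * Illustrative sketch (editor's example): how a paravirtualized memory
 * device driver might use the helper above on a single, fully unplugged
 * memory block. "example_block_id" is hypothetical:
 *
 *	u64 start = memory_block_size_bytes() * example_block_id;
 *	int rc;
 *
 *	rc = offline_and_remove_memory(start, memory_block_size_bytes());
 *	if (rc)
 *		pr_warn("block still busy, retrying later: %d\n", rc);
 *
 * Both start and size must be aligned to memory_block_size_bytes(), as
 * enforced at the top of the function.
 */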
2373aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */