// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"
/*
 * online_page_callback contains pointer to current page onlining function.
 * Initially it is generic_online_page(). If it is required it could be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for generic callback restore.
 */

static void generic_online_page(struct page *page, unsigned int order);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}
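/*
 * A minimal usage sketch of the locking protocol above (illustrative only,
 * not part of this file; the walker function is hypothetical). Readers that
 * iterate over hotpluggable memory take the rwsem for reading, while the
 * hotplug paths take it for writing via mem_hotplug_begin()/done():
 *
 *	get_online_mems();
 *	walk_hotplugged_ranges();	// memory cannot vanish while held
 *	put_online_mems();
 *
 * Writers additionally nest inside cpus_read_lock() (taken in
 * mem_hotplug_begin()) so that memory and CPU hotplug cannot race.
 */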
u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	char *resource_name = "System RAM";

	if (start + size > max_mem_size)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range.  This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}
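/*
 * Pairing sketch for the helpers above (illustrative, not part of this
 * file): each bootmem page backing hotplug metadata is tagged with a type
 * in page->freelist and its refcount is raised once per registration; the
 * final put_page_bootmem() hands the page back to the buddy allocator.
 *
 *	get_page_bootmem(section_nr, page, SECTION_INFO);
 *	...
 *	put_page_bootmem(page);	// frees the page on the last reference
 */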
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN.  To avoid registering a pfn
		 * against multiple nodes, we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
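/*
 * Worked sizing example for the registration above (a sketch; the numbers
 * assume x86-64 defaults of 4 KiB pages, 128 MiB sections and a 64-byte
 * struct page): PAGES_PER_SECTION = 32768, so the per-section memmap is
 * 32768 * 64 bytes = 2 MiB, i.e. mapsize = 512 pages get tagged with
 * SECTION_INFO for each registered section.
 */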
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
		const char *reason)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn, min_align)
			|| !IS_ALIGNED(nr_pages, min_align)) {
		WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
				reason, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}
	return 0;
}
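/*
 * Worked example for the check above (a sketch assuming SPARSEMEM_VMEMMAP
 * with 2 MiB sub-sections on x86-64, i.e. PAGES_PER_SUBSECTION = 512):
 * pfn = 0x80000 with nr_pages = 0x1000 passes (both are multiples of 512),
 * while pfn = 0x80010 trips the WARN and returns -EINVAL because the start
 * is not sub-section aligned.
 */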
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_restrictions *restrictions)
{
	int err;
	unsigned long nr, start_sec, end_sec;
	struct vmem_altmap *altmap = restrictions->altmap;

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	err = check_pfn_span(pfn, nr_pages, "add");
	if (err)
		return err;

	start_sec = pfn_to_section_nr(pfn);
	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	for (nr = start_sec; nr <= end_sec; nr++) {
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		err = sparse_add_section(nid, pfn, pfns, altmap);
		if (err)
			break;
		pfn += pfns;
		nr_pages -= pfns;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}
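/*
 * A worked pass through the loop above (a sketch assuming 128 MiB sections,
 * i.e. PAGES_PER_SECTION = 0x8000): __add_pages(nid, 0x84000, 0x10000, ...)
 * spans sections 16..18, so sparse_add_section() runs three times with
 * 0x4000 pfns (tail of section 16), 0x8000 pfns (all of section 17) and
 * 0x4000 pfns (head of section 18).
 */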
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is not the biggest or smallest mem_section in the
	 * zone, it only creates a hole in the zone.  So in this case, we need
	 * not change the zone.  But perhaps the zone now consists only of
	 * holes, so check whether any valid section is left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* Skip range to be removed */
		if (pfn >= start_pfn && pfn < end_pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is not the biggest or smallest mem_section in the
	 * pgdat, it only creates a hole in the pgdat.  So in this case, we
	 * need not change the pgdat.  But perhaps the pgdat now consists only
	 * of holes, so check whether any valid section is left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* Skip range to be removed */
		if (pfn >= start_pfn && pfn < end_pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static void __remove_section(struct zone *zone, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	__remove_zone(zone, pfn, nr_pages);
	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(struct zone *zone, unsigned long pfn,
		    unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long map_offset = 0;
	unsigned long nr, start_sec, end_sec;

	map_offset = vmem_altmap_offset(altmap);

	clear_zone_contiguous(zone);

	if (check_pfn_span(pfn, nr_pages, "remove"))
		return;

	start_sec = pfn_to_section_nr(pfn);
	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	for (nr = start_sec; nr <= end_sec; nr++) {
		unsigned long pfns;

		cond_resched();
		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		__remove_section(zone, pfn, pfns, map_offset, altmap);
		pfn += pfns;
		nr_pages -= pfns;
		map_offset = 0;
	}

	set_zone_contiguous(zone);
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);
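/*
 * Registration sketch for the hooks above (illustrative only; the driver
 * callback name is hypothetical). A balloon-style driver can intercept
 * every hot-added page range and release it at its own pace:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		unsigned long i;
 *
 *		for (i = 0; i < (1UL << order); i++) {
 *			__online_page_set_limits(page + i);
 *			__online_page_increment_counters(page + i);
 *			__online_page_free(page + i);
 *		}
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 *
 * set_online_page_callback() returns -EINVAL if a different callback is
 * already installed; restore_online_page_callback() puts
 * generic_online_page() back.
 */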
static void generic_online_page(struct page *page, unsigned int order)
{
	kernel_map_pages(page, 1 << order, 1);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}

static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
{
	unsigned long end = start + nr_pages;
	int order, onlined_pages = 0;

	while (start < end) {
		order = min(MAX_ORDER - 1,
			get_order(PFN_PHYS(end) - PFN_PHYS(start)));
		(*online_page_callback)(pfn_to_page(start), order);

		onlined_pages += (1UL << order);
		start += (1UL << order);
	}
	return onlined_pages;
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long onlined_pages = *(unsigned long *)arg;

	if (PageReserved(pfn_to_page(start_pfn)))
		onlined_pages += online_pages_blocks(start_pfn, nr_pages);

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}

/* check which states in node_states will change when memory is onlined */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}
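/*
 * Concrete scenario for the check above (a sketch, assuming a !HIGHMEM
 * config): onlining the first block of a previously memoryless node 2 into
 * ZONE_NORMAL yields status_change_nid = 2 and status_change_nid_normal = 2,
 * so node_states_set_node() below can mark the node N_MEMORY and
 * N_NORMAL_MEMORY; onlining further blocks into the same node later leaves
 * every field at NUMA_NO_NODE and the node states untouched.
 */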
static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone to span the pfn range. All the pages in the range
	 * are reserved, so nobody should be touching them, so we should be
	 * safe.
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
			 MEMMAP_HOTPLUG, altmap);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range, it automatically falls
 * back to ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}
static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
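/*
 * Decision sketch for zone_for_pfn_range() (illustrative): an explicit
 * MMOP_ONLINE_KERNEL request always lands in a kernel zone and
 * MMOP_ONLINE_MOVABLE in ZONE_MOVABLE. For the default "keep" policy, a
 * range that intersects exactly one of the two candidates inherits that
 * zone, while an ambiguous or disjoint range goes to ZONE_MOVABLE only
 * when movable_node is enabled, and otherwise to a kernel zone.
 */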
/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
	return zone;
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;
	struct memory_block *mem;

	mem_hotplug_begin();

	/*
	 * We can't use pfn_to_nid() because nid might be stored in struct page
	 * which is not yet initialized. Instead, we find nid from memory block.
	 */
	mem = find_memory_block(__pfn_to_section(pfn));
	nid = mem->nid;
	put_device(&mem->dev);

	/* associate pfn range with the zone */
	zone = move_pfn_range(online_type, nid, pfn, nr_pages);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	shuffle_zone(zone);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL);
		else
			zone_pcp_update(zone);
	}

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;

	/* init the node's zones as empty zones; there are no present pages yet */
	free_area_init_core_hotplug(nid);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists.  To avoid
	 * accessing an uninitialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}
/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 * @start: start addr of the node
 * @set_node_online: Whether we want to online the node
 *
 * Called by cpu_up() to online a node that has no onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, u64 start, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid, start);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, 0, true);
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	/* memory range must be block size aligned */
	if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes())) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       memory_block_size_bytes(), start, size);
		return -EINVAL;
	}

	return 0;
}
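/*
 * Worked example for the check above (a sketch assuming the common x86-64
 * memory_block_size_bytes() of 128 MiB): start = 0x100000000 with
 * size = 0x10000000 (256 MiB) passes, while size = 0x6400000 (100 MiB)
 * fails with -EINVAL because it is not a multiple of the block size.
 */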
static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}

/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res)
{
	struct mhp_restrictions restrictions = {};
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	mem_hotplug_begin();

	/*
	 * Add new range to memblock so that when hotadd_new_pgdat() is called
	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
	 * this new range and calculate total pages correctly.  The range will
	 * be removed at hot-remove time.
	 */
	memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, start, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &restrictions);
	if (ret < 0)
		goto error;

	/* create memory block devices after memory was added */
	ret = create_memory_block_devices(start, size);
	if (ret) {
		arch_remove_memory(nid, start, size, NULL);
		goto error;
	}

	if (new_node) {
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added.  There is no way to roll
		 * back at this point, so catch the failure with BUG_ON().
		 * We online the node here; there is no rollback from here on.
		 */
11030fc44159SYasunori Goto */ 1104d5b6f6a3SOscar Salvador node_set_online(nid); 1105d5b6f6a3SOscar Salvador ret = __register_one_node(nid); 11060fc44159SYasunori Goto BUG_ON(ret); 11070fc44159SYasunori Goto } 11080fc44159SYasunori Goto 1109d5b6f6a3SOscar Salvador /* link memory sections under this node.*/ 11104fbce633SOscar Salvador ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); 1111d5b6f6a3SOscar Salvador BUG_ON(ret); 1112d5b6f6a3SOscar Salvador 1113d96ae530Sakpm@linux-foundation.org /* create new memmap entry */ 1114d96ae530Sakpm@linux-foundation.org firmware_map_add_hotplug(start, start + size, "System RAM"); 1115d96ae530Sakpm@linux-foundation.org 1116381eab4aSDavid Hildenbrand /* device_online() will take the lock when calling online_pages() */ 1117381eab4aSDavid Hildenbrand mem_hotplug_done(); 1118381eab4aSDavid Hildenbrand 111931bc3858SVitaly Kuznetsov /* online pages if requested */ 1120f29d8e9cSDavid Hildenbrand if (memhp_auto_online) 1121fbcf73ceSDavid Hildenbrand walk_memory_blocks(start, size, NULL, online_memory_block); 112231bc3858SVitaly Kuznetsov 1123381eab4aSDavid Hildenbrand return ret; 11249af3c2deSYasunori Goto error: 11259af3c2deSYasunori Goto /* rollback pgdat allocation and others */ 1126b9ff0360SOscar Salvador if (new_node) 1127b9ff0360SOscar Salvador rollback_node_hotadd(nid); 11287f36e3e5STang Chen memblock_remove(start, size); 1129bfc8c901SVladimir Davydov mem_hotplug_done(); 1130bc02af93SYasunori Goto return ret; 1131bc02af93SYasunori Goto } 113262cedb9fSDavid Vrabel 11338df1d0e4SDavid Hildenbrand /* requires device_hotplug_lock, see add_memory_resource() */ 11348df1d0e4SDavid Hildenbrand int __ref __add_memory(int nid, u64 start, u64 size) 113562cedb9fSDavid Vrabel { 113662cedb9fSDavid Vrabel struct resource *res; 113762cedb9fSDavid Vrabel int ret; 113862cedb9fSDavid Vrabel 113962cedb9fSDavid Vrabel res = register_memory_resource(start, size); 11406f754ba4SVitaly Kuznetsov if (IS_ERR(res)) 11416f754ba4SVitaly Kuznetsov return PTR_ERR(res); 114262cedb9fSDavid Vrabel 1143f29d8e9cSDavid Hildenbrand ret = add_memory_resource(nid, res); 114462cedb9fSDavid Vrabel if (ret < 0) 114562cedb9fSDavid Vrabel release_memory_resource(res); 114662cedb9fSDavid Vrabel return ret; 114762cedb9fSDavid Vrabel } 11488df1d0e4SDavid Hildenbrand 11498df1d0e4SDavid Hildenbrand int add_memory(int nid, u64 start, u64 size) 11508df1d0e4SDavid Hildenbrand { 11518df1d0e4SDavid Hildenbrand int rc; 11528df1d0e4SDavid Hildenbrand 11538df1d0e4SDavid Hildenbrand lock_device_hotplug(); 11548df1d0e4SDavid Hildenbrand rc = __add_memory(nid, start, size); 11558df1d0e4SDavid Hildenbrand unlock_device_hotplug(); 11568df1d0e4SDavid Hildenbrand 11578df1d0e4SDavid Hildenbrand return rc; 11588df1d0e4SDavid Hildenbrand } 1159bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory); 11600c0e6195SKAMEZAWA Hiroyuki 11610c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE 11620c0e6195SKAMEZAWA Hiroyuki /* 11635c755e9fSBadari Pulavarty * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy 11645c755e9fSBadari Pulavarty * set and the size of the free page is given by page_order(). Using this, 11655c755e9fSBadari Pulavarty * the function determines if the pageblock contains only free pages. 
11665c755e9fSBadari Pulavarty * Due to buddy constraints, a free page at least the size of a pageblock will
11675c755e9fSBadari Pulavarty * be located at the start of the pageblock.
11685c755e9fSBadari Pulavarty */
11695c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page)
11705c755e9fSBadari Pulavarty {
11715c755e9fSBadari Pulavarty return PageBuddy(page) && page_order(page) >= pageblock_order;
11725c755e9fSBadari Pulavarty }
11735c755e9fSBadari Pulavarty
1174891cb2a7SMichal Hocko /* Return the pfn of the start of the next active pageblock after a given pfn */
1175891cb2a7SMichal Hocko static unsigned long next_active_pageblock(unsigned long pfn)
11765c755e9fSBadari Pulavarty {
1177891cb2a7SMichal Hocko struct page *page = pfn_to_page(pfn);
1178891cb2a7SMichal Hocko
11795c755e9fSBadari Pulavarty /* Ensure the starting page is pageblock-aligned */
1180891cb2a7SMichal Hocko BUG_ON(pfn & (pageblock_nr_pages - 1));
11815c755e9fSBadari Pulavarty
11825c755e9fSBadari Pulavarty /* If the entire pageblock is free, move to the end of the free page */
11830dcc48c1SKAMEZAWA Hiroyuki if (pageblock_free(page)) {
11840dcc48c1SKAMEZAWA Hiroyuki int order;
11850dcc48c1SKAMEZAWA Hiroyuki /* Be careful: we don't have locks, so page_order() can change. */
11860dcc48c1SKAMEZAWA Hiroyuki order = page_order(page);
11870dcc48c1SKAMEZAWA Hiroyuki if ((order < MAX_ORDER) && (order >= pageblock_order))
1188891cb2a7SMichal Hocko return pfn + (1 << order);
11890dcc48c1SKAMEZAWA Hiroyuki }
11905c755e9fSBadari Pulavarty
1191891cb2a7SMichal Hocko return pfn + pageblock_nr_pages;
11925c755e9fSBadari Pulavarty }
11935c755e9fSBadari Pulavarty
1194891cb2a7SMichal Hocko static bool is_pageblock_removable_nolock(unsigned long pfn)
1195fb52bbaeSMathieu Malaterre {
1196891cb2a7SMichal Hocko struct page *page = pfn_to_page(pfn);
1197fb52bbaeSMathieu Malaterre struct zone *zone;
1198fb52bbaeSMathieu Malaterre
1199fb52bbaeSMathieu Malaterre /*
1200fb52bbaeSMathieu Malaterre * We have to be careful here because we are iterating over memory
1201fb52bbaeSMathieu Malaterre * sections which are not zone aware so we might end up outside of
1202fb52bbaeSMathieu Malaterre * the zone but still within the section.
1203fb52bbaeSMathieu Malaterre * We have to take care about the node as well. If the node is offline
1204fb52bbaeSMathieu Malaterre * its NODE_DATA will be NULL - see page_zone.
1205fb52bbaeSMathieu Malaterre */
1206fb52bbaeSMathieu Malaterre if (!node_online(page_to_nid(page)))
1207fb52bbaeSMathieu Malaterre return false;
1208fb52bbaeSMathieu Malaterre
1209fb52bbaeSMathieu Malaterre zone = page_zone(page);
1210fb52bbaeSMathieu Malaterre pfn = page_to_pfn(page);
1211fb52bbaeSMathieu Malaterre if (!zone_spans_pfn(zone, pfn))
1212fb52bbaeSMathieu Malaterre return false;
1213fb52bbaeSMathieu Malaterre
1214d381c547SMichal Hocko return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
1215fb52bbaeSMathieu Malaterre }
1216fb52bbaeSMathieu Malaterre
12175c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable.
*/ 1218c98940f6SYaowei Bai bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 12195c755e9fSBadari Pulavarty { 1220891cb2a7SMichal Hocko unsigned long end_pfn, pfn; 1221891cb2a7SMichal Hocko 1222891cb2a7SMichal Hocko end_pfn = min(start_pfn + nr_pages, 1223891cb2a7SMichal Hocko zone_end_pfn(page_zone(pfn_to_page(start_pfn)))); 12245c755e9fSBadari Pulavarty 12255c755e9fSBadari Pulavarty /* Check the starting page of each pageblock within the range */ 1226891cb2a7SMichal Hocko for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) { 1227891cb2a7SMichal Hocko if (!is_pageblock_removable_nolock(pfn)) 1228c98940f6SYaowei Bai return false; 122949ac8255SKAMEZAWA Hiroyuki cond_resched(); 12305c755e9fSBadari Pulavarty } 12315c755e9fSBadari Pulavarty 12325c755e9fSBadari Pulavarty /* All pageblocks in the memory block are likely to be hot-removable */ 1233c98940f6SYaowei Bai return true; 12345c755e9fSBadari Pulavarty } 12355c755e9fSBadari Pulavarty 12365c755e9fSBadari Pulavarty /* 1237deb88a2aSToshi Kani * Confirm all pages in a range [start, end) belong to the same zone. 1238a96dfddbSToshi Kani * When true, return its valid [start, end). 12390c0e6195SKAMEZAWA Hiroyuki */ 1240a96dfddbSToshi Kani int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, 1241a96dfddbSToshi Kani unsigned long *valid_start, unsigned long *valid_end) 12420c0e6195SKAMEZAWA Hiroyuki { 12435f0f2887SAndrew Banman unsigned long pfn, sec_end_pfn; 1244a96dfddbSToshi Kani unsigned long start, end; 12450c0e6195SKAMEZAWA Hiroyuki struct zone *zone = NULL; 12460c0e6195SKAMEZAWA Hiroyuki struct page *page; 12470c0e6195SKAMEZAWA Hiroyuki int i; 1248deb88a2aSToshi Kani for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); 12490c0e6195SKAMEZAWA Hiroyuki pfn < end_pfn; 1250deb88a2aSToshi Kani pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { 12515f0f2887SAndrew Banman /* Make sure the memory section is present first */ 12525f0f2887SAndrew Banman if (!present_section_nr(pfn_to_section_nr(pfn))) 12535f0f2887SAndrew Banman continue; 12545f0f2887SAndrew Banman for (; pfn < sec_end_pfn && pfn < end_pfn; 12550c0e6195SKAMEZAWA Hiroyuki pfn += MAX_ORDER_NR_PAGES) { 12560c0e6195SKAMEZAWA Hiroyuki i = 0; 12570c0e6195SKAMEZAWA Hiroyuki /* This is just a CONFIG_HOLES_IN_ZONE check.*/ 12585f0f2887SAndrew Banman while ((i < MAX_ORDER_NR_PAGES) && 12595f0f2887SAndrew Banman !pfn_valid_within(pfn + i)) 12600c0e6195SKAMEZAWA Hiroyuki i++; 1261d6d8c8a4Szhong jiang if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 12620c0e6195SKAMEZAWA Hiroyuki continue; 126324feb47cSMikhail Zaslonko /* Check if we got outside of the zone */ 126424feb47cSMikhail Zaslonko if (zone && !zone_spans_pfn(zone, pfn + i)) 126524feb47cSMikhail Zaslonko return 0; 12660c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn + i); 12670c0e6195SKAMEZAWA Hiroyuki if (zone && page_zone(page) != zone) 12680c0e6195SKAMEZAWA Hiroyuki return 0; 1269a96dfddbSToshi Kani if (!zone) 1270a96dfddbSToshi Kani start = pfn + i; 12710c0e6195SKAMEZAWA Hiroyuki zone = page_zone(page); 1272a96dfddbSToshi Kani end = pfn + MAX_ORDER_NR_PAGES; 12730c0e6195SKAMEZAWA Hiroyuki } 12745f0f2887SAndrew Banman } 1275deb88a2aSToshi Kani 1276a96dfddbSToshi Kani if (zone) { 1277a96dfddbSToshi Kani *valid_start = start; 1278d6d8c8a4Szhong jiang *valid_end = min(end, end_pfn); 12790c0e6195SKAMEZAWA Hiroyuki return 1; 1280a96dfddbSToshi Kani } else { 1281deb88a2aSToshi Kani return 0; 12820c0e6195SKAMEZAWA Hiroyuki } 1283a96dfddbSToshi Kani } 
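/*
 * Editor's note: the sketch below is illustrative and not part of the
 * original file. It shows how the two checks above could be combined by a
 * hypothetical caller before attempting an offline; in-tree users reach
 * these helpers through the memory block sysfs interface instead. The
 * function name is made up, and the answer from is_mem_section_removable()
 * is only advisory - it can go stale as soon as it is returned.
 */
#if 0
static int example_try_offline_section(unsigned long start_pfn)
{
	unsigned long nr_pages = PAGES_PER_SECTION;
	unsigned long valid_start, valid_end;

	/* Likely-removable heuristic only; no locks are held. */
	if (!is_mem_section_removable(start_pfn, nr_pages))
		return -EBUSY;

	/* __offline_pages() insists on a single-zone range. */
	if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
				  &valid_start, &valid_end))
		return -EINVAL;

	return offline_pages(start_pfn, nr_pages);
}
#endif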
12840c0e6195SKAMEZAWA Hiroyuki 12850c0e6195SKAMEZAWA Hiroyuki /* 12860efadf48SYisheng Xie * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, 12870efadf48SYisheng Xie * non-lru movable pages and hugepages). We scan pfn because it's much 12880efadf48SYisheng Xie * easier than scanning over linked list. This function returns the pfn 12890efadf48SYisheng Xie * of the first found movable page if it's found, otherwise 0. 12900c0e6195SKAMEZAWA Hiroyuki */ 1291c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 12920c0e6195SKAMEZAWA Hiroyuki { 12930c0e6195SKAMEZAWA Hiroyuki unsigned long pfn; 1294eeb0efd0SOscar Salvador 12950c0e6195SKAMEZAWA Hiroyuki for (pfn = start; pfn < end; pfn++) { 1296eeb0efd0SOscar Salvador struct page *page, *head; 1297eeb0efd0SOscar Salvador unsigned long skip; 1298eeb0efd0SOscar Salvador 1299eeb0efd0SOscar Salvador if (!pfn_valid(pfn)) 1300eeb0efd0SOscar Salvador continue; 13010c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn); 13020c0e6195SKAMEZAWA Hiroyuki if (PageLRU(page)) 13030c0e6195SKAMEZAWA Hiroyuki return pfn; 13040efadf48SYisheng Xie if (__PageMovable(page)) 13050efadf48SYisheng Xie return pfn; 1306eeb0efd0SOscar Salvador 1307eeb0efd0SOscar Salvador if (!PageHuge(page)) 1308eeb0efd0SOscar Salvador continue; 1309eeb0efd0SOscar Salvador head = compound_head(page); 131039186cbeSOscar Salvador if (page_huge_active(head)) 1311c8721bbbSNaoya Horiguchi return pfn; 1312*d8c6546bSMatthew Wilcox (Oracle) skip = compound_nr(head) - (page - head); 1313eeb0efd0SOscar Salvador pfn += skip - 1; 13140c0e6195SKAMEZAWA Hiroyuki } 13150c0e6195SKAMEZAWA Hiroyuki return 0; 13160c0e6195SKAMEZAWA Hiroyuki } 13170c0e6195SKAMEZAWA Hiroyuki 1318666feb21SMichal Hocko static struct page *new_node_page(struct page *page, unsigned long private) 1319394e31d2SXishi Qiu { 1320394e31d2SXishi Qiu int nid = page_to_nid(page); 1321231e97e2SLi Zhong nodemask_t nmask = node_states[N_MEMORY]; 13227f252f27SMichal Hocko 13237f252f27SMichal Hocko /* 13247f252f27SMichal Hocko * try to allocate from a different node but reuse this node if there 13257f252f27SMichal Hocko * are no other online nodes to be used (e.g. 
we are offlining a part 13267f252f27SMichal Hocko * of the only existing node) 13277f252f27SMichal Hocko */ 13287f252f27SMichal Hocko node_clear(nid, nmask); 13297f252f27SMichal Hocko if (nodes_empty(nmask)) 13307f252f27SMichal Hocko node_set(nid, nmask); 1331394e31d2SXishi Qiu 13328b913238SMichal Hocko return new_page_nodemask(page, nid, &nmask); 1333394e31d2SXishi Qiu } 1334394e31d2SXishi Qiu 13350c0e6195SKAMEZAWA Hiroyuki static int 13360c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) 13370c0e6195SKAMEZAWA Hiroyuki { 13380c0e6195SKAMEZAWA Hiroyuki unsigned long pfn; 13390c0e6195SKAMEZAWA Hiroyuki struct page *page; 13400c0e6195SKAMEZAWA Hiroyuki int ret = 0; 13410c0e6195SKAMEZAWA Hiroyuki LIST_HEAD(source); 13420c0e6195SKAMEZAWA Hiroyuki 1343a85009c3SMichal Hocko for (pfn = start_pfn; pfn < end_pfn; pfn++) { 13440c0e6195SKAMEZAWA Hiroyuki if (!pfn_valid(pfn)) 13450c0e6195SKAMEZAWA Hiroyuki continue; 13460c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn); 1347c8721bbbSNaoya Horiguchi 1348c8721bbbSNaoya Horiguchi if (PageHuge(page)) { 1349c8721bbbSNaoya Horiguchi struct page *head = compound_head(page); 1350*d8c6546bSMatthew Wilcox (Oracle) pfn = page_to_pfn(head) + compound_nr(head) - 1; 1351daf3538aSOscar Salvador isolate_huge_page(head, &source); 1352c8721bbbSNaoya Horiguchi continue; 135394723aafSMichal Hocko } else if (PageTransHuge(page)) 13548135d892SNaoya Horiguchi pfn = page_to_pfn(compound_head(page)) 13558135d892SNaoya Horiguchi + hpage_nr_pages(page) - 1; 1356c8721bbbSNaoya Horiguchi 1357b15c8726SMichal Hocko /* 1358b15c8726SMichal Hocko * HWPoison pages have elevated reference counts so the migration would 1359b15c8726SMichal Hocko * fail on them. It also doesn't make any sense to migrate them in the 1360b15c8726SMichal Hocko * first place. Still try to unmap such a page in case it is still mapped 1361b15c8726SMichal Hocko * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep 1362b15c8726SMichal Hocko * the unmap as the catch all safety net). 1363b15c8726SMichal Hocko */ 1364b15c8726SMichal Hocko if (PageHWPoison(page)) { 1365b15c8726SMichal Hocko if (WARN_ON(PageLRU(page))) 1366b15c8726SMichal Hocko isolate_lru_page(page); 1367b15c8726SMichal Hocko if (page_mapped(page)) 1368b15c8726SMichal Hocko try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS); 1369b15c8726SMichal Hocko continue; 1370b15c8726SMichal Hocko } 1371b15c8726SMichal Hocko 1372700c2a46SKonstantin Khlebnikov if (!get_page_unless_zero(page)) 13730c0e6195SKAMEZAWA Hiroyuki continue; 13740c0e6195SKAMEZAWA Hiroyuki /* 13750efadf48SYisheng Xie * We can skip free pages. And we can deal with pages on 13760efadf48SYisheng Xie * LRU and non-lru movable pages. 
13770c0e6195SKAMEZAWA Hiroyuki */
13780efadf48SYisheng Xie if (PageLRU(page))
137962695a84SNick Piggin ret = isolate_lru_page(page);
13800efadf48SYisheng Xie else
13810efadf48SYisheng Xie ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
13820c0e6195SKAMEZAWA Hiroyuki if (!ret) { /* Success */
138362695a84SNick Piggin list_add_tail(&page->lru, &source);
13840efadf48SYisheng Xie if (!__PageMovable(page))
1385599d0c95SMel Gorman inc_node_page_state(page, NR_ISOLATED_ANON +
13866d9c285aSKOSAKI Motohiro page_is_file_cache(page));
13876d9c285aSKOSAKI Motohiro
13880c0e6195SKAMEZAWA Hiroyuki } else {
13892932c8b0SMichal Hocko pr_warn("failed to isolate pfn %lx\n", pfn);
13900efadf48SYisheng Xie dump_page(page, "isolation failed");
13911723058eSOscar Salvador }
1392700c2a46SKonstantin Khlebnikov put_page(page);
13930c0e6195SKAMEZAWA Hiroyuki }
1394f3ab2636SBob Liu if (!list_empty(&source)) {
1395394e31d2SXishi Qiu /* Allocate a new page from the nearest neighbor node */
1396394e31d2SXishi Qiu ret = migrate_pages(&source, new_node_page, NULL, 0,
13979c620e2bSHugh Dickins MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
13982932c8b0SMichal Hocko if (ret) {
13992932c8b0SMichal Hocko list_for_each_entry(page, &source, lru) {
14002932c8b0SMichal Hocko pr_warn("migrating pfn %lx failed ret:%d ",
14012932c8b0SMichal Hocko page_to_pfn(page), ret);
14022932c8b0SMichal Hocko dump_page(page, "migration failure");
14032932c8b0SMichal Hocko }
1404c8721bbbSNaoya Horiguchi putback_movable_pages(&source);
1405f3ab2636SBob Liu }
14062932c8b0SMichal Hocko }
14071723058eSOscar Salvador
14080c0e6195SKAMEZAWA Hiroyuki return ret;
14090c0e6195SKAMEZAWA Hiroyuki }
14100c0e6195SKAMEZAWA Hiroyuki
14110c0e6195SKAMEZAWA Hiroyuki /*
14120c0e6195SKAMEZAWA Hiroyuki * Remove pages from free_area[] and mark them all as Reserved.
14130c0e6195SKAMEZAWA Hiroyuki */
14140c0e6195SKAMEZAWA Hiroyuki static int
14150c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
14160c0e6195SKAMEZAWA Hiroyuki void *data)
14170c0e6195SKAMEZAWA Hiroyuki {
14185557c766SMichal Hocko unsigned long *offlined_pages = (unsigned long *)data;
14190c0e6195SKAMEZAWA Hiroyuki
14205557c766SMichal Hocko *offlined_pages += __offline_isolated_pages(start, start + nr_pages);
14215557c766SMichal Hocko return 0;
14220c0e6195SKAMEZAWA Hiroyuki }
14230c0e6195SKAMEZAWA Hiroyuki
14240c0e6195SKAMEZAWA Hiroyuki /*
14250c0e6195SKAMEZAWA Hiroyuki * Check that all pages in the range, recorded as a memory resource, are isolated.
14260c0e6195SKAMEZAWA Hiroyuki */ 14270c0e6195SKAMEZAWA Hiroyuki static int 14280c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, 14290c0e6195SKAMEZAWA Hiroyuki void *data) 14300c0e6195SKAMEZAWA Hiroyuki { 14315557c766SMichal Hocko return test_pages_isolated(start_pfn, start_pfn + nr_pages, true); 14320c0e6195SKAMEZAWA Hiroyuki } 14330c0e6195SKAMEZAWA Hiroyuki 1434c5320926STang Chen static int __init cmdline_parse_movable_node(char *p) 1435c5320926STang Chen { 14364932381eSMichal Hocko #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 143755ac590cSTang Chen movable_node_enabled = true; 14384932381eSMichal Hocko #else 14394932381eSMichal Hocko pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n"); 14404932381eSMichal Hocko #endif 1441c5320926STang Chen return 0; 1442c5320926STang Chen } 1443c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node); 1444c5320926STang Chen 1445d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */ 1446d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages, 1447d9713679SLai Jiangshan struct zone *zone, struct memory_notify *arg) 1448d9713679SLai Jiangshan { 1449d9713679SLai Jiangshan struct pglist_data *pgdat = zone->zone_pgdat; 1450d9713679SLai Jiangshan unsigned long present_pages = 0; 145186b27beaSOscar Salvador enum zone_type zt; 1452d9713679SLai Jiangshan 145398fa15f3SAnshuman Khandual arg->status_change_nid = NUMA_NO_NODE; 145498fa15f3SAnshuman Khandual arg->status_change_nid_normal = NUMA_NO_NODE; 145598fa15f3SAnshuman Khandual arg->status_change_nid_high = NUMA_NO_NODE; 145686b27beaSOscar Salvador 145786b27beaSOscar Salvador /* 145886b27beaSOscar Salvador * Check whether node_states[N_NORMAL_MEMORY] will be changed. 145986b27beaSOscar Salvador * If the memory to be offline is within the range 146086b27beaSOscar Salvador * [0..ZONE_NORMAL], and it is the last present memory there, 146186b27beaSOscar Salvador * the zones in that range will become empty after the offlining, 146286b27beaSOscar Salvador * thus we can determine that we need to clear the node from 146386b27beaSOscar Salvador * node_states[N_NORMAL_MEMORY]. 146486b27beaSOscar Salvador */ 146586b27beaSOscar Salvador for (zt = 0; zt <= ZONE_NORMAL; zt++) 146686b27beaSOscar Salvador present_pages += pgdat->node_zones[zt].present_pages; 146786b27beaSOscar Salvador if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) 146886b27beaSOscar Salvador arg->status_change_nid_normal = zone_to_nid(zone); 1469d9713679SLai Jiangshan 14706715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM 14716715ddf9SLai Jiangshan /* 147286b27beaSOscar Salvador * node_states[N_HIGH_MEMORY] contains nodes which 147386b27beaSOscar Salvador * have normal memory or high memory. 147486b27beaSOscar Salvador * Here we add the present_pages belonging to ZONE_HIGHMEM. 147586b27beaSOscar Salvador * If the zone is within the range of [0..ZONE_HIGHMEM), and 147686b27beaSOscar Salvador * we determine that the zones in that range become empty, 147786b27beaSOscar Salvador * we need to clear the node for N_HIGH_MEMORY. 
14786715ddf9SLai Jiangshan */
147986b27beaSOscar Salvador present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
148086b27beaSOscar Salvador if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
14816715ddf9SLai Jiangshan arg->status_change_nid_high = zone_to_nid(zone);
14826715ddf9SLai Jiangshan #endif
14836715ddf9SLai Jiangshan
1484d9713679SLai Jiangshan /*
148586b27beaSOscar Salvador * We have accounted the pages from [0..ZONE_NORMAL), and
148686b27beaSOscar Salvador * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM
148786b27beaSOscar Salvador * as well.
148886b27beaSOscar Salvador * Here we count the possible pages from ZONE_MOVABLE.
148986b27beaSOscar Salvador * If, after having accounted for all the pages, we see that the
149086b27beaSOscar Salvador * nr_pages to be offlined is greater than or equal to the accounted pages,
149186b27beaSOscar Salvador * we know that the node will become empty, and so, we can clear
149286b27beaSOscar Salvador * it for N_MEMORY as well.
1493d9713679SLai Jiangshan */
149486b27beaSOscar Salvador present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
1495d9713679SLai Jiangshan
1496d9713679SLai Jiangshan if (nr_pages >= present_pages)
1497d9713679SLai Jiangshan arg->status_change_nid = zone_to_nid(zone);
1498d9713679SLai Jiangshan }
1499d9713679SLai Jiangshan
1500d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1501d9713679SLai Jiangshan {
1502d9713679SLai Jiangshan if (arg->status_change_nid_normal >= 0)
1503d9713679SLai Jiangshan node_clear_state(node, N_NORMAL_MEMORY);
1504d9713679SLai Jiangshan
1505cf01f6f5SOscar Salvador if (arg->status_change_nid_high >= 0)
1506d9713679SLai Jiangshan node_clear_state(node, N_HIGH_MEMORY);
15076715ddf9SLai Jiangshan
1508cf01f6f5SOscar Salvador if (arg->status_change_nid >= 0)
15096715ddf9SLai Jiangshan node_clear_state(node, N_MEMORY);
1510d9713679SLai Jiangshan }
1511d9713679SLai Jiangshan
1512a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn,
1513ecde0f3eSMichal Hocko unsigned long end_pfn)
15140c0e6195SKAMEZAWA Hiroyuki {
1515ecde0f3eSMichal Hocko unsigned long pfn, nr_pages;
15165557c766SMichal Hocko unsigned long offlined_pages = 0;
15179b7ea46aSQian Cai int ret, node, nr_isolate_pageblock;
1518d702909fSCody P Schafer unsigned long flags;
1519a96dfddbSToshi Kani unsigned long valid_start, valid_end;
15200c0e6195SKAMEZAWA Hiroyuki struct zone *zone;
15217b78d335SYasunori Goto struct memory_notify arg;
152279605093SMichal Hocko char *reason;
15230c0e6195SKAMEZAWA Hiroyuki
1524381eab4aSDavid Hildenbrand mem_hotplug_begin();
1525381eab4aSDavid Hildenbrand
15260c0e6195SKAMEZAWA Hiroyuki /* This makes hotplug much easier... and readable.
15270c0e6195SKAMEZAWA Hiroyuki We assume this for now. */
1528381eab4aSDavid Hildenbrand if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
1529381eab4aSDavid Hildenbrand &valid_end)) {
153079605093SMichal Hocko ret = -EINVAL;
153179605093SMichal Hocko reason = "multizone range";
153279605093SMichal Hocko goto failed_removal;
1533381eab4aSDavid Hildenbrand }
15347b78d335SYasunori Goto
1535a96dfddbSToshi Kani zone = page_zone(pfn_to_page(valid_start));
15367b78d335SYasunori Goto node = zone_to_nid(zone);
15377b78d335SYasunori Goto nr_pages = end_pfn - start_pfn;
15387b78d335SYasunori Goto
15390c0e6195SKAMEZAWA Hiroyuki /* set above range as isolated */
1540b023f468SWen Congyang ret = start_isolate_page_range(start_pfn, end_pfn,
1541d381c547SMichal Hocko MIGRATE_MOVABLE,
1542d381c547SMichal Hocko SKIP_HWPOISON | REPORT_FAILURE);
15439b7ea46aSQian Cai if (ret < 0) {
154479605093SMichal Hocko reason = "failure to isolate range";
154579605093SMichal Hocko goto failed_removal;
1546381eab4aSDavid Hildenbrand }
15479b7ea46aSQian Cai nr_isolate_pageblock = ret;
15487b78d335SYasunori Goto
15497b78d335SYasunori Goto arg.start_pfn = start_pfn;
15507b78d335SYasunori Goto arg.nr_pages = nr_pages;
1551d9713679SLai Jiangshan node_states_check_changes_offline(nr_pages, zone, &arg);
15527b78d335SYasunori Goto
15537b78d335SYasunori Goto ret = memory_notify(MEM_GOING_OFFLINE, &arg);
15547b78d335SYasunori Goto ret = notifier_to_errno(ret);
155579605093SMichal Hocko if (ret) {
155679605093SMichal Hocko reason = "notifier failure";
155779605093SMichal Hocko goto failed_removal_isolated;
155879605093SMichal Hocko }
15597b78d335SYasunori Goto
1560bb8965bdSMichal Hocko do {
1561bb8965bdSMichal Hocko for (pfn = start_pfn; pfn;) {
156279605093SMichal Hocko if (signal_pending(current)) {
1563bb8965bdSMichal Hocko ret = -EINTR;
156479605093SMichal Hocko reason = "signal backoff";
156579605093SMichal Hocko goto failed_removal_isolated;
156679605093SMichal Hocko }
156772b39cfcSMichal Hocko
15680c0e6195SKAMEZAWA Hiroyuki cond_resched();
15699852a721SMichal Hocko lru_add_drain_all();
15700c0e6195SKAMEZAWA Hiroyuki
1571bb8965bdSMichal Hocko pfn = scan_movable_pages(pfn, end_pfn);
1572bb8965bdSMichal Hocko if (pfn) {
1573bb8965bdSMichal Hocko /*
1574bb8965bdSMichal Hocko * TODO: fatal migration failures should bail
1575bb8965bdSMichal Hocko * out
1576bb8965bdSMichal Hocko */
1577bb8965bdSMichal Hocko do_migrate_range(pfn, end_pfn);
1578bb8965bdSMichal Hocko }
15790c0e6195SKAMEZAWA Hiroyuki }
158072b39cfcSMichal Hocko
1581c8721bbbSNaoya Horiguchi /*
1582bb8965bdSMichal Hocko * Dissolve free hugepages in the memory block before actually
1583bb8965bdSMichal Hocko * offlining, in order to keep hugetlbfs's object counting
1584bb8965bdSMichal Hocko * consistent.
1585c8721bbbSNaoya Horiguchi */
1586082d5b6bSGerald Schaefer ret = dissolve_free_huge_pages(start_pfn, end_pfn);
158779605093SMichal Hocko if (ret) {
158879605093SMichal Hocko reason = "failure to dissolve huge pages";
158979605093SMichal Hocko goto failed_removal_isolated;
159079605093SMichal Hocko }
15910c0e6195SKAMEZAWA Hiroyuki /* check again */
15925557c766SMichal Hocko ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
15935557c766SMichal Hocko NULL, check_pages_isolated_cb);
15945557c766SMichal Hocko } while (ret);
1595bb8965bdSMichal Hocko
1596b3834be5SAdam Buchbinder /* Ok, all of our target is isolated.
15970c0e6195SKAMEZAWA Hiroyuki We cannot do rollback at this point. */
15985557c766SMichal Hocko walk_system_ram_range(start_pfn, end_pfn - start_pfn,
15995557c766SMichal Hocko &offlined_pages, offline_isolated_pages_cb);
16005557c766SMichal Hocko pr_info("Offlined Pages %ld\n", offlined_pages);
16019b7ea46aSQian Cai /*
16029b7ea46aSQian Cai * Onlining will reset the pagetype flags and make the migrate type
16039b7ea46aSQian Cai * MOVABLE, so we just need to decrease the zone's isolated
16049b7ea46aSQian Cai * pageblock counter here.
16059b7ea46aSQian Cai */
16069b7ea46aSQian Cai spin_lock_irqsave(&zone->lock, flags);
16079b7ea46aSQian Cai zone->nr_isolate_pageblock -= nr_isolate_pageblock;
16089b7ea46aSQian Cai spin_unlock_irqrestore(&zone->lock, flags);
16099b7ea46aSQian Cai
16100c0e6195SKAMEZAWA Hiroyuki /* removal success */
16113dcc0571SJiang Liu adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
16120c0e6195SKAMEZAWA Hiroyuki zone->present_pages -= offlined_pages;
1613d702909fSCody P Schafer
1614d702909fSCody P Schafer pgdat_resize_lock(zone->zone_pgdat, &flags);
16150c0e6195SKAMEZAWA Hiroyuki zone->zone_pgdat->node_present_pages -= offlined_pages;
1616d702909fSCody P Schafer pgdat_resize_unlock(zone->zone_pgdat, &flags);
16177b78d335SYasunori Goto
16181b79acc9SKOSAKI Motohiro init_per_zone_wmark_min();
16191b79acc9SKOSAKI Motohiro
16201e8537baSXishi Qiu if (!populated_zone(zone)) {
1621340175b7SJiang Liu zone_pcp_reset(zone);
162272675e13SMichal Hocko build_all_zonelists(NULL);
16231e8537baSXishi Qiu } else
16241e8537baSXishi Qiu zone_pcp_update(zone);
1625340175b7SJiang Liu
1626d9713679SLai Jiangshan node_states_clear_node(node, &arg);
1627698b1b30SVlastimil Babka if (arg.status_change_nid >= 0) {
16288fe23e05SDavid Rientjes kswapd_stop(node);
1629698b1b30SVlastimil Babka kcompactd_stop(node);
1630698b1b30SVlastimil Babka }
1631bce7394aSMinchan Kim
16320c0e6195SKAMEZAWA Hiroyuki vm_total_pages = nr_free_pagecache_pages();
16330c0e6195SKAMEZAWA Hiroyuki writeback_set_ratelimit();
16347b78d335SYasunori Goto
16357b78d335SYasunori Goto memory_notify(MEM_OFFLINE, &arg);
1636381eab4aSDavid Hildenbrand mem_hotplug_done();
16370c0e6195SKAMEZAWA Hiroyuki return 0;
16380c0e6195SKAMEZAWA Hiroyuki
163979605093SMichal Hocko failed_removal_isolated:
164079605093SMichal Hocko undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1641c4efe484SQian Cai memory_notify(MEM_CANCEL_OFFLINE, &arg);
16420c0e6195SKAMEZAWA Hiroyuki failed_removal:
164379605093SMichal Hocko pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
1644a62e2f4fSBjorn Helgaas (unsigned long long) start_pfn << PAGE_SHIFT,
164579605093SMichal Hocko ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
164679605093SMichal Hocko reason);
16470c0e6195SKAMEZAWA Hiroyuki /* pushback to free area */
1648381eab4aSDavid Hildenbrand mem_hotplug_done();
16490c0e6195SKAMEZAWA Hiroyuki return ret;
16500c0e6195SKAMEZAWA Hiroyuki }
165171088785SBadari Pulavarty
1652a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1653a16cee10SWen Congyang {
1654ecde0f3eSMichal Hocko return __offline_pages(start_pfn, start_pfn + nr_pages);
1655a16cee10SWen Congyang }
1656a16cee10SWen Congyang
1657d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1658bbc76be6SWen Congyang {
1659bbc76be6SWen Congyang int ret = !is_memblock_offlined(mem);
1660bbc76be6SWen Congyang
1661349daa0fSRandy Dunlap if (unlikely(ret)) {
1662349daa0fSRandy Dunlap phys_addr_t beginpa, endpa;
1663349daa0fSRandy Dunlap
1664349daa0fSRandy Dunlap beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1665349daa0fSRandy Dunlap endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1666756a025fSJoe Perches pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
1667349daa0fSRandy Dunlap &beginpa, &endpa);
1668bbc76be6SWen Congyang
1669eca499abSPavel Tatashin return -EBUSY;
1670eca499abSPavel Tatashin }
1671eca499abSPavel Tatashin return 0;
1672bbc76be6SWen Congyang }
1673bbc76be6SWen Congyang
16740f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat)
167560a5a19eSTang Chen {
167660a5a19eSTang Chen int cpu;
167760a5a19eSTang Chen
167860a5a19eSTang Chen for_each_present_cpu(cpu) {
167960a5a19eSTang Chen if (cpu_to_node(cpu) == pgdat->node_id)
168060a5a19eSTang Chen /*
168160a5a19eSTang Chen * the cpu on this node isn't removed, and we can't
168260a5a19eSTang Chen * offline this node.
168360a5a19eSTang Chen */
168460a5a19eSTang Chen return -EBUSY;
168560a5a19eSTang Chen }
168660a5a19eSTang Chen
168760a5a19eSTang Chen return 0;
168860a5a19eSTang Chen }
168960a5a19eSTang Chen
16900f1cfe9dSToshi Kani /**
16910f1cfe9dSToshi Kani * try_offline_node
1692e8b098fcSMike Rapoport * @nid: the node ID
16930f1cfe9dSToshi Kani *
16940f1cfe9dSToshi Kani * Offline a node if all memory sections and cpus of the node are removed.
16950f1cfe9dSToshi Kani *
16960f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
16970f1cfe9dSToshi Kani * and online/offline operations before this call.
16980f1cfe9dSToshi Kani */
169990b30cdcSWen Congyang void try_offline_node(int nid)
170060a5a19eSTang Chen {
1701d822b86aSWen Congyang pg_data_t *pgdat = NODE_DATA(nid);
1702d822b86aSWen Congyang unsigned long start_pfn = pgdat->node_start_pfn;
1703d822b86aSWen Congyang unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
170460a5a19eSTang Chen unsigned long pfn;
170560a5a19eSTang Chen
170660a5a19eSTang Chen for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
170760a5a19eSTang Chen unsigned long section_nr = pfn_to_section_nr(pfn);
170860a5a19eSTang Chen
170960a5a19eSTang Chen if (!present_section_nr(section_nr))
171060a5a19eSTang Chen continue;
171160a5a19eSTang Chen
171260a5a19eSTang Chen if (pfn_to_nid(pfn) != nid)
171360a5a19eSTang Chen continue;
171460a5a19eSTang Chen
171560a5a19eSTang Chen /*
171660a5a19eSTang Chen * some memory sections of this node are not removed, and we
171760a5a19eSTang Chen * can't offline the node now.
171860a5a19eSTang Chen */
171960a5a19eSTang Chen return;
172060a5a19eSTang Chen }
172160a5a19eSTang Chen
172246a3679bSMichal Hocko if (check_cpu_on_node(pgdat))
172360a5a19eSTang Chen return;
172460a5a19eSTang Chen
172560a5a19eSTang Chen /*
172660a5a19eSTang Chen * all memory/cpu of this node are removed, we can offline this
172760a5a19eSTang Chen * node now.
172860a5a19eSTang Chen */
172960a5a19eSTang Chen node_set_offline(nid);
173060a5a19eSTang Chen unregister_one_node(nid);
173160a5a19eSTang Chen }
173290b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
173360a5a19eSTang Chen
1734d9eb1417SDavid Hildenbrand static void __release_memory_resource(resource_size_t start,
1735d9eb1417SDavid Hildenbrand resource_size_t size)
1736d9eb1417SDavid Hildenbrand {
1737d9eb1417SDavid Hildenbrand int ret;
1738d9eb1417SDavid Hildenbrand
1739d9eb1417SDavid Hildenbrand /*
1740d9eb1417SDavid Hildenbrand * When removing memory in the same granularity as it was added,
1741d9eb1417SDavid Hildenbrand * this function never fails. It might only fail if resources
1742d9eb1417SDavid Hildenbrand * have to be adjusted or split. We'll ignore the error, as
1743d9eb1417SDavid Hildenbrand * removing memory cannot fail.
1744d9eb1417SDavid Hildenbrand */
1745d9eb1417SDavid Hildenbrand ret = release_mem_region_adjustable(&iomem_resource, start, size);
1746d9eb1417SDavid Hildenbrand if (ret) {
1747d9eb1417SDavid Hildenbrand resource_size_t endres = start + size - 1;
1748d9eb1417SDavid Hildenbrand
1749d9eb1417SDavid Hildenbrand pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
1750d9eb1417SDavid Hildenbrand &start, &endres, ret);
1751d9eb1417SDavid Hildenbrand }
1752d9eb1417SDavid Hildenbrand }
1753d9eb1417SDavid Hildenbrand
1754eca499abSPavel Tatashin static int __ref try_remove_memory(int nid, u64 start, u64 size)
1755bbc76be6SWen Congyang {
1756eca499abSPavel Tatashin int rc = 0;
1757993c1aadSWen Congyang
175827356f54SToshi Kani BUG_ON(check_hotplug_memory_range(start, size));
175927356f54SToshi Kani
1760bfc8c901SVladimir Davydov mem_hotplug_begin();
17616677e3eaSYasuaki Ishimatsu
17626677e3eaSYasuaki Ishimatsu /*
1763242831ebSRafael J. Wysocki * All memory blocks must be offlined before removing memory. Check
1764eca499abSPavel Tatashin * whether all memory blocks in question are offline and return error
1765242831ebSRafael J. Wysocki * if this is not the case.
17666677e3eaSYasuaki Ishimatsu */
1767fbcf73ceSDavid Hildenbrand rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
1768eca499abSPavel Tatashin if (rc)
1769eca499abSPavel Tatashin goto done;
17706677e3eaSYasuaki Ishimatsu
177146c66c4bSYasuaki Ishimatsu /* remove memmap entry */
177246c66c4bSYasuaki Ishimatsu firmware_map_remove(start, start + size, "System RAM");
1773f9126ab9SXishi Qiu memblock_free(start, size);
1774f9126ab9SXishi Qiu memblock_remove(start, size);
177546c66c4bSYasuaki Ishimatsu
17764c4b7f9bSDavid Hildenbrand /* remove memory block devices before removing memory */
17774c4b7f9bSDavid Hildenbrand remove_memory_block_devices(start, size);
17784c4b7f9bSDavid Hildenbrand
17792c2a5af6SOscar Salvador arch_remove_memory(nid, start, size, NULL);
1780d9eb1417SDavid Hildenbrand __release_memory_resource(start, size);
178124d335caSWen Congyang
178260a5a19eSTang Chen try_offline_node(nid);
178360a5a19eSTang Chen
1784eca499abSPavel Tatashin done:
1785bfc8c901SVladimir Davydov mem_hotplug_done();
1786eca499abSPavel Tatashin return rc;
178771088785SBadari Pulavarty }
1788d15e5926SDavid Hildenbrand
1789eca499abSPavel Tatashin /**
1790eca499abSPavel Tatashin * remove_memory
1791eca499abSPavel Tatashin * @nid: the node ID
1792eca499abSPavel Tatashin * @start: physical address of the region to remove
1793eca499abSPavel Tatashin * @size: size of the region to remove
1794eca499abSPavel Tatashin *
1795eca499abSPavel Tatashin * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1796eca499abSPavel Tatashin * and online/offline operations before this call, as required by
1797eca499abSPavel Tatashin * try_offline_node().
1798eca499abSPavel Tatashin */
1799eca499abSPavel Tatashin void __remove_memory(int nid, u64 start, u64 size)
1800d15e5926SDavid Hildenbrand {
1801eca499abSPavel Tatashin
1802eca499abSPavel Tatashin /*
1803eca499abSPavel Tatashin * Trigger BUG() if some memory is not offlined prior to calling this
1804eca499abSPavel Tatashin * function.
1805eca499abSPavel Tatashin */
1806eca499abSPavel Tatashin if (try_remove_memory(nid, start, size))
1807eca499abSPavel Tatashin BUG();
1808eca499abSPavel Tatashin }
1809eca499abSPavel Tatashin
1810eca499abSPavel Tatashin /*
1811eca499abSPavel Tatashin * Remove memory if every memory block is offline, otherwise return -EBUSY if
1812eca499abSPavel Tatashin * some memory is not offline
1813eca499abSPavel Tatashin */
1814eca499abSPavel Tatashin int remove_memory(int nid, u64 start, u64 size)
1815eca499abSPavel Tatashin {
1816eca499abSPavel Tatashin int rc;
1817eca499abSPavel Tatashin
1818d15e5926SDavid Hildenbrand lock_device_hotplug();
1819eca499abSPavel Tatashin rc = try_remove_memory(nid, start, size);
1820d15e5926SDavid Hildenbrand unlock_device_hotplug();
1821eca499abSPavel Tatashin
1822eca499abSPavel Tatashin return rc;
1823d15e5926SDavid Hildenbrand }
182471088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
1825aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
1826
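/*
 * Editor's note: the sketch below is illustrative and not part of the
 * original file. It pairs the exported entry points from this file the way
 * a hypothetical driver might; real callers (ACPI memory hotplug, balloon
 * drivers, etc.) obtain nid/start/size from their bus or firmware. The
 * function name is made up.
 */
#if 0
static int example_hotplug_cycle(int nid, u64 start, u64 size)
{
	/* add_memory() takes device_hotplug_lock internally. */
	int rc = add_memory(nid, start, size);

	if (rc)
		return rc;

	/*
	 * remove_memory() (CONFIG_MEMORY_HOTREMOVE only) returns -EBUSY
	 * unless every memory block in the range was offlined first,
	 * e.g. via the memory block "state" sysfs attribute.
	 */
	return remove_memory(nid, start, size);
}
#endif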