/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}
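
/*
 * Locking sketch (illustrative, not a complete rule set): paths that add
 * or remove memory wrap the whole operation in mem_hotplug_begin() /
 * mem_hotplug_done(), while readers that only need the memory layout to
 * stay stable bracket their access with the percpu-rwsem read side:
 *
 *	get_online_mems();
 *	... walk sections/zones that must not disappear ...
 *	put_online_mems();
 */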

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}
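
/*
 * Rough picture of the bootmem-info lifecycle implemented above:
 * get_page_bootmem() tags a boot-time page with a type (SECTION_INFO,
 * MIX_SECTION_INFO or NODE_INFO) in page->freelist and takes a
 * reference; put_page_bootmem() drops the reference and, once the last
 * user is gone, returns the page to the buddy allocator via
 * free_reserved_page().
 */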

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN. To avoid registering a pfn
		 * against multiple nodes, we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
		bool want_memblock)
{
	int ret;
	int i;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
	if (ret < 0)
		return ret;

	/*
	 * Make all the pages reserved so that nobody will stumble over
	 * half-initialized state.
	 * FIXME: We also have to associate them with a node because
	 * pfn_to_node() relies on the page having the proper node.
	 */
	for (i = 0; i < PAGES_PER_SECTION; i++) {
		unsigned long pfn = phys_start_pfn + i;
		struct page *page;
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		set_page_node(page, nid);
		SetPageReserved(page);
	}

	if (!want_memblock)
		return 0;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
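
/*
 * Hotplug operates on whole memory sections. As an illustration (the
 * exact numbers are architecture specific): with SECTION_SIZE_BITS == 27
 * and 4 KiB pages, a section covers 128 MiB, PAGES_PER_SECTION is 32768,
 * and pfn_to_section_nr(pfn) is simply pfn / PAGES_PER_SECTION.
 */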

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
			unsigned long nr_pages, bool want_memblock)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	struct vmem_altmap *altmap;

	/* align the hot-added range to whole sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
	if (altmap) {
		/*
		 * Validate that the altmap is within bounds of the total
		 * request.
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, section_nr_to_pfn(i), want_memblock);

		/*
		 * EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}
	vmemmap_populate_print_last();
out:
	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* start from the last pfn of the range, i.e. the end of a section */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the zone to start there.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the zone to end there.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, it only creates a hole in the zone. In that case we
	 * need not change the zone span, but the zone may now contain
	 * nothing but holes, so check whether any valid section is left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If this is the section being removed, keep looking */
		if (start_pfn == pfn)
			continue;

		/* We found another valid section; nothing more to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}
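
/*
 * Worked example (the pfns are made up): a zone spanning pfns
 * [0x8000, 0x18000) loses its first section [0x8000, 0x10000).
 * find_smallest_section_pfn() locates the next valid section at
 * 0x10000, so zone_start_pfn becomes 0x10000 and spanned_pages shrinks
 * to 0x8000. shrink_pgdat_span() below applies the same idea to the
 * node as a whole.
 */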

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the pgdat to start there.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we
		 * need to shrink pgdat->node_spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the pgdat to end there.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, it only creates a hole in the pgdat. In that case we
	 * need not change the pgdat span, but the pgdat may now contain
	 * nothing but holes, so check whether any valid section is left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If this is the section being removed, keep looking */
		if (start_pfn == pfn)
			continue;

		/* We found another valid section; nothing more to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);
	sparse_remove_one_section(zone, ms, map_offset);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be a multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	unsigned long map_offset = 0;
	int sections_to_remove, ret = 0;

	/* In the ZONE_DEVICE case the device driver owns the memory region */
	if (is_dev_zone(zone)) {
		struct page *page = pfn_to_page(phys_start_pfn);
		struct vmem_altmap *altmap;

		altmap = to_vmem_altmap((unsigned long) page);
		if (altmap)
			map_offset = vmem_altmap_offset(altmap);
	} else {
		resource_size_t start, size;

		start = phys_start_pfn << PAGE_SHIFT;
		size = nr_pages * PAGE_SIZE;

		ret = release_mem_region_adjustable(&iomem_resource, start,
					size);
		if (ret) {
			resource_size_t endres = start + size - 1;

			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
					&start, &endres, ret);
		}
	}

	clear_zone_contiguous(zone);

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
		map_offset = 0;
		if (ret)
			break;
	}

	set_zone_contiguous(zone);

	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}
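
/*
 * Illustrative only: a driver that wants to intercept freshly onlined
 * pages (the Hyper-V balloon driver does this) installs its own
 * callback and later restores the default:
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 *
 * my_online_page() is a hypothetical callback; a real one typically
 * calls the __online_page_*() helpers above for every page it does not
 * want to hold back.
 */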

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}

/* check which states of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * so set zone_last to ZONE_NORMAL.
	 *
	 * If we have neither HIGHMEM nor a movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to add the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is onlined.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have a movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * so set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have a movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * so set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * add it to node_states[N_MEMORY] after the memory is onlined.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}
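
/*
 * Quick example of the resize math above (the pfns are made up): a zone
 * spanning [0x10000, 0x20000) that gains [0x8000, 0x10000) ends up with
 * zone_start_pfn == 0x8000 and spanned_pages == 0x18000; a range that
 * is already covered leaves the span unchanged.
 */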

void __ref move_pfn_range_to_zone(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice, but set_pfnblock_flags_mask()
	 * expects that the zone spans the pfn range. All the pages in the
	 * range are reserved, so nobody should be touching them, so we
	 * should be safe.
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}
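
/*
 * Zone selection in a nutshell (see zone_for_pfn_range() below):
 * MMOP_ONLINE_KERNEL picks the default kernel zone, MMOP_ONLINE_MOVABLE
 * forces ZONE_MOVABLE, and any other online type lets the range inherit
 * the one zone it already intersects; ambiguous ranges go to
 * ZONE_MOVABLE only when movable_node is enabled.
 */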

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in the simple case where the zones
	 * do not overlap in the given range.
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone, or two zones overlap in
	 * the given range, then we use the movable zone only if movable_node
	 * is enabled, because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

/*
 * Associates the given pfn range with the given node and the zone
 * appropriate for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
	return zone;
}
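
/*
 * The online path below, roughly: move the pfn range into the target
 * zone, fire MEM_GOING_ONLINE, hand the pages to the page allocator via
 * online_pages_range(), rebuild zonelists and update watermarks, start
 * kswapd/kcompactd on a newly populated node, and finally send
 * MEM_ONLINE (or MEM_CANCEL_ONLINE on failure).
 */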
/* Must be protected by mem_hotplug_begin() */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	nid = pfn_to_nid(pfn);
	/* associate pfn range with the zone */
	zone = move_pfn_range(online_type, nid, pfn, nr_pages);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
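
/*
 * Node hot-add, sketched: hotadd_new_pgdat() below allocates (or reuses)
 * the pgdat, initializes all of its zones as empty, builds zonelists,
 * and clears the managed/present page counts so that online_pages() and
 * offline_pages() can account them from scratch.
 */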

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones, we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which would cause
	 * /sys/devices/system/node/nodeX/meminfo to show wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	/*
	 * When memory is hot-added, all the memory is in the offline state.
	 * So clear all zones' present_pages; they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
	return;
}


/**
 * try_online_node - online a node if offlined
 *
 * Called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t	*pgdat;
	int	ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}

out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* The memory range must be section aligned */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
				(unsigned long long)start,
				(unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}
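
/*
 * add_memory_resource() below, in order: validate section alignment,
 * register the range with memblock, create the pgdat if the node is
 * new, call the arch's arch_add_memory(), register the node and its
 * memory sections in sysfs, record the range in the firmware map, and
 * optionally online the new memory blocks right away.
 */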
11437f36e3e5STang Chen * to allocate a new pgdat, get_pfn_range_for_nid() will be able to find 11447f36e3e5STang Chen * this new range and calculate total pages correctly. The range will 11457f36e3e5STang Chen * be removed at hot-remove time. 11467f36e3e5STang Chen */ 11477f36e3e5STang Chen memblock_add_node(start, size, nid); 11487f36e3e5STang Chen 1149a1e565aaSTang Chen new_node = !node_online(nid); 1150a1e565aaSTang Chen if (new_node) { 11519af3c2deSYasunori Goto pgdat = hotadd_new_pgdat(nid, start); 11526ad696d2SAndi Kleen ret = -ENOMEM; 11539af3c2deSYasunori Goto if (!pgdat) 115441b9e2d7SWen Congyang goto error; 11559af3c2deSYasunori Goto } 11569af3c2deSYasunori Goto 1157bc02af93SYasunori Goto /* call arch's memory hotadd */ 11583d79a728SMichal Hocko ret = arch_add_memory(nid, start, size, true); 1159bc02af93SYasunori Goto 11609af3c2deSYasunori Goto if (ret < 0) 11619af3c2deSYasunori Goto goto error; 11629af3c2deSYasunori Goto 11630fc44159SYasunori Goto /* we online the node here; we can't roll back from here. */ 11649af3c2deSYasunori Goto node_set_online(nid); 11659af3c2deSYasunori Goto 1166a1e565aaSTang Chen if (new_node) { 11679037a993SMichal Hocko unsigned long start_pfn = start >> PAGE_SHIFT; 11689037a993SMichal Hocko unsigned long nr_pages = size >> PAGE_SHIFT; 11699037a993SMichal Hocko 11709037a993SMichal Hocko ret = __register_one_node(nid); 11719037a993SMichal Hocko if (ret) 11729037a993SMichal Hocko goto register_fail; 11739037a993SMichal Hocko 11749037a993SMichal Hocko /* 11759037a993SMichal Hocko * link memory sections under this node. This is already 11769037a993SMichal Hocko * done when creating a memory section in register_new_memory 11779037a993SMichal Hocko * but that depends on having the node registered, so offline 11789037a993SMichal Hocko * nodes have to go through register_node. 11799037a993SMichal Hocko * TODO: clean up this mess. 11809037a993SMichal Hocko */ 11819037a993SMichal Hocko ret = link_mem_sections(nid, start_pfn, nr_pages); 11829037a993SMichal Hocko register_fail: 11830fc44159SYasunori Goto /* 11840fc44159SYasunori Goto * If the sysfs file of the new node can't be created, CPUs on 11850fc44159SYasunori Goto * the node can't be hot-added. There is no way to roll back now, 11860fc44159SYasunori Goto * so check with BUG_ON() to catch it reluctantly. 11870fc44159SYasunori Goto */ 11880fc44159SYasunori Goto BUG_ON(ret); 11890fc44159SYasunori Goto } 11900fc44159SYasunori Goto 1191d96ae530Sakpm@linux-foundation.org /* create new memmap entry */ 1192d96ae530Sakpm@linux-foundation.org firmware_map_add_hotplug(start, start + size, "System RAM"); 1193d96ae530Sakpm@linux-foundation.org 119431bc3858SVitaly Kuznetsov /* online pages if requested */ 119531bc3858SVitaly Kuznetsov if (online) 119631bc3858SVitaly Kuznetsov walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), 119731bc3858SVitaly Kuznetsov NULL, online_memory_block); 119831bc3858SVitaly Kuznetsov 11996ad696d2SAndi Kleen goto out; 12006ad696d2SAndi Kleen 12019af3c2deSYasunori Goto error: 12029af3c2deSYasunori Goto /* rollback pgdat allocation and others */ 1203dbac61a3SGustavo A. R.
Silva if (new_pgdat && pgdat) 12049af3c2deSYasunori Goto rollback_node_hotadd(nid, pgdat); 12057f36e3e5STang Chen memblock_remove(start, size); 12069af3c2deSYasunori Goto 12076ad696d2SAndi Kleen out: 1208bfc8c901SVladimir Davydov mem_hotplug_done(); 1209bc02af93SYasunori Goto return ret; 1210bc02af93SYasunori Goto } 121162cedb9fSDavid Vrabel EXPORT_SYMBOL_GPL(add_memory_resource); 121262cedb9fSDavid Vrabel 121362cedb9fSDavid Vrabel int __ref add_memory(int nid, u64 start, u64 size) 121462cedb9fSDavid Vrabel { 121562cedb9fSDavid Vrabel struct resource *res; 121662cedb9fSDavid Vrabel int ret; 121762cedb9fSDavid Vrabel 121862cedb9fSDavid Vrabel res = register_memory_resource(start, size); 12196f754ba4SVitaly Kuznetsov if (IS_ERR(res)) 12206f754ba4SVitaly Kuznetsov return PTR_ERR(res); 122162cedb9fSDavid Vrabel 122231bc3858SVitaly Kuznetsov ret = add_memory_resource(nid, res, memhp_auto_online); 122362cedb9fSDavid Vrabel if (ret < 0) 122462cedb9fSDavid Vrabel release_memory_resource(res); 122562cedb9fSDavid Vrabel return ret; 122662cedb9fSDavid Vrabel } 1227bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory); 12280c0e6195SKAMEZAWA Hiroyuki 12290c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE 12300c0e6195SKAMEZAWA Hiroyuki /* 12315c755e9fSBadari Pulavarty * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy 12325c755e9fSBadari Pulavarty * set and the size of the free page is given by page_order(). Using this, 12335c755e9fSBadari Pulavarty * the function determines if the pageblock contains only free pages. 12345c755e9fSBadari Pulavarty * Due to buddy constraints, a free page at least the size of a pageblock will 12355c755e9fSBadari Pulavarty * be located at the start of the pageblock. 12365c755e9fSBadari Pulavarty */ 12375c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page) 12385c755e9fSBadari Pulavarty { 12395c755e9fSBadari Pulavarty return PageBuddy(page) && page_order(page) >= pageblock_order; 12405c755e9fSBadari Pulavarty } 12415c755e9fSBadari Pulavarty 12425c755e9fSBadari Pulavarty /* Return the start of the next active pageblock after a given page */ 12435c755e9fSBadari Pulavarty static struct page *next_active_pageblock(struct page *page) 12445c755e9fSBadari Pulavarty { 12455c755e9fSBadari Pulavarty /* Ensure the starting page is pageblock-aligned */ 12465c755e9fSBadari Pulavarty BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 12475c755e9fSBadari Pulavarty 12485c755e9fSBadari Pulavarty /* If the entire pageblock is free, move to the end of the free page */ 12490dcc48c1SKAMEZAWA Hiroyuki if (pageblock_free(page)) { 12500dcc48c1SKAMEZAWA Hiroyuki int order; 12510dcc48c1SKAMEZAWA Hiroyuki /* Be careful: we don't hold locks, so page_order() can change. */ 12520dcc48c1SKAMEZAWA Hiroyuki order = page_order(page); 12530dcc48c1SKAMEZAWA Hiroyuki if ((order < MAX_ORDER) && (order >= pageblock_order)) 12540dcc48c1SKAMEZAWA Hiroyuki return page + (1 << order); 12550dcc48c1SKAMEZAWA Hiroyuki } 12565c755e9fSBadari Pulavarty 12570dcc48c1SKAMEZAWA Hiroyuki return page + pageblock_nr_pages; 12585c755e9fSBadari Pulavarty } 12595c755e9fSBadari Pulavarty 12605c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable.
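 *
 * Hedged usage sketch (assumed caller, mirroring how the "removable"
 * sysfs attribute in drivers/base/memory.c consults this helper one
 * section at a time):
 *
 *	unsigned long pfn = section_nr_to_pfn(mem->start_section_nr);
 *	bool removable = is_mem_section_removable(pfn, PAGES_PER_SECTION);
 *
 * The answer is only a likelihood; concurrent allocations can invalidate
 * it at any moment.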
*/ 1261c98940f6SYaowei Bai bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 12625c755e9fSBadari Pulavarty { 12635c755e9fSBadari Pulavarty struct page *page = pfn_to_page(start_pfn); 12645c755e9fSBadari Pulavarty struct page *end_page = page + nr_pages; 12655c755e9fSBadari Pulavarty 12665c755e9fSBadari Pulavarty /* Check the starting page of each pageblock within the range */ 12675c755e9fSBadari Pulavarty for (; page < end_page; page = next_active_pageblock(page)) { 126849ac8255SKAMEZAWA Hiroyuki if (!is_pageblock_removable_nolock(page)) 1269c98940f6SYaowei Bai return false; 127049ac8255SKAMEZAWA Hiroyuki cond_resched(); 12715c755e9fSBadari Pulavarty } 12725c755e9fSBadari Pulavarty 12735c755e9fSBadari Pulavarty /* All pageblocks in the memory block are likely to be hot-removable */ 1274c98940f6SYaowei Bai return true; 12755c755e9fSBadari Pulavarty } 12765c755e9fSBadari Pulavarty 12775c755e9fSBadari Pulavarty /* 1278deb88a2aSToshi Kani * Confirm all pages in a range [start, end) belong to the same zone. 1279a96dfddbSToshi Kani * When true, return its valid [start, end). 12800c0e6195SKAMEZAWA Hiroyuki */ 1281a96dfddbSToshi Kani int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, 1282a96dfddbSToshi Kani unsigned long *valid_start, unsigned long *valid_end) 12830c0e6195SKAMEZAWA Hiroyuki { 12845f0f2887SAndrew Banman unsigned long pfn, sec_end_pfn; 1285a96dfddbSToshi Kani unsigned long start, end; 12860c0e6195SKAMEZAWA Hiroyuki struct zone *zone = NULL; 12870c0e6195SKAMEZAWA Hiroyuki struct page *page; 12880c0e6195SKAMEZAWA Hiroyuki int i; 1289deb88a2aSToshi Kani for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); 12900c0e6195SKAMEZAWA Hiroyuki pfn < end_pfn; 1291deb88a2aSToshi Kani pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { 12925f0f2887SAndrew Banman /* Make sure the memory section is present first */ 12935f0f2887SAndrew Banman if (!present_section_nr(pfn_to_section_nr(pfn))) 12945f0f2887SAndrew Banman continue; 12955f0f2887SAndrew Banman for (; pfn < sec_end_pfn && pfn < end_pfn; 12960c0e6195SKAMEZAWA Hiroyuki pfn += MAX_ORDER_NR_PAGES) { 12970c0e6195SKAMEZAWA Hiroyuki i = 0; 12980c0e6195SKAMEZAWA Hiroyuki /* This is just a CONFIG_HOLES_IN_ZONE check.*/ 12995f0f2887SAndrew Banman while ((i < MAX_ORDER_NR_PAGES) && 13005f0f2887SAndrew Banman !pfn_valid_within(pfn + i)) 13010c0e6195SKAMEZAWA Hiroyuki i++; 1302d6d8c8a4Szhong jiang if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 13030c0e6195SKAMEZAWA Hiroyuki continue; 13040c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn + i); 13050c0e6195SKAMEZAWA Hiroyuki if (zone && page_zone(page) != zone) 13060c0e6195SKAMEZAWA Hiroyuki return 0; 1307a96dfddbSToshi Kani if (!zone) 1308a96dfddbSToshi Kani start = pfn + i; 13090c0e6195SKAMEZAWA Hiroyuki zone = page_zone(page); 1310a96dfddbSToshi Kani end = pfn + MAX_ORDER_NR_PAGES; 13110c0e6195SKAMEZAWA Hiroyuki } 13125f0f2887SAndrew Banman } 1313deb88a2aSToshi Kani 1314a96dfddbSToshi Kani if (zone) { 1315a96dfddbSToshi Kani *valid_start = start; 1316d6d8c8a4Szhong jiang *valid_end = min(end, end_pfn); 13170c0e6195SKAMEZAWA Hiroyuki return 1; 1318a96dfddbSToshi Kani } else { 1319deb88a2aSToshi Kani return 0; 13200c0e6195SKAMEZAWA Hiroyuki } 1321a96dfddbSToshi Kani } 13220c0e6195SKAMEZAWA Hiroyuki 13230c0e6195SKAMEZAWA Hiroyuki /* 13240efadf48SYisheng Xie * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, 13250efadf48SYisheng Xie * non-lru movable pages and hugepages). 
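 * For instance (illustrative): a free buddy page is passed over, a
 * page-cache page on the LRU is reported, and an inactive free hugepage
 * is stepped over as a whole by rounding the pfn up by its compound
 * order.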
We scan the pfn range because it's much 13260efadf48SYisheng Xie * easier than scanning over the linked lists. This function returns the 13270efadf48SYisheng Xie * pfn of the first movable page found, otherwise 0. 13280c0e6195SKAMEZAWA Hiroyuki */ 1329c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 13300c0e6195SKAMEZAWA Hiroyuki { 13310c0e6195SKAMEZAWA Hiroyuki unsigned long pfn; 13320c0e6195SKAMEZAWA Hiroyuki struct page *page; 13330c0e6195SKAMEZAWA Hiroyuki for (pfn = start; pfn < end; pfn++) { 13340c0e6195SKAMEZAWA Hiroyuki if (pfn_valid(pfn)) { 13350c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn); 13360c0e6195SKAMEZAWA Hiroyuki if (PageLRU(page)) 13370c0e6195SKAMEZAWA Hiroyuki return pfn; 13380efadf48SYisheng Xie if (__PageMovable(page)) 13390efadf48SYisheng Xie return pfn; 1340c8721bbbSNaoya Horiguchi if (PageHuge(page)) { 13417e1f049eSNaoya Horiguchi if (page_huge_active(page)) 1342c8721bbbSNaoya Horiguchi return pfn; 1343c8721bbbSNaoya Horiguchi else 1344c8721bbbSNaoya Horiguchi pfn = round_up(pfn + 1, 1345c8721bbbSNaoya Horiguchi 1 << compound_order(page)) - 1; 1346c8721bbbSNaoya Horiguchi } 13470c0e6195SKAMEZAWA Hiroyuki } 13480c0e6195SKAMEZAWA Hiroyuki } 13490c0e6195SKAMEZAWA Hiroyuki return 0; 13500c0e6195SKAMEZAWA Hiroyuki } 13510c0e6195SKAMEZAWA Hiroyuki 1352394e31d2SXishi Qiu static struct page *new_node_page(struct page *page, unsigned long private, 1353394e31d2SXishi Qiu int **result) 1354394e31d2SXishi Qiu { 1355394e31d2SXishi Qiu int nid = page_to_nid(page); 1356231e97e2SLi Zhong nodemask_t nmask = node_states[N_MEMORY]; 13577f252f27SMichal Hocko 13587f252f27SMichal Hocko /* 13597f252f27SMichal Hocko * try to allocate from a different node but reuse this node if there 13607f252f27SMichal Hocko * are no other online nodes to be used (e.g.
we are offlining a part 13617f252f27SMichal Hocko * of the only existing node) 13627f252f27SMichal Hocko */ 13637f252f27SMichal Hocko node_clear(nid, nmask); 13647f252f27SMichal Hocko if (nodes_empty(nmask)) 13657f252f27SMichal Hocko node_set(nid, nmask); 1366394e31d2SXishi Qiu 13678b913238SMichal Hocko return new_page_nodemask(page, nid, &nmask); 1368394e31d2SXishi Qiu } 1369394e31d2SXishi Qiu 13700c0e6195SKAMEZAWA Hiroyuki #define NR_OFFLINE_AT_ONCE_PAGES (256) 13710c0e6195SKAMEZAWA Hiroyuki static int 13720c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) 13730c0e6195SKAMEZAWA Hiroyuki { 13740c0e6195SKAMEZAWA Hiroyuki unsigned long pfn; 13750c0e6195SKAMEZAWA Hiroyuki struct page *page; 13760c0e6195SKAMEZAWA Hiroyuki int move_pages = NR_OFFLINE_AT_ONCE_PAGES; 13770c0e6195SKAMEZAWA Hiroyuki int not_managed = 0; 13780c0e6195SKAMEZAWA Hiroyuki int ret = 0; 13790c0e6195SKAMEZAWA Hiroyuki LIST_HEAD(source); 13800c0e6195SKAMEZAWA Hiroyuki 13810c0e6195SKAMEZAWA Hiroyuki for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { 13820c0e6195SKAMEZAWA Hiroyuki if (!pfn_valid(pfn)) 13830c0e6195SKAMEZAWA Hiroyuki continue; 13840c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn); 1385c8721bbbSNaoya Horiguchi 1386c8721bbbSNaoya Horiguchi if (PageHuge(page)) { 1387c8721bbbSNaoya Horiguchi struct page *head = compound_head(page); 1388c8721bbbSNaoya Horiguchi pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1; 1389c8721bbbSNaoya Horiguchi if (compound_order(head) > PFN_SECTION_SHIFT) { 1390c8721bbbSNaoya Horiguchi ret = -EBUSY; 1391c8721bbbSNaoya Horiguchi break; 1392c8721bbbSNaoya Horiguchi } 1393c8721bbbSNaoya Horiguchi if (isolate_huge_page(page, &source)) 1394c8721bbbSNaoya Horiguchi move_pages -= 1 << compound_order(head); 1395c8721bbbSNaoya Horiguchi continue; 1396c8721bbbSNaoya Horiguchi } 1397c8721bbbSNaoya Horiguchi 1398700c2a46SKonstantin Khlebnikov if (!get_page_unless_zero(page)) 13990c0e6195SKAMEZAWA Hiroyuki continue; 14000c0e6195SKAMEZAWA Hiroyuki /* 14010efadf48SYisheng Xie * We can skip free pages, and we can handle pages on the 14020efadf48SYisheng Xie * LRU as well as non-LRU movable pages. 14030c0e6195SKAMEZAWA Hiroyuki */ 14040efadf48SYisheng Xie if (PageLRU(page)) 140562695a84SNick Piggin ret = isolate_lru_page(page); 14060efadf48SYisheng Xie else 14070efadf48SYisheng Xie ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); 14080c0e6195SKAMEZAWA Hiroyuki if (!ret) { /* Success */ 1409700c2a46SKonstantin Khlebnikov put_page(page); 141062695a84SNick Piggin list_add_tail(&page->lru, &source); 14110c0e6195SKAMEZAWA Hiroyuki move_pages--; 14120efadf48SYisheng Xie if (!__PageMovable(page)) 1413599d0c95SMel Gorman inc_node_page_state(page, NR_ISOLATED_ANON + 14146d9c285aSKOSAKI Motohiro page_is_file_cache(page)); 14156d9c285aSKOSAKI Motohiro 14160c0e6195SKAMEZAWA Hiroyuki } else { 14170c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_DEBUG_VM 14180efadf48SYisheng Xie pr_alert("failed to isolate pfn %lx\n", pfn); 14190efadf48SYisheng Xie dump_page(page, "isolation failed"); 14200c0e6195SKAMEZAWA Hiroyuki #endif 1421700c2a46SKonstantin Khlebnikov put_page(page); 142225985edcSLucas De Marchi /* Because we don't hold the big zone->lock, we should 1423809c4449SBob Liu check this again here.
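 * An assumed interleaving that this recheck catches: we pinned the page
 * with get_page_unless_zero(), isolation failed, and put_page() dropped
 * our reference; if page_count() is still non-zero, another user holds
 * the page and the range cannot be offlined yet, hence -EBUSY below.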
*/ 1424809c4449SBob Liu if (page_count(page)) { 1425809c4449SBob Liu not_managed++; 1426f3ab2636SBob Liu ret = -EBUSY; 1427809c4449SBob Liu break; 1428809c4449SBob Liu } 14290c0e6195SKAMEZAWA Hiroyuki } 14300c0e6195SKAMEZAWA Hiroyuki } 1431f3ab2636SBob Liu if (!list_empty(&source)) { 14320c0e6195SKAMEZAWA Hiroyuki if (not_managed) { 1433c8721bbbSNaoya Horiguchi putback_movable_pages(&source); 14340c0e6195SKAMEZAWA Hiroyuki goto out; 14350c0e6195SKAMEZAWA Hiroyuki } 143674c08f98SMinchan Kim 1437394e31d2SXishi Qiu /* Allocate a new page from the nearest neighbor node */ 1438394e31d2SXishi Qiu ret = migrate_pages(&source, new_node_page, NULL, 0, 14399c620e2bSHugh Dickins MIGRATE_SYNC, MR_MEMORY_HOTPLUG); 1440cf608ac1SMinchan Kim if (ret) 1441c8721bbbSNaoya Horiguchi putback_movable_pages(&source); 1442f3ab2636SBob Liu } 14430c0e6195SKAMEZAWA Hiroyuki out: 14440c0e6195SKAMEZAWA Hiroyuki return ret; 14450c0e6195SKAMEZAWA Hiroyuki } 14460c0e6195SKAMEZAWA Hiroyuki 14470c0e6195SKAMEZAWA Hiroyuki /* 14480c0e6195SKAMEZAWA Hiroyuki * Remove from free_area[] and mark all as Reserved. 14490c0e6195SKAMEZAWA Hiroyuki */ 14500c0e6195SKAMEZAWA Hiroyuki static int 14510c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, 14520c0e6195SKAMEZAWA Hiroyuki void *data) 14530c0e6195SKAMEZAWA Hiroyuki { 14540c0e6195SKAMEZAWA Hiroyuki __offline_isolated_pages(start, start + nr_pages); 14550c0e6195SKAMEZAWA Hiroyuki return 0; 14560c0e6195SKAMEZAWA Hiroyuki } 14570c0e6195SKAMEZAWA Hiroyuki 14580c0e6195SKAMEZAWA Hiroyuki static void 14590c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 14600c0e6195SKAMEZAWA Hiroyuki { 1461908eedc6SKAMEZAWA Hiroyuki walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, 14620c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb); 14630c0e6195SKAMEZAWA Hiroyuki } 14640c0e6195SKAMEZAWA Hiroyuki 14650c0e6195SKAMEZAWA Hiroyuki /* 14660c0e6195SKAMEZAWA Hiroyuki * Check that all pages in the range, recorded as a memory resource, are isolated.
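 *
 * Sketch of the callback wiring used just below (the same pattern as
 * offline_isolated_pages() above):
 *
 *	long offlined = 0;
 *	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
 *				    &offlined, check_pages_isolated_cb);
 *
 * walk_system_ram_range() invokes the callback once per contiguous piece
 * of "System RAM" and stops at the first non-zero return value.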
14670c0e6195SKAMEZAWA Hiroyuki */ 14680c0e6195SKAMEZAWA Hiroyuki static int 14690c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, 14700c0e6195SKAMEZAWA Hiroyuki void *data) 14710c0e6195SKAMEZAWA Hiroyuki { 14720c0e6195SKAMEZAWA Hiroyuki int ret; 14730c0e6195SKAMEZAWA Hiroyuki long offlined = *(long *)data; 1474b023f468SWen Congyang ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true); 14750c0e6195SKAMEZAWA Hiroyuki offlined = nr_pages; 14760c0e6195SKAMEZAWA Hiroyuki if (!ret) 14770c0e6195SKAMEZAWA Hiroyuki *(long *)data += offlined; 14780c0e6195SKAMEZAWA Hiroyuki return ret; 14790c0e6195SKAMEZAWA Hiroyuki } 14800c0e6195SKAMEZAWA Hiroyuki 14810c0e6195SKAMEZAWA Hiroyuki static long 14820c0e6195SKAMEZAWA Hiroyuki check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) 14830c0e6195SKAMEZAWA Hiroyuki { 14840c0e6195SKAMEZAWA Hiroyuki long offlined = 0; 14850c0e6195SKAMEZAWA Hiroyuki int ret; 14860c0e6195SKAMEZAWA Hiroyuki 1487908eedc6SKAMEZAWA Hiroyuki ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, 14880c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb); 14890c0e6195SKAMEZAWA Hiroyuki if (ret < 0) 14900c0e6195SKAMEZAWA Hiroyuki offlined = (long)ret; 14910c0e6195SKAMEZAWA Hiroyuki return offlined; 14920c0e6195SKAMEZAWA Hiroyuki } 14930c0e6195SKAMEZAWA Hiroyuki 1494c5320926STang Chen static int __init cmdline_parse_movable_node(char *p) 1495c5320926STang Chen { 14964932381eSMichal Hocko #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 149755ac590cSTang Chen movable_node_enabled = true; 14984932381eSMichal Hocko #else 14994932381eSMichal Hocko pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n"); 15004932381eSMichal Hocko #endif 1501c5320926STang Chen return 0; 1502c5320926STang Chen } 1503c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node); 1504c5320926STang Chen 1505d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */ 1506d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages, 1507d9713679SLai Jiangshan struct zone *zone, struct memory_notify *arg) 1508d9713679SLai Jiangshan { 1509d9713679SLai Jiangshan struct pglist_data *pgdat = zone->zone_pgdat; 1510d9713679SLai Jiangshan unsigned long present_pages = 0; 1511d9713679SLai Jiangshan enum zone_type zt, zone_last = ZONE_NORMAL; 1512d9713679SLai Jiangshan 1513d9713679SLai Jiangshan /* 15146715ddf9SLai Jiangshan * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] 15156715ddf9SLai Jiangshan * contains nodes which have zones of 0...ZONE_NORMAL, 15166715ddf9SLai Jiangshan * set zone_last to ZONE_NORMAL. 1517d9713679SLai Jiangshan * 15186715ddf9SLai Jiangshan * If we don't have HIGHMEM nor movable node, 15196715ddf9SLai Jiangshan * node_states[N_NORMAL_MEMORY] contains nodes which have zones of 15206715ddf9SLai Jiangshan * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. 1521d9713679SLai Jiangshan */ 15226715ddf9SLai Jiangshan if (N_MEMORY == N_NORMAL_MEMORY) 1523d9713679SLai Jiangshan zone_last = ZONE_MOVABLE; 1524d9713679SLai Jiangshan 1525d9713679SLai Jiangshan /* 1526d9713679SLai Jiangshan * check whether node_states[N_NORMAL_MEMORY] will be changed. 
1527d9713679SLai Jiangshan * If the memory to be offline is in a zone of 0...zone_last, 1528d9713679SLai Jiangshan * and it is the last present memory, 0...zone_last will 1529d9713679SLai Jiangshan * become empty after offlining, thus we can determine that we will 1530d9713679SLai Jiangshan * need to clear the node from node_states[N_NORMAL_MEMORY]. 1531d9713679SLai Jiangshan */ 1532d9713679SLai Jiangshan for (zt = 0; zt <= zone_last; zt++) 1533d9713679SLai Jiangshan present_pages += pgdat->node_zones[zt].present_pages; 1534d9713679SLai Jiangshan if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) 1535d9713679SLai Jiangshan arg->status_change_nid_normal = zone_to_nid(zone); 1536d9713679SLai Jiangshan else 1537d9713679SLai Jiangshan arg->status_change_nid_normal = -1; 1538d9713679SLai Jiangshan 15396715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM 15406715ddf9SLai Jiangshan /* 15416715ddf9SLai Jiangshan * If we have movable node, node_states[N_HIGH_MEMORY] 15426715ddf9SLai Jiangshan * contains nodes which have zones of 0...ZONE_HIGHMEM, 15436715ddf9SLai Jiangshan * set zone_last to ZONE_HIGHMEM. 15446715ddf9SLai Jiangshan * 15456715ddf9SLai Jiangshan * If we don't have movable node, node_states[N_NORMAL_MEMORY] 15466715ddf9SLai Jiangshan * contains nodes which have zones of 0...ZONE_MOVABLE, 15476715ddf9SLai Jiangshan * set zone_last to ZONE_MOVABLE. 15486715ddf9SLai Jiangshan */ 15496715ddf9SLai Jiangshan zone_last = ZONE_HIGHMEM; 15506715ddf9SLai Jiangshan if (N_MEMORY == N_HIGH_MEMORY) 15516715ddf9SLai Jiangshan zone_last = ZONE_MOVABLE; 15526715ddf9SLai Jiangshan 15536715ddf9SLai Jiangshan for (; zt <= zone_last; zt++) 15546715ddf9SLai Jiangshan present_pages += pgdat->node_zones[zt].present_pages; 15556715ddf9SLai Jiangshan if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) 15566715ddf9SLai Jiangshan arg->status_change_nid_high = zone_to_nid(zone); 15576715ddf9SLai Jiangshan else 15586715ddf9SLai Jiangshan arg->status_change_nid_high = -1; 15596715ddf9SLai Jiangshan #else 15606715ddf9SLai Jiangshan arg->status_change_nid_high = arg->status_change_nid_normal; 15616715ddf9SLai Jiangshan #endif 15626715ddf9SLai Jiangshan 1563d9713679SLai Jiangshan /* 1564d9713679SLai Jiangshan * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE 1565d9713679SLai Jiangshan */ 1566d9713679SLai Jiangshan zone_last = ZONE_MOVABLE; 1567d9713679SLai Jiangshan 1568d9713679SLai Jiangshan /* 1569d9713679SLai Jiangshan * check whether node_states[N_HIGH_MEMORY] will be changed. 1570d9713679SLai Jiangshan * If we try to offline the last present @nr_pages from the node, 1571d9713679SLai Jiangshan * we can determine that we will need to clear the node from 1572d9713679SLai Jiangshan * node_states[N_HIGH_MEMORY].
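 *
 * Worked example (hypothetical layout): a node whose only present pages
 * are 512 in ZONE_NORMAL plus 512 in ZONE_MOVABLE; offlining all 1024
 * pages satisfies nr_pages >= present_pages, so status_change_nid is set
 * and MEM_GOING_OFFLINE listeners learn the whole node is emptying.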
1573d9713679SLai Jiangshan */ 1574d9713679SLai Jiangshan for (; zt <= zone_last; zt++) 1575d9713679SLai Jiangshan present_pages += pgdat->node_zones[zt].present_pages; 1576d9713679SLai Jiangshan if (nr_pages >= present_pages) 1577d9713679SLai Jiangshan arg->status_change_nid = zone_to_nid(zone); 1578d9713679SLai Jiangshan else 1579d9713679SLai Jiangshan arg->status_change_nid = -1; 1580d9713679SLai Jiangshan } 1581d9713679SLai Jiangshan 1582d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg) 1583d9713679SLai Jiangshan { 1584d9713679SLai Jiangshan if (arg->status_change_nid_normal >= 0) 1585d9713679SLai Jiangshan node_clear_state(node, N_NORMAL_MEMORY); 1586d9713679SLai Jiangshan 15876715ddf9SLai Jiangshan if ((N_MEMORY != N_NORMAL_MEMORY) && 15886715ddf9SLai Jiangshan (arg->status_change_nid_high >= 0)) 1589d9713679SLai Jiangshan node_clear_state(node, N_HIGH_MEMORY); 15906715ddf9SLai Jiangshan 15916715ddf9SLai Jiangshan if ((N_MEMORY != N_HIGH_MEMORY) && 15926715ddf9SLai Jiangshan (arg->status_change_nid >= 0)) 15936715ddf9SLai Jiangshan node_clear_state(node, N_MEMORY); 1594d9713679SLai Jiangshan } 1595d9713679SLai Jiangshan 1596a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn, 15970c0e6195SKAMEZAWA Hiroyuki unsigned long end_pfn, unsigned long timeout) 15980c0e6195SKAMEZAWA Hiroyuki { 15990c0e6195SKAMEZAWA Hiroyuki unsigned long pfn, nr_pages, expire; 16000c0e6195SKAMEZAWA Hiroyuki long offlined_pages; 16017b78d335SYasunori Goto int ret, drain, retry_max, node; 1602d702909fSCody P Schafer unsigned long flags; 1603a96dfddbSToshi Kani unsigned long valid_start, valid_end; 16040c0e6195SKAMEZAWA Hiroyuki struct zone *zone; 16057b78d335SYasunori Goto struct memory_notify arg; 16060c0e6195SKAMEZAWA Hiroyuki 16070c0e6195SKAMEZAWA Hiroyuki /* at least, alignment against pageblock is necessary */ 16080c0e6195SKAMEZAWA Hiroyuki if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) 16090c0e6195SKAMEZAWA Hiroyuki return -EINVAL; 16100c0e6195SKAMEZAWA Hiroyuki if (!IS_ALIGNED(end_pfn, pageblock_nr_pages)) 16110c0e6195SKAMEZAWA Hiroyuki return -EINVAL; 16120c0e6195SKAMEZAWA Hiroyuki /* This makes hotplug much easier...and readable. 16130c0e6195SKAMEZAWA Hiroyuki we assume this for now. 
*/ 1614a96dfddbSToshi Kani if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) 16150c0e6195SKAMEZAWA Hiroyuki return -EINVAL; 16167b78d335SYasunori Goto 1617a96dfddbSToshi Kani zone = page_zone(pfn_to_page(valid_start)); 16187b78d335SYasunori Goto node = zone_to_nid(zone); 16197b78d335SYasunori Goto nr_pages = end_pfn - start_pfn; 16207b78d335SYasunori Goto 16210c0e6195SKAMEZAWA Hiroyuki /* set above range as isolated */ 1622b023f468SWen Congyang ret = start_isolate_page_range(start_pfn, end_pfn, 1623b023f468SWen Congyang MIGRATE_MOVABLE, true); 16240c0e6195SKAMEZAWA Hiroyuki if (ret) 162530467e0bSDavid Rientjes return ret; 16267b78d335SYasunori Goto 16277b78d335SYasunori Goto arg.start_pfn = start_pfn; 16287b78d335SYasunori Goto arg.nr_pages = nr_pages; 1629d9713679SLai Jiangshan node_states_check_changes_offline(nr_pages, zone, &arg); 16307b78d335SYasunori Goto 16317b78d335SYasunori Goto ret = memory_notify(MEM_GOING_OFFLINE, &arg); 16327b78d335SYasunori Goto ret = notifier_to_errno(ret); 16337b78d335SYasunori Goto if (ret) 16347b78d335SYasunori Goto goto failed_removal; 16357b78d335SYasunori Goto 16360c0e6195SKAMEZAWA Hiroyuki pfn = start_pfn; 16370c0e6195SKAMEZAWA Hiroyuki expire = jiffies + timeout; 16380c0e6195SKAMEZAWA Hiroyuki drain = 0; 16390c0e6195SKAMEZAWA Hiroyuki retry_max = 5; 16400c0e6195SKAMEZAWA Hiroyuki repeat: 16410c0e6195SKAMEZAWA Hiroyuki /* start memory hot removal */ 16420c0e6195SKAMEZAWA Hiroyuki ret = -EAGAIN; 16430c0e6195SKAMEZAWA Hiroyuki if (time_after(jiffies, expire)) 16440c0e6195SKAMEZAWA Hiroyuki goto failed_removal; 16450c0e6195SKAMEZAWA Hiroyuki ret = -EINTR; 16460c0e6195SKAMEZAWA Hiroyuki if (signal_pending(current)) 16470c0e6195SKAMEZAWA Hiroyuki goto failed_removal; 16480c0e6195SKAMEZAWA Hiroyuki ret = 0; 16490c0e6195SKAMEZAWA Hiroyuki if (drain) { 16503f906ba2SThomas Gleixner lru_add_drain_all_cpuslocked(); 16510c0e6195SKAMEZAWA Hiroyuki cond_resched(); 1652c0554329SVlastimil Babka drain_all_pages(zone); 16530c0e6195SKAMEZAWA Hiroyuki } 16540c0e6195SKAMEZAWA Hiroyuki 1655c8721bbbSNaoya Horiguchi pfn = scan_movable_pages(start_pfn, end_pfn); 1656c8721bbbSNaoya Horiguchi if (pfn) { /* We have movable pages */ 16570c0e6195SKAMEZAWA Hiroyuki ret = do_migrate_range(pfn, end_pfn); 16580c0e6195SKAMEZAWA Hiroyuki if (!ret) { 16590c0e6195SKAMEZAWA Hiroyuki drain = 1; 16600c0e6195SKAMEZAWA Hiroyuki goto repeat; 16610c0e6195SKAMEZAWA Hiroyuki } else { 16620c0e6195SKAMEZAWA Hiroyuki if (ret < 0) 16630c0e6195SKAMEZAWA Hiroyuki if (--retry_max == 0) 16640c0e6195SKAMEZAWA Hiroyuki goto failed_removal; 16650c0e6195SKAMEZAWA Hiroyuki yield(); 16660c0e6195SKAMEZAWA Hiroyuki drain = 1; 16670c0e6195SKAMEZAWA Hiroyuki goto repeat; 16680c0e6195SKAMEZAWA Hiroyuki } 16690c0e6195SKAMEZAWA Hiroyuki } 1670b3834be5SAdam Buchbinder /* drain all zones' LRU pagevecs; this is asynchronous... */ 16713f906ba2SThomas Gleixner lru_add_drain_all_cpuslocked(); 16720c0e6195SKAMEZAWA Hiroyuki yield(); 1673b3834be5SAdam Buchbinder /* drain PCP pages; this is synchronous. */ 1674c0554329SVlastimil Babka drain_all_pages(zone); 1675c8721bbbSNaoya Horiguchi /* 1676c8721bbbSNaoya Horiguchi * Dissolve free hugepages in the memory block before actually 1677c8721bbbSNaoya Horiguchi * offlining, in order to keep hugetlbfs's object counting consistent.
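 *
 * For example (illustrative): a free 2MB hugepage in the range is split
 * back into base pages and subtracted from its hstate's counters, so
 * HugePages_Total/HugePages_Free in /proc/meminfo stay accurate once the
 * range is offline; a free hugepage that cannot be dissolved (e.g. it is
 * needed to satisfy a reservation) makes the call below fail and aborts
 * the offline.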
1678c8721bbbSNaoya Horiguchi */ 1679082d5b6bSGerald Schaefer ret = dissolve_free_huge_pages(start_pfn, end_pfn); 1680082d5b6bSGerald Schaefer if (ret) 1681082d5b6bSGerald Schaefer goto failed_removal; 16820c0e6195SKAMEZAWA Hiroyuki /* check again */ 16830c0e6195SKAMEZAWA Hiroyuki offlined_pages = check_pages_isolated(start_pfn, end_pfn); 16840c0e6195SKAMEZAWA Hiroyuki if (offlined_pages < 0) { 16850c0e6195SKAMEZAWA Hiroyuki ret = -EBUSY; 16860c0e6195SKAMEZAWA Hiroyuki goto failed_removal; 16870c0e6195SKAMEZAWA Hiroyuki } 1688e33e33b4SChen Yucong pr_info("Offlined Pages %ld\n", offlined_pages); 1689b3834be5SAdam Buchbinder /* OK, all of our target range is isolated. 16900c0e6195SKAMEZAWA Hiroyuki We cannot do rollback at this point. */ 16910c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(start_pfn, end_pfn); 1692dbc0e4ceSKAMEZAWA Hiroyuki /* reset pagetype flags and make the migrate type MOVABLE */ 16930815f3d8SMichal Nazarewicz undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 16940c0e6195SKAMEZAWA Hiroyuki /* removal success */ 16953dcc0571SJiang Liu adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); 16960c0e6195SKAMEZAWA Hiroyuki zone->present_pages -= offlined_pages; 1697d702909fSCody P Schafer 1698d702909fSCody P Schafer pgdat_resize_lock(zone->zone_pgdat, &flags); 16990c0e6195SKAMEZAWA Hiroyuki zone->zone_pgdat->node_present_pages -= offlined_pages; 1700d702909fSCody P Schafer pgdat_resize_unlock(zone->zone_pgdat, &flags); 17017b78d335SYasunori Goto 17021b79acc9SKOSAKI Motohiro init_per_zone_wmark_min(); 17031b79acc9SKOSAKI Motohiro 17041e8537baSXishi Qiu if (!populated_zone(zone)) { 1705340175b7SJiang Liu zone_pcp_reset(zone); 17061e8537baSXishi Qiu mutex_lock(&zonelists_mutex); 17071e8537baSXishi Qiu build_all_zonelists(NULL, NULL); 17081e8537baSXishi Qiu mutex_unlock(&zonelists_mutex); 17091e8537baSXishi Qiu } else 17101e8537baSXishi Qiu zone_pcp_update(zone); 1711340175b7SJiang Liu 1712d9713679SLai Jiangshan node_states_clear_node(node, &arg); 1713698b1b30SVlastimil Babka if (arg.status_change_nid >= 0) { 17148fe23e05SDavid Rientjes kswapd_stop(node); 1715698b1b30SVlastimil Babka kcompactd_stop(node); 1716698b1b30SVlastimil Babka } 1717bce7394aSMinchan Kim 17180c0e6195SKAMEZAWA Hiroyuki vm_total_pages = nr_free_pagecache_pages(); 17190c0e6195SKAMEZAWA Hiroyuki writeback_set_ratelimit(); 17207b78d335SYasunori Goto 17217b78d335SYasunori Goto memory_notify(MEM_OFFLINE, &arg); 17220c0e6195SKAMEZAWA Hiroyuki return 0; 17230c0e6195SKAMEZAWA Hiroyuki 17240c0e6195SKAMEZAWA Hiroyuki failed_removal: 1725e33e33b4SChen Yucong pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n", 1726a62e2f4fSBjorn Helgaas (unsigned long long) start_pfn << PAGE_SHIFT, 1727a62e2f4fSBjorn Helgaas ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); 17287b78d335SYasunori Goto memory_notify(MEM_CANCEL_OFFLINE, &arg); 17290c0e6195SKAMEZAWA Hiroyuki /* push back to the free area */ 17300815f3d8SMichal Nazarewicz undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 17310c0e6195SKAMEZAWA Hiroyuki return ret; 17320c0e6195SKAMEZAWA Hiroyuki } 173371088785SBadari Pulavarty 173430467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */ 1735a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages) 1736a16cee10SWen Congyang { 1737a16cee10SWen Congyang return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ); 1738a16cee10SWen Congyang } 1739e2ff3940SRafael J.
Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */ 1740a16cee10SWen Congyang 1741bbc76be6SWen Congyang /** 1742bbc76be6SWen Congyang * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) 1743bbc76be6SWen Congyang * @start_pfn: start pfn of the memory range 1744e05c4bbfSToshi Kani * @end_pfn: end pfn of the memory range 1745bbc76be6SWen Congyang * @arg: argument passed to func 1746bbc76be6SWen Congyang * @func: callback for each memory section walked 1747bbc76be6SWen Congyang * 1748bbc76be6SWen Congyang * This function walks through all present mem sections in range 1749bbc76be6SWen Congyang * [start_pfn, end_pfn) and calls func on each mem section. 1750bbc76be6SWen Congyang * 1751bbc76be6SWen Congyang * Returns the return value of func. 1752bbc76be6SWen Congyang */ 1753e2ff3940SRafael J. Wysocki int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 1754bbc76be6SWen Congyang void *arg, int (*func)(struct memory_block *, void *)) 175571088785SBadari Pulavarty { 1756e90bdb7fSWen Congyang struct memory_block *mem = NULL; 1757e90bdb7fSWen Congyang struct mem_section *section; 1758e90bdb7fSWen Congyang unsigned long pfn, section_nr; 1759e90bdb7fSWen Congyang int ret; 176071088785SBadari Pulavarty 1761e90bdb7fSWen Congyang for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 1762e90bdb7fSWen Congyang section_nr = pfn_to_section_nr(pfn); 1763e90bdb7fSWen Congyang if (!present_section_nr(section_nr)) 1764e90bdb7fSWen Congyang continue; 1765e90bdb7fSWen Congyang 1766e90bdb7fSWen Congyang section = __nr_to_section(section_nr); 1767e90bdb7fSWen Congyang /* same memblock? */ 1768e90bdb7fSWen Congyang if (mem) 1769e90bdb7fSWen Congyang if ((section_nr >= mem->start_section_nr) && 1770e90bdb7fSWen Congyang (section_nr <= mem->end_section_nr)) 1771e90bdb7fSWen Congyang continue; 1772e90bdb7fSWen Congyang 1773e90bdb7fSWen Congyang mem = find_memory_block_hinted(section, mem); 1774e90bdb7fSWen Congyang if (!mem) 1775e90bdb7fSWen Congyang continue; 1776e90bdb7fSWen Congyang 1777bbc76be6SWen Congyang ret = func(mem, arg); 1778e90bdb7fSWen Congyang if (ret) { 1779e90bdb7fSWen Congyang kobject_put(&mem->dev.kobj); 1780e90bdb7fSWen Congyang return ret; 1781e90bdb7fSWen Congyang } 1782e90bdb7fSWen Congyang } 1783e90bdb7fSWen Congyang 1784e90bdb7fSWen Congyang if (mem) 1785e90bdb7fSWen Congyang kobject_put(&mem->dev.kobj); 1786e90bdb7fSWen Congyang 1787bbc76be6SWen Congyang return 0; 1788bbc76be6SWen Congyang } 1789bbc76be6SWen Congyang 1790e2ff3940SRafael J.
Wysocki #ifdef CONFIG_MEMORY_HOTREMOVE 1791d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) 1792bbc76be6SWen Congyang { 1793bbc76be6SWen Congyang int ret = !is_memblock_offlined(mem); 1794bbc76be6SWen Congyang 1795349daa0fSRandy Dunlap if (unlikely(ret)) { 1796349daa0fSRandy Dunlap phys_addr_t beginpa, endpa; 1797349daa0fSRandy Dunlap 1798349daa0fSRandy Dunlap beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); 1799349daa0fSRandy Dunlap endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; 1800756a025fSJoe Perches pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n", 1801349daa0fSRandy Dunlap &beginpa, &endpa); 1802349daa0fSRandy Dunlap } 1803bbc76be6SWen Congyang 1804bbc76be6SWen Congyang return ret; 1805bbc76be6SWen Congyang } 1806bbc76be6SWen Congyang 18070f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat) 180860a5a19eSTang Chen { 180960a5a19eSTang Chen int cpu; 181060a5a19eSTang Chen 181160a5a19eSTang Chen for_each_present_cpu(cpu) { 181260a5a19eSTang Chen if (cpu_to_node(cpu) == pgdat->node_id) 181360a5a19eSTang Chen /* 181460a5a19eSTang Chen * the cpu on this node isn't removed, and we can't 181560a5a19eSTang Chen * offline this node. 181660a5a19eSTang Chen */ 181760a5a19eSTang Chen return -EBUSY; 181860a5a19eSTang Chen } 181960a5a19eSTang Chen 182060a5a19eSTang Chen return 0; 182160a5a19eSTang Chen } 182260a5a19eSTang Chen 18230f1cfe9dSToshi Kani static void unmap_cpu_on_node(pg_data_t *pgdat) 1824e13fe869SWen Congyang { 1825e13fe869SWen Congyang #ifdef CONFIG_ACPI_NUMA 1826e13fe869SWen Congyang int cpu; 1827e13fe869SWen Congyang 1828e13fe869SWen Congyang for_each_possible_cpu(cpu) 1829e13fe869SWen Congyang if (cpu_to_node(cpu) == pgdat->node_id) 1830e13fe869SWen Congyang numa_clear_node(cpu); 1831e13fe869SWen Congyang #endif 1832e13fe869SWen Congyang } 1833e13fe869SWen Congyang 18340f1cfe9dSToshi Kani static int check_and_unmap_cpu_on_node(pg_data_t *pgdat) 1835e13fe869SWen Congyang { 18360f1cfe9dSToshi Kani int ret; 1837e13fe869SWen Congyang 18380f1cfe9dSToshi Kani ret = check_cpu_on_node(pgdat); 1839e13fe869SWen Congyang if (ret) 1840e13fe869SWen Congyang return ret; 1841e13fe869SWen Congyang 1842e13fe869SWen Congyang /* 1843e13fe869SWen Congyang * the node will be offlined when we come here, so we can clear 1844e13fe869SWen Congyang * the cpu_to_node() now. 1845e13fe869SWen Congyang */ 1846e13fe869SWen Congyang 18470f1cfe9dSToshi Kani unmap_cpu_on_node(pgdat); 1848e13fe869SWen Congyang return 0; 1849e13fe869SWen Congyang } 1850e13fe869SWen Congyang 18510f1cfe9dSToshi Kani /** 18520f1cfe9dSToshi Kani * try_offline_node 18530f1cfe9dSToshi Kani * 18540f1cfe9dSToshi Kani * Offline a node if all memory sections and cpus of the node are removed. 18550f1cfe9dSToshi Kani * 18560f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug 18570f1cfe9dSToshi Kani * and online/offline operations before this call. 
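 *
 * Hedged caller-side sketch (illustrative; remove_memory() below performs
 * the equivalent once all memory blocks of the node are gone):
 *
 *	lock_device_hotplug();
 *	try_offline_node(nid);
 *	unlock_device_hotplug();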
18580f1cfe9dSToshi Kani */ 185990b30cdcSWen Congyang void try_offline_node(int nid) 186060a5a19eSTang Chen { 1861d822b86aSWen Congyang pg_data_t *pgdat = NODE_DATA(nid); 1862d822b86aSWen Congyang unsigned long start_pfn = pgdat->node_start_pfn; 1863d822b86aSWen Congyang unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; 186460a5a19eSTang Chen unsigned long pfn; 186560a5a19eSTang Chen 186660a5a19eSTang Chen for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 186760a5a19eSTang Chen unsigned long section_nr = pfn_to_section_nr(pfn); 186860a5a19eSTang Chen 186960a5a19eSTang Chen if (!present_section_nr(section_nr)) 187060a5a19eSTang Chen continue; 187160a5a19eSTang Chen 187260a5a19eSTang Chen if (pfn_to_nid(pfn) != nid) 187360a5a19eSTang Chen continue; 187460a5a19eSTang Chen 187560a5a19eSTang Chen /* 187660a5a19eSTang Chen * some memory sections of this node are not removed, and we 187760a5a19eSTang Chen * can't offline node now. 187860a5a19eSTang Chen */ 187960a5a19eSTang Chen return; 188060a5a19eSTang Chen } 188160a5a19eSTang Chen 18820f1cfe9dSToshi Kani if (check_and_unmap_cpu_on_node(pgdat)) 188360a5a19eSTang Chen return; 188460a5a19eSTang Chen 188560a5a19eSTang Chen /* 188660a5a19eSTang Chen * all memory/cpu of this node are removed, we can offline this 188760a5a19eSTang Chen * node now. 188860a5a19eSTang Chen */ 188960a5a19eSTang Chen node_set_offline(nid); 189060a5a19eSTang Chen unregister_one_node(nid); 189160a5a19eSTang Chen } 189290b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node); 189360a5a19eSTang Chen 18940f1cfe9dSToshi Kani /** 18950f1cfe9dSToshi Kani * remove_memory 18960f1cfe9dSToshi Kani * 18970f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug 18980f1cfe9dSToshi Kani * and online/offline operations before this call, as required by 18990f1cfe9dSToshi Kani * try_offline_node(). 19000f1cfe9dSToshi Kani */ 1901242831ebSRafael J. Wysocki void __ref remove_memory(int nid, u64 start, u64 size) 1902bbc76be6SWen Congyang { 1903242831ebSRafael J. Wysocki int ret; 1904993c1aadSWen Congyang 190527356f54SToshi Kani BUG_ON(check_hotplug_memory_range(start, size)); 190627356f54SToshi Kani 1907bfc8c901SVladimir Davydov mem_hotplug_begin(); 19086677e3eaSYasuaki Ishimatsu 19096677e3eaSYasuaki Ishimatsu /* 1910242831ebSRafael J. Wysocki * All memory blocks must be offlined before removing memory. Check 1911242831ebSRafael J. Wysocki * whether all memory blocks in question are offline and trigger a BUG() 1912242831ebSRafael J. Wysocki * if this is not the case. 19136677e3eaSYasuaki Ishimatsu */ 1914242831ebSRafael J. Wysocki ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, 1915d6de9d53SXishi Qiu check_memblock_offlined_cb); 1916bfc8c901SVladimir Davydov if (ret) 1917242831ebSRafael J. Wysocki BUG(); 19186677e3eaSYasuaki Ishimatsu 191946c66c4bSYasuaki Ishimatsu /* remove memmap entry */ 192046c66c4bSYasuaki Ishimatsu firmware_map_remove(start, start + size, "System RAM"); 1921f9126ab9SXishi Qiu memblock_free(start, size); 1922f9126ab9SXishi Qiu memblock_remove(start, size); 192346c66c4bSYasuaki Ishimatsu 192424d335caSWen Congyang arch_remove_memory(start, size); 192524d335caSWen Congyang 192660a5a19eSTang Chen try_offline_node(nid); 192760a5a19eSTang Chen 1928bfc8c901SVladimir Davydov mem_hotplug_done(); 192971088785SBadari Pulavarty } 193071088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory); 1931aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */ 1932