/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

/* add this memory to the iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res, *conflict;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_debug("Device unaddressable memory block "
				 "memory hotplug at %#010llx !\n",
				 (unsigned long long)start);
		}
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		return ERR_PTR(-EEXIST);
	}
	return res;
}
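
/*
 * Illustration (not kernel code): a successfully registered range shows up
 * in /proc/iomem, e.g. for a hypothetical 2GiB DIMM hot-added at 4GiB:
 *
 *   100000000-17fffffff : System RAM
 *
 * If request_resource_conflict() above returns a conflict, the range
 * already overlaps an existing resource and the hot-add request is
 * rejected with -EEXIST.
 */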

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}
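
/*
 * Sketch of the bootmem-info reference scheme implemented above: every page
 * backing a section's memmap or usemap is tagged with a type and given a
 * reference, so that hot-remove can tell when the backing pages may be
 * returned to the allocator:
 *
 *   get_page_bootmem(section_nr, page, SECTION_INFO);  // tag + reference
 *   ...
 *   put_page_bootmem(page);  // the final put frees the reserved page
 */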

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN. To avoid registering a pfn
		 * against multiple nodes, we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
		struct vmem_altmap *altmap, bool want_memblock)
{
	int ret;
	int i;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
	if (ret < 0)
		return ret;

	/*
	 * Make all the pages reserved so that nobody will stumble over
	 * half-initialized state.
	 * FIXME: We also have to associate each page with a node because
	 * page_to_nid() relies on the page carrying the proper node.
	 */
	for (i = 0; i < PAGES_PER_SECTION; i++) {
		unsigned long pfn = phys_start_pfn + i;
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		set_page_node(page, nid);
		SetPageReserved(page);
	}

	if (!want_memblock)
		return 0;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
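
/*
 * Typical call path (sketch only; the details are per-architecture): an
 * arch's arch_add_memory() sets up its direct mapping and then hands the
 * range to the generic code, roughly:
 *
 *   init_memory_mapping(start, start + size);	// arch-specific step
 *   return __add_pages(nid, start >> PAGE_SHIFT,
 *			size >> PAGE_SHIFT, altmap, want_memblock);
 */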

/*
 * Reasonably generic function for adding memory. It is expected that
 * architectures that support memory hotplug will call this function after
 * deciding which zone the new pages should be added to.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;

	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	if (altmap) {
		/*
		 * Validate that the altmap is within the bounds of the
		 * total request.
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, section_nr_to_pfn(i), altmap,
				want_memblock);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check, see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
		cond_resched();
	}
	vmemmap_populate_print_last();
out:
	return err;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}
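
/*
 * The scan helpers above and below exist only for shrink_zone_span() and
 * shrink_pgdat_span(): they locate the new first/last valid section once
 * the section at the current span boundary has been removed. Passing
 * zone == NULL skips the zone check, which is what the pgdat variant
 * relies on.
 */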

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the zone to it.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the zone to it.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, removing it only creates a hole in the zone, so the
	 * zone's span need not change. But the zone might now consist of
	 * nothing but holes, so check whether any valid section remains.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* Skip the section that is being removed */
		if (start_pfn == pfn)
			continue;

		/* We found a valid section, so there is nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section left */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}
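
/*
 * Worked example (hypothetical pfns; 32768-page/128MiB sections as on
 * x86-64): a zone spanning [0x100000, 0x118000) loses its first section
 * [0x100000, 0x108000). find_smallest_section_pfn() returns 0x108000, so
 * shrink_zone_span() moves zone_start_pfn up to 0x108000 and spanned_pages
 * drops from 0x18000 to 0x10000.
 */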

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the pgdat to it.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we
		 * need to shrink pgdat->node_spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the pgdat to it.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, removing it only creates a hole in the pgdat, so the
	 * pgdat's span need not change. But the pgdat might now consist of
	 * nothing but holes, so check whether any valid section remains.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* Skip the section that is being removed */
		if (start_pfn == pfn)
			continue;

		/* We found a valid section, so there is nothing to do */
		return;
	}

	/* The pgdat has no valid section left */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms, map_offset);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be a multiple of section size)
 * @altmap: device page map backing the removed range, or %NULL
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. The caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long i;
	unsigned long map_offset = 0;
	int sections_to_remove, ret = 0;

	/* In the ZONE_DEVICE case the device driver owns the memory region */
	if (is_dev_zone(zone)) {
		if (altmap)
			map_offset = vmem_altmap_offset(altmap);
	} else {
		resource_size_t start, size;

		start = phys_start_pfn << PAGE_SHIFT;
		size = nr_pages * PAGE_SIZE;

		ret = release_mem_region_adjustable(&iomem_resource, start,
					size);
		if (ret) {
			resource_size_t endres = start + size - 1;

			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
					&start, &endres, ret);
		}
	}

	clear_zone_contiguous(zone);

	/*
	 * We can only remove entire sections.
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;

		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
		map_offset = 0;
		if (ret)
			break;
	}

	set_zone_contiguous(zone);

	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
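
/*
 * Example use of the callback switch below (sketch): a balloon driver such
 * as hv_balloon installs its own onlining hook so that freshly hot-added
 * pages stay under its control instead of going straight to the page
 * allocator:
 *
 *   set_online_page_callback(&hv_online_page);
 *   ...
 *   restore_online_page_callback(&hv_online_page);
 */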

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}
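
/*
 * online_pages_range() runs as a walk_system_ram_range() callback from
 * online_pages() below:
 *
 *   walk_system_ram_range(pfn, nr_pages, &onlined_pages, online_pages_range);
 *
 * so the onlined page count accumulates across every System RAM chunk in
 * the requested range.
 */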

/* check which fields of node_states will change when memory is onlined */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL, so set
	 * zone_last to ZONE_NORMAL.
	 *
	 * If we have neither HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to add the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is onlined.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM, so set
	 * zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE, so set
	 * zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * add the node to node_states[N_MEMORY] after the memory is onlined.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice, but set_pfnblock_flags_mask()
	 * expects the zone to span the pfn range. All the pages in the range
	 * are reserved, so nobody should be touching them, so we should be
	 * safe.
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range, it automatically falls back
 * to ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}
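
/*
 * The online type that steers the zone choice below normally comes from
 * userspace via sysfs, e.g.:
 *
 *   echo online_movable > /sys/devices/system/memory/memoryN/state
 *
 * maps to MMOP_ONLINE_MOVABLE, while a plain "online" (MMOP_ONLINE_KEEP)
 * leaves the decision to default_zone_for_pfn().
 */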

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in the simple case where zones do not
	 * overlap in the given range.
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone, or two zones overlap in
	 * the given range, then we use the movable zone only if movable_node
	 * is enabled, because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
	return zone;
}

/* Must be protected by mem_hotplug_begin() or a device_lock */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	nid = pfn_to_nid(pfn);
	/* associate pfn range with the zone */
	zone = move_pfn_range(online_type, nid, pfn, nr_pages);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, it is not in the zonelist, which
	 * means the page allocator ignores this zone. So the zonelist must
	 * be rebuilt after onlining.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL);
		else
			zone_pcp_update(zone);
	}

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
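
/*
 * Caller sketch for online_pages() (see the "Must be protected by ..."
 * comment above): the memory sysfs layer onlines a block while holding
 * the device hotplug lock, roughly:
 *
 *   lock_device_hotplug();
 *   ret = online_pages(start_pfn, nr_pages, MMOP_ONLINE_KEEP);
 *   unlock_device_hotplug();
 */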

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones, we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-yet-initialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which would cause
	 * /sys/device/system/node/nodeX/meminfo to show wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	/*
	 * When memory is hot-added, all of it is initially offline, so
	 * clear all zones' present_pages; they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
	return;
}

/**
 * try_online_node - online a node if offlined
 * @nid: the node to online
 *
 * Called by cpu_up() to online a node that has no onlined memory yet.
 */
int try_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);
out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* The memory range must be aligned to sections */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
				(unsigned long long)start,
				(unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}
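
/*
 * Example: with 128MiB sections (x86-64), hot-adding 256MiB at address
 * 0x100000000 passes the check above, while a 64MiB request fails with
 * -EINVAL because nr_pages is not a multiple of PAGES_PER_SECTION.
 */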
11357f36e3e5STang Chen */ 11367f36e3e5STang Chen memblock_add_node(start, size, nid); 11377f36e3e5STang Chen 1138a1e565aaSTang Chen new_node = !node_online(nid); 1139a1e565aaSTang Chen if (new_node) { 11409af3c2deSYasunori Goto pgdat = hotadd_new_pgdat(nid, start); 11416ad696d2SAndi Kleen ret = -ENOMEM; 11429af3c2deSYasunori Goto if (!pgdat) 114341b9e2d7SWen Congyang goto error; 11449af3c2deSYasunori Goto } 11459af3c2deSYasunori Goto 1146bc02af93SYasunori Goto /* call arch's memory hotadd */ 114724e6d5a5SChristoph Hellwig ret = arch_add_memory(nid, start, size, NULL, true); 1148bc02af93SYasunori Goto 11499af3c2deSYasunori Goto if (ret < 0) 11509af3c2deSYasunori Goto goto error; 11519af3c2deSYasunori Goto 11520fc44159SYasunori Goto /* We online the node here. We can't roll back from here. */ 11539af3c2deSYasunori Goto node_set_online(nid); 11549af3c2deSYasunori Goto 1155a1e565aaSTang Chen if (new_node) { 11569037a993SMichal Hocko unsigned long start_pfn = start >> PAGE_SHIFT; 11579037a993SMichal Hocko unsigned long nr_pages = size >> PAGE_SHIFT; 11589037a993SMichal Hocko 11599037a993SMichal Hocko ret = __register_one_node(nid); 11609037a993SMichal Hocko if (ret) 11619037a993SMichal Hocko goto register_fail; 11629037a993SMichal Hocko 11639037a993SMichal Hocko /* 11649037a993SMichal Hocko * link memory sections under this node. This is already 11659037a993SMichal Hocko * done when creating memory sections in register_new_memory 11669037a993SMichal Hocko * but that depends on the node being registered, so offline 11679037a993SMichal Hocko * nodes have to go through register_node. 11689037a993SMichal Hocko * TODO clean up this mess. 11699037a993SMichal Hocko */ 11709037a993SMichal Hocko ret = link_mem_sections(nid, start_pfn, nr_pages); 11719037a993SMichal Hocko register_fail: 11720fc44159SYasunori Goto /* 11730fc44159SYasunori Goto * If the sysfs file of the new node can't be created, cpus on 11740fc44159SYasunori Goto * the node can't be hot-added. There is no way to roll back now. 11750fc44159SYasunori Goto * So, check with BUG_ON() to catch it, reluctantly. 11760fc44159SYasunori Goto */ 11770fc44159SYasunori Goto BUG_ON(ret); 11780fc44159SYasunori Goto } 11790fc44159SYasunori Goto 1180d96ae530Sakpm@linux-foundation.org /* create new memmap entry */ 1181d96ae530Sakpm@linux-foundation.org firmware_map_add_hotplug(start, start + size, "System RAM"); 1182d96ae530Sakpm@linux-foundation.org 118331bc3858SVitaly Kuznetsov /* online pages if requested */ 118431bc3858SVitaly Kuznetsov if (online) 118531bc3858SVitaly Kuznetsov walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), 118631bc3858SVitaly Kuznetsov NULL, online_memory_block); 118731bc3858SVitaly Kuznetsov 11886ad696d2SAndi Kleen goto out; 11896ad696d2SAndi Kleen 11909af3c2deSYasunori Goto error: 11919af3c2deSYasunori Goto /* rollback pgdat allocation and others */ 1192dbac61a3SGustavo A. R.
Silva if (new_pgdat && pgdat) 11939af3c2deSYasunori Goto rollback_node_hotadd(nid, pgdat); 11947f36e3e5STang Chen memblock_remove(start, size); 11959af3c2deSYasunori Goto 11966ad696d2SAndi Kleen out: 1197bfc8c901SVladimir Davydov mem_hotplug_done(); 1198bc02af93SYasunori Goto return ret; 1199bc02af93SYasunori Goto } 120062cedb9fSDavid Vrabel EXPORT_SYMBOL_GPL(add_memory_resource); 120162cedb9fSDavid Vrabel 120262cedb9fSDavid Vrabel int __ref add_memory(int nid, u64 start, u64 size) 120362cedb9fSDavid Vrabel { 120462cedb9fSDavid Vrabel struct resource *res; 120562cedb9fSDavid Vrabel int ret; 120662cedb9fSDavid Vrabel 120762cedb9fSDavid Vrabel res = register_memory_resource(start, size); 12086f754ba4SVitaly Kuznetsov if (IS_ERR(res)) 12096f754ba4SVitaly Kuznetsov return PTR_ERR(res); 121062cedb9fSDavid Vrabel 121131bc3858SVitaly Kuznetsov ret = add_memory_resource(nid, res, memhp_auto_online); 121262cedb9fSDavid Vrabel if (ret < 0) 121362cedb9fSDavid Vrabel release_memory_resource(res); 121462cedb9fSDavid Vrabel return ret; 121562cedb9fSDavid Vrabel } 1216bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory); 12170c0e6195SKAMEZAWA Hiroyuki 12180c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE 12190c0e6195SKAMEZAWA Hiroyuki /* 12205c755e9fSBadari Pulavarty * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy 12215c755e9fSBadari Pulavarty * set and the size of the free page is given by page_order(). Using this, 12225c755e9fSBadari Pulavarty * the function determines if the pageblock contains only free pages. 12235c755e9fSBadari Pulavarty * Due to buddy constraints, a free page at least the size of a pageblock will 12245c755e9fSBadari Pulavarty * be located at the start of the pageblock. 12255c755e9fSBadari Pulavarty */ 12265c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page) 12275c755e9fSBadari Pulavarty { 12285c755e9fSBadari Pulavarty return PageBuddy(page) && page_order(page) >= pageblock_order; 12295c755e9fSBadari Pulavarty } 12305c755e9fSBadari Pulavarty 12315c755e9fSBadari Pulavarty /* Return the start of the next active pageblock after a given page */ 12325c755e9fSBadari Pulavarty static struct page *next_active_pageblock(struct page *page) 12335c755e9fSBadari Pulavarty { 12345c755e9fSBadari Pulavarty /* Ensure the starting page is pageblock-aligned */ 12355c755e9fSBadari Pulavarty BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 12365c755e9fSBadari Pulavarty 12375c755e9fSBadari Pulavarty /* If the entire pageblock is free, move to the end of free page */ 12380dcc48c1SKAMEZAWA Hiroyuki if (pageblock_free(page)) { 12390dcc48c1SKAMEZAWA Hiroyuki int order; 12400dcc48c1SKAMEZAWA Hiroyuki /* Be careful: we don't hold locks, so page_order can change. */ 12410dcc48c1SKAMEZAWA Hiroyuki order = page_order(page); 12420dcc48c1SKAMEZAWA Hiroyuki if ((order < MAX_ORDER) && (order >= pageblock_order)) 12430dcc48c1SKAMEZAWA Hiroyuki return page + (1 << order); 12440dcc48c1SKAMEZAWA Hiroyuki } 12455c755e9fSBadari Pulavarty 12460dcc48c1SKAMEZAWA Hiroyuki return page + pageblock_nr_pages; 12475c755e9fSBadari Pulavarty } 12485c755e9fSBadari Pulavarty 12495c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable.
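   The answer is advisory only: no locks are held while checking, so a
   pageblock can change state between this test and an actual offline
   attempt.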
*/ 1250c98940f6SYaowei Bai bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 12515c755e9fSBadari Pulavarty { 12525c755e9fSBadari Pulavarty struct page *page = pfn_to_page(start_pfn); 12535c755e9fSBadari Pulavarty struct page *end_page = page + nr_pages; 12545c755e9fSBadari Pulavarty 12555c755e9fSBadari Pulavarty /* Check the starting page of each pageblock within the range */ 12565c755e9fSBadari Pulavarty for (; page < end_page; page = next_active_pageblock(page)) { 125749ac8255SKAMEZAWA Hiroyuki if (!is_pageblock_removable_nolock(page)) 1258c98940f6SYaowei Bai return false; 125949ac8255SKAMEZAWA Hiroyuki cond_resched(); 12605c755e9fSBadari Pulavarty } 12615c755e9fSBadari Pulavarty 12625c755e9fSBadari Pulavarty /* All pageblocks in the memory block are likely to be hot-removable */ 1263c98940f6SYaowei Bai return true; 12645c755e9fSBadari Pulavarty } 12655c755e9fSBadari Pulavarty 12665c755e9fSBadari Pulavarty /* 1267deb88a2aSToshi Kani * Confirm all pages in a range [start, end) belong to the same zone. 1268a96dfddbSToshi Kani * When true, return its valid [start, end). 12690c0e6195SKAMEZAWA Hiroyuki */ 1270a96dfddbSToshi Kani int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, 1271a96dfddbSToshi Kani unsigned long *valid_start, unsigned long *valid_end) 12720c0e6195SKAMEZAWA Hiroyuki { 12735f0f2887SAndrew Banman unsigned long pfn, sec_end_pfn; 1274a96dfddbSToshi Kani unsigned long start, end; 12750c0e6195SKAMEZAWA Hiroyuki struct zone *zone = NULL; 12760c0e6195SKAMEZAWA Hiroyuki struct page *page; 12770c0e6195SKAMEZAWA Hiroyuki int i; 1278deb88a2aSToshi Kani for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); 12790c0e6195SKAMEZAWA Hiroyuki pfn < end_pfn; 1280deb88a2aSToshi Kani pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { 12815f0f2887SAndrew Banman /* Make sure the memory section is present first */ 12825f0f2887SAndrew Banman if (!present_section_nr(pfn_to_section_nr(pfn))) 12835f0f2887SAndrew Banman continue; 12845f0f2887SAndrew Banman for (; pfn < sec_end_pfn && pfn < end_pfn; 12850c0e6195SKAMEZAWA Hiroyuki pfn += MAX_ORDER_NR_PAGES) { 12860c0e6195SKAMEZAWA Hiroyuki i = 0; 12870c0e6195SKAMEZAWA Hiroyuki /* This is just a CONFIG_HOLES_IN_ZONE check.*/ 12885f0f2887SAndrew Banman while ((i < MAX_ORDER_NR_PAGES) && 12895f0f2887SAndrew Banman !pfn_valid_within(pfn + i)) 12900c0e6195SKAMEZAWA Hiroyuki i++; 1291d6d8c8a4Szhong jiang if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 12920c0e6195SKAMEZAWA Hiroyuki continue; 12930c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn + i); 12940c0e6195SKAMEZAWA Hiroyuki if (zone && page_zone(page) != zone) 12950c0e6195SKAMEZAWA Hiroyuki return 0; 1296a96dfddbSToshi Kani if (!zone) 1297a96dfddbSToshi Kani start = pfn + i; 12980c0e6195SKAMEZAWA Hiroyuki zone = page_zone(page); 1299a96dfddbSToshi Kani end = pfn + MAX_ORDER_NR_PAGES; 13000c0e6195SKAMEZAWA Hiroyuki } 13015f0f2887SAndrew Banman } 1302deb88a2aSToshi Kani 1303a96dfddbSToshi Kani if (zone) { 1304a96dfddbSToshi Kani *valid_start = start; 1305d6d8c8a4Szhong jiang *valid_end = min(end, end_pfn); 13060c0e6195SKAMEZAWA Hiroyuki return 1; 1307a96dfddbSToshi Kani } else { 1308deb88a2aSToshi Kani return 0; 13090c0e6195SKAMEZAWA Hiroyuki } 1310a96dfddbSToshi Kani } 13110c0e6195SKAMEZAWA Hiroyuki 13120c0e6195SKAMEZAWA Hiroyuki /* 13130efadf48SYisheng Xie * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, 13140efadf48SYisheng Xie * non-lru movable pages and hugepages). 
We scan by pfn because it's much 13150efadf48SYisheng Xie * easier than scanning over a linked list. This function returns the pfn 13160efadf48SYisheng Xie * of the first movable page found, otherwise 0. 13170c0e6195SKAMEZAWA Hiroyuki */ 1318c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 13190c0e6195SKAMEZAWA Hiroyuki { 13200c0e6195SKAMEZAWA Hiroyuki unsigned long pfn; 13210c0e6195SKAMEZAWA Hiroyuki struct page *page; 13220c0e6195SKAMEZAWA Hiroyuki for (pfn = start; pfn < end; pfn++) { 13230c0e6195SKAMEZAWA Hiroyuki if (pfn_valid(pfn)) { 13240c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn); 13250c0e6195SKAMEZAWA Hiroyuki if (PageLRU(page)) 13260c0e6195SKAMEZAWA Hiroyuki return pfn; 13270efadf48SYisheng Xie if (__PageMovable(page)) 13280efadf48SYisheng Xie return pfn; 1329c8721bbbSNaoya Horiguchi if (PageHuge(page)) { 13307e1f049eSNaoya Horiguchi if (page_huge_active(page)) 1331c8721bbbSNaoya Horiguchi return pfn; 1332c8721bbbSNaoya Horiguchi else 1333c8721bbbSNaoya Horiguchi pfn = round_up(pfn + 1, 1334c8721bbbSNaoya Horiguchi 1 << compound_order(page)) - 1; 1335c8721bbbSNaoya Horiguchi } 13360c0e6195SKAMEZAWA Hiroyuki } 13370c0e6195SKAMEZAWA Hiroyuki } 13380c0e6195SKAMEZAWA Hiroyuki return 0; 13390c0e6195SKAMEZAWA Hiroyuki } 13400c0e6195SKAMEZAWA Hiroyuki 1341394e31d2SXishi Qiu static struct page *new_node_page(struct page *page, unsigned long private, 1342394e31d2SXishi Qiu int **result) 1343394e31d2SXishi Qiu { 1344394e31d2SXishi Qiu int nid = page_to_nid(page); 1345231e97e2SLi Zhong nodemask_t nmask = node_states[N_MEMORY]; 13467f252f27SMichal Hocko 13477f252f27SMichal Hocko /* 13487f252f27SMichal Hocko * try to allocate from a different node but reuse this node if there 13497f252f27SMichal Hocko * are no other online nodes to be used (e.g.
we are offlining a part 13507f252f27SMichal Hocko * of the only existing node) 13517f252f27SMichal Hocko */ 13527f252f27SMichal Hocko node_clear(nid, nmask); 13537f252f27SMichal Hocko if (nodes_empty(nmask)) 13547f252f27SMichal Hocko node_set(nid, nmask); 1355394e31d2SXishi Qiu 13568b913238SMichal Hocko return new_page_nodemask(page, nid, &nmask); 1357394e31d2SXishi Qiu } 1358394e31d2SXishi Qiu 13590c0e6195SKAMEZAWA Hiroyuki #define NR_OFFLINE_AT_ONCE_PAGES (256) 13600c0e6195SKAMEZAWA Hiroyuki static int 13610c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) 13620c0e6195SKAMEZAWA Hiroyuki { 13630c0e6195SKAMEZAWA Hiroyuki unsigned long pfn; 13640c0e6195SKAMEZAWA Hiroyuki struct page *page; 13650c0e6195SKAMEZAWA Hiroyuki int move_pages = NR_OFFLINE_AT_ONCE_PAGES; 13660c0e6195SKAMEZAWA Hiroyuki int not_managed = 0; 13670c0e6195SKAMEZAWA Hiroyuki int ret = 0; 13680c0e6195SKAMEZAWA Hiroyuki LIST_HEAD(source); 13690c0e6195SKAMEZAWA Hiroyuki 13700c0e6195SKAMEZAWA Hiroyuki for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { 13710c0e6195SKAMEZAWA Hiroyuki if (!pfn_valid(pfn)) 13720c0e6195SKAMEZAWA Hiroyuki continue; 13730c0e6195SKAMEZAWA Hiroyuki page = pfn_to_page(pfn); 1374c8721bbbSNaoya Horiguchi 1375c8721bbbSNaoya Horiguchi if (PageHuge(page)) { 1376c8721bbbSNaoya Horiguchi struct page *head = compound_head(page); 1377c8721bbbSNaoya Horiguchi pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1; 1378c8721bbbSNaoya Horiguchi if (compound_order(head) > PFN_SECTION_SHIFT) { 1379c8721bbbSNaoya Horiguchi ret = -EBUSY; 1380c8721bbbSNaoya Horiguchi break; 1381c8721bbbSNaoya Horiguchi } 1382c8721bbbSNaoya Horiguchi if (isolate_huge_page(page, &source)) 1383c8721bbbSNaoya Horiguchi move_pages -= 1 << compound_order(head); 1384c8721bbbSNaoya Horiguchi continue; 13858135d892SNaoya Horiguchi } else if (thp_migration_supported() && PageTransHuge(page)) 13868135d892SNaoya Horiguchi pfn = page_to_pfn(compound_head(page)) 13878135d892SNaoya Horiguchi + hpage_nr_pages(page) - 1; 1388c8721bbbSNaoya Horiguchi 1389700c2a46SKonstantin Khlebnikov if (!get_page_unless_zero(page)) 13900c0e6195SKAMEZAWA Hiroyuki continue; 13910c0e6195SKAMEZAWA Hiroyuki /* 13920efadf48SYisheng Xie * We can skip free pages. And we can handle pages on the 13930efadf48SYisheng Xie * LRU as well as non-lru movable pages. 13940c0e6195SKAMEZAWA Hiroyuki */ 13950efadf48SYisheng Xie if (PageLRU(page)) 139662695a84SNick Piggin ret = isolate_lru_page(page); 13970efadf48SYisheng Xie else 13980efadf48SYisheng Xie ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); 13990c0e6195SKAMEZAWA Hiroyuki if (!ret) { /* Success */ 1400700c2a46SKonstantin Khlebnikov put_page(page); 140162695a84SNick Piggin list_add_tail(&page->lru, &source); 14020c0e6195SKAMEZAWA Hiroyuki move_pages--; 14030efadf48SYisheng Xie if (!__PageMovable(page)) 1404599d0c95SMel Gorman inc_node_page_state(page, NR_ISOLATED_ANON + 14056d9c285aSKOSAKI Motohiro page_is_file_cache(page)); 14066d9c285aSKOSAKI Motohiro 14070c0e6195SKAMEZAWA Hiroyuki } else { 14080c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_DEBUG_VM 14090efadf48SYisheng Xie pr_alert("failed to isolate pfn %lx\n", pfn); 14100efadf48SYisheng Xie dump_page(page, "isolation failed"); 14110c0e6195SKAMEZAWA Hiroyuki #endif 1412700c2a46SKonstantin Khlebnikov put_page(page); 141325985edcSLucas De Marchi /* Because we don't hold the big zone->lock, we should 1414809c4449SBob Liu check this again here.
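   (a non-zero page_count() here means somebody else still holds a
   reference, so we give up and fail the whole range with -EBUSY below)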
*/ 1415809c4449SBob Liu if (page_count(page)) { 1416809c4449SBob Liu not_managed++; 1417f3ab2636SBob Liu ret = -EBUSY; 1418809c4449SBob Liu break; 1419809c4449SBob Liu } 14200c0e6195SKAMEZAWA Hiroyuki } 14210c0e6195SKAMEZAWA Hiroyuki } 1422f3ab2636SBob Liu if (!list_empty(&source)) { 14230c0e6195SKAMEZAWA Hiroyuki if (not_managed) { 1424c8721bbbSNaoya Horiguchi putback_movable_pages(&source); 14250c0e6195SKAMEZAWA Hiroyuki goto out; 14260c0e6195SKAMEZAWA Hiroyuki } 142774c08f98SMinchan Kim 1428394e31d2SXishi Qiu /* Allocate a new page from the nearest neighbor node */ 1429394e31d2SXishi Qiu ret = migrate_pages(&source, new_node_page, NULL, 0, 14309c620e2bSHugh Dickins MIGRATE_SYNC, MR_MEMORY_HOTPLUG); 1431cf608ac1SMinchan Kim if (ret) 1432c8721bbbSNaoya Horiguchi putback_movable_pages(&source); 1433f3ab2636SBob Liu } 14340c0e6195SKAMEZAWA Hiroyuki out: 14350c0e6195SKAMEZAWA Hiroyuki return ret; 14360c0e6195SKAMEZAWA Hiroyuki } 14370c0e6195SKAMEZAWA Hiroyuki 14380c0e6195SKAMEZAWA Hiroyuki /* 14390c0e6195SKAMEZAWA Hiroyuki * remove from free_area[] and mark all as Reserved. 14400c0e6195SKAMEZAWA Hiroyuki */ 14410c0e6195SKAMEZAWA Hiroyuki static int 14420c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, 14430c0e6195SKAMEZAWA Hiroyuki void *data) 14440c0e6195SKAMEZAWA Hiroyuki { 14450c0e6195SKAMEZAWA Hiroyuki __offline_isolated_pages(start, start + nr_pages); 14460c0e6195SKAMEZAWA Hiroyuki return 0; 14470c0e6195SKAMEZAWA Hiroyuki } 14480c0e6195SKAMEZAWA Hiroyuki 14490c0e6195SKAMEZAWA Hiroyuki static void 14500c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 14510c0e6195SKAMEZAWA Hiroyuki { 1452908eedc6SKAMEZAWA Hiroyuki walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, 14530c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb); 14540c0e6195SKAMEZAWA Hiroyuki } 14550c0e6195SKAMEZAWA Hiroyuki 14560c0e6195SKAMEZAWA Hiroyuki /* 14570c0e6195SKAMEZAWA Hiroyuki * Check that all pages in the range, recorded as a memory resource, are isolated.
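 * Returns the number of isolated pages on success, or a negative error
 * value if some pages in the range are not isolated.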
14580c0e6195SKAMEZAWA Hiroyuki */ 14590c0e6195SKAMEZAWA Hiroyuki static int 14600c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, 14610c0e6195SKAMEZAWA Hiroyuki void *data) 14620c0e6195SKAMEZAWA Hiroyuki { 14630c0e6195SKAMEZAWA Hiroyuki int ret; 14640c0e6195SKAMEZAWA Hiroyuki long offlined = *(long *)data; 1465b023f468SWen Congyang ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true); 14660c0e6195SKAMEZAWA Hiroyuki offlined = nr_pages; 14670c0e6195SKAMEZAWA Hiroyuki if (!ret) 14680c0e6195SKAMEZAWA Hiroyuki *(long *)data += offlined; 14690c0e6195SKAMEZAWA Hiroyuki return ret; 14700c0e6195SKAMEZAWA Hiroyuki } 14710c0e6195SKAMEZAWA Hiroyuki 14720c0e6195SKAMEZAWA Hiroyuki static long 14730c0e6195SKAMEZAWA Hiroyuki check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) 14740c0e6195SKAMEZAWA Hiroyuki { 14750c0e6195SKAMEZAWA Hiroyuki long offlined = 0; 14760c0e6195SKAMEZAWA Hiroyuki int ret; 14770c0e6195SKAMEZAWA Hiroyuki 1478908eedc6SKAMEZAWA Hiroyuki ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, 14790c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb); 14800c0e6195SKAMEZAWA Hiroyuki if (ret < 0) 14810c0e6195SKAMEZAWA Hiroyuki offlined = (long)ret; 14820c0e6195SKAMEZAWA Hiroyuki return offlined; 14830c0e6195SKAMEZAWA Hiroyuki } 14840c0e6195SKAMEZAWA Hiroyuki 1485c5320926STang Chen static int __init cmdline_parse_movable_node(char *p) 1486c5320926STang Chen { 14874932381eSMichal Hocko #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 148855ac590cSTang Chen movable_node_enabled = true; 14894932381eSMichal Hocko #else 14904932381eSMichal Hocko pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n"); 14914932381eSMichal Hocko #endif 1492c5320926STang Chen return 0; 1493c5320926STang Chen } 1494c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node); 1495c5320926STang Chen 1496d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */ 1497d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages, 1498d9713679SLai Jiangshan struct zone *zone, struct memory_notify *arg) 1499d9713679SLai Jiangshan { 1500d9713679SLai Jiangshan struct pglist_data *pgdat = zone->zone_pgdat; 1501d9713679SLai Jiangshan unsigned long present_pages = 0; 1502d9713679SLai Jiangshan enum zone_type zt, zone_last = ZONE_NORMAL; 1503d9713679SLai Jiangshan 1504d9713679SLai Jiangshan /* 15056715ddf9SLai Jiangshan * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] 15066715ddf9SLai Jiangshan * contains nodes which have zones of 0...ZONE_NORMAL, 15076715ddf9SLai Jiangshan * set zone_last to ZONE_NORMAL. 1508d9713679SLai Jiangshan * 15096715ddf9SLai Jiangshan * If we don't have HIGHMEM nor movable node, 15106715ddf9SLai Jiangshan * node_states[N_NORMAL_MEMORY] contains nodes which have zones of 15116715ddf9SLai Jiangshan * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. 1512d9713679SLai Jiangshan */ 15136715ddf9SLai Jiangshan if (N_MEMORY == N_NORMAL_MEMORY) 1514d9713679SLai Jiangshan zone_last = ZONE_MOVABLE; 1515d9713679SLai Jiangshan 1516d9713679SLai Jiangshan /* 1517d9713679SLai Jiangshan * check whether node_states[N_NORMAL_MEMORY] will be changed. 
1518d9713679SLai Jiangshan * If the memory to be offline is in a zone of 0...zone_last, 1519d9713679SLai Jiangshan * and it is the last present memory, 0...zone_last will 1520d9713679SLai Jiangshan * become empty after offline, thus we can determine that we will 1521d9713679SLai Jiangshan * need to clear the node from node_states[N_NORMAL_MEMORY]. 1522d9713679SLai Jiangshan */ 1523d9713679SLai Jiangshan for (zt = 0; zt <= zone_last; zt++) 1524d9713679SLai Jiangshan present_pages += pgdat->node_zones[zt].present_pages; 1525d9713679SLai Jiangshan if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) 1526d9713679SLai Jiangshan arg->status_change_nid_normal = zone_to_nid(zone); 1527d9713679SLai Jiangshan else 1528d9713679SLai Jiangshan arg->status_change_nid_normal = -1; 1529d9713679SLai Jiangshan 15306715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM 15316715ddf9SLai Jiangshan /* 15326715ddf9SLai Jiangshan * If we have movable node, node_states[N_HIGH_MEMORY] 15336715ddf9SLai Jiangshan * contains nodes which have zones of 0...ZONE_HIGHMEM, 15346715ddf9SLai Jiangshan * set zone_last to ZONE_HIGHMEM. 15356715ddf9SLai Jiangshan * 15366715ddf9SLai Jiangshan * If we don't have movable node, node_states[N_NORMAL_MEMORY] 15376715ddf9SLai Jiangshan * contains nodes which have zones of 0...ZONE_MOVABLE, 15386715ddf9SLai Jiangshan * set zone_last to ZONE_MOVABLE. 15396715ddf9SLai Jiangshan */ 15406715ddf9SLai Jiangshan zone_last = ZONE_HIGHMEM; 15416715ddf9SLai Jiangshan if (N_MEMORY == N_HIGH_MEMORY) 15426715ddf9SLai Jiangshan zone_last = ZONE_MOVABLE; 15436715ddf9SLai Jiangshan 15446715ddf9SLai Jiangshan for (; zt <= zone_last; zt++) 15456715ddf9SLai Jiangshan present_pages += pgdat->node_zones[zt].present_pages; 15466715ddf9SLai Jiangshan if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) 15476715ddf9SLai Jiangshan arg->status_change_nid_high = zone_to_nid(zone); 15486715ddf9SLai Jiangshan else 15496715ddf9SLai Jiangshan arg->status_change_nid_high = -1; 15506715ddf9SLai Jiangshan #else 15516715ddf9SLai Jiangshan arg->status_change_nid_high = arg->status_change_nid_normal; 15526715ddf9SLai Jiangshan #endif 15536715ddf9SLai Jiangshan 1554d9713679SLai Jiangshan /* 1555d9713679SLai Jiangshan * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE 1556d9713679SLai Jiangshan */ 1557d9713679SLai Jiangshan zone_last = ZONE_MOVABLE; 1558d9713679SLai Jiangshan 1559d9713679SLai Jiangshan /* 1560d9713679SLai Jiangshan * check whether node_states[N_HIGH_MEMORY] will be changed. 1561d9713679SLai Jiangshan * If we try to offline the last present @nr_pages from the node, 1562d9713679SLai Jiangshan * we can determine that we will need to clear the node from 1563d9713679SLai Jiangshan * node_states[N_HIGH_MEMORY].
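 * (in that case status_change_nid is set to the node id below;
 * otherwise it is left as -1 and the node keeps its N_MEMORY state)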
1564d9713679SLai Jiangshan */ 1565d9713679SLai Jiangshan for (; zt <= zone_last; zt++) 1566d9713679SLai Jiangshan present_pages += pgdat->node_zones[zt].present_pages; 1567d9713679SLai Jiangshan if (nr_pages >= present_pages) 1568d9713679SLai Jiangshan arg->status_change_nid = zone_to_nid(zone); 1569d9713679SLai Jiangshan else 1570d9713679SLai Jiangshan arg->status_change_nid = -1; 1571d9713679SLai Jiangshan } 1572d9713679SLai Jiangshan 1573d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg) 1574d9713679SLai Jiangshan { 1575d9713679SLai Jiangshan if (arg->status_change_nid_normal >= 0) 1576d9713679SLai Jiangshan node_clear_state(node, N_NORMAL_MEMORY); 1577d9713679SLai Jiangshan 15786715ddf9SLai Jiangshan if ((N_MEMORY != N_NORMAL_MEMORY) && 15796715ddf9SLai Jiangshan (arg->status_change_nid_high >= 0)) 1580d9713679SLai Jiangshan node_clear_state(node, N_HIGH_MEMORY); 15816715ddf9SLai Jiangshan 15826715ddf9SLai Jiangshan if ((N_MEMORY != N_HIGH_MEMORY) && 15836715ddf9SLai Jiangshan (arg->status_change_nid >= 0)) 15846715ddf9SLai Jiangshan node_clear_state(node, N_MEMORY); 1585d9713679SLai Jiangshan } 1586d9713679SLai Jiangshan 1587a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn, 1588ecde0f3eSMichal Hocko unsigned long end_pfn) 15890c0e6195SKAMEZAWA Hiroyuki { 1590ecde0f3eSMichal Hocko unsigned long pfn, nr_pages; 15910c0e6195SKAMEZAWA Hiroyuki long offlined_pages; 159272b39cfcSMichal Hocko int ret, node; 1593d702909fSCody P Schafer unsigned long flags; 1594a96dfddbSToshi Kani unsigned long valid_start, valid_end; 15950c0e6195SKAMEZAWA Hiroyuki struct zone *zone; 15967b78d335SYasunori Goto struct memory_notify arg; 15970c0e6195SKAMEZAWA Hiroyuki 15980c0e6195SKAMEZAWA Hiroyuki /* at least, alignment against pageblock is necessary */ 15990c0e6195SKAMEZAWA Hiroyuki if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) 16000c0e6195SKAMEZAWA Hiroyuki return -EINVAL; 16010c0e6195SKAMEZAWA Hiroyuki if (!IS_ALIGNED(end_pfn, pageblock_nr_pages)) 16020c0e6195SKAMEZAWA Hiroyuki return -EINVAL; 16030c0e6195SKAMEZAWA Hiroyuki /* This makes hotplug much easier... and readable. 16040c0e6195SKAMEZAWA Hiroyuki We assume this for now.
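   (i.e. the [start_pfn, end_pfn) range must be pageblock aligned and
   must lie entirely within a single zone)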
*/ 1605a96dfddbSToshi Kani if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) 16060c0e6195SKAMEZAWA Hiroyuki return -EINVAL; 16077b78d335SYasunori Goto 1608a96dfddbSToshi Kani zone = page_zone(pfn_to_page(valid_start)); 16097b78d335SYasunori Goto node = zone_to_nid(zone); 16107b78d335SYasunori Goto nr_pages = end_pfn - start_pfn; 16117b78d335SYasunori Goto 16120c0e6195SKAMEZAWA Hiroyuki /* set above range as isolated */ 1613b023f468SWen Congyang ret = start_isolate_page_range(start_pfn, end_pfn, 1614b023f468SWen Congyang MIGRATE_MOVABLE, true); 16150c0e6195SKAMEZAWA Hiroyuki if (ret) 161630467e0bSDavid Rientjes return ret; 16177b78d335SYasunori Goto 16187b78d335SYasunori Goto arg.start_pfn = start_pfn; 16197b78d335SYasunori Goto arg.nr_pages = nr_pages; 1620d9713679SLai Jiangshan node_states_check_changes_offline(nr_pages, zone, &arg); 16217b78d335SYasunori Goto 16227b78d335SYasunori Goto ret = memory_notify(MEM_GOING_OFFLINE, &arg); 16237b78d335SYasunori Goto ret = notifier_to_errno(ret); 16247b78d335SYasunori Goto if (ret) 16257b78d335SYasunori Goto goto failed_removal; 16267b78d335SYasunori Goto 16270c0e6195SKAMEZAWA Hiroyuki pfn = start_pfn; 16280c0e6195SKAMEZAWA Hiroyuki repeat: 16290c0e6195SKAMEZAWA Hiroyuki /* start memory hot removal */ 16300c0e6195SKAMEZAWA Hiroyuki ret = -EINTR; 16310c0e6195SKAMEZAWA Hiroyuki if (signal_pending(current)) 16320c0e6195SKAMEZAWA Hiroyuki goto failed_removal; 163372b39cfcSMichal Hocko 16340c0e6195SKAMEZAWA Hiroyuki cond_resched(); 163572b39cfcSMichal Hocko lru_add_drain_all_cpuslocked(); 1636c0554329SVlastimil Babka drain_all_pages(zone); 16370c0e6195SKAMEZAWA Hiroyuki 1638c8721bbbSNaoya Horiguchi pfn = scan_movable_pages(start_pfn, end_pfn); 1639c8721bbbSNaoya Horiguchi if (pfn) { /* We have movable pages */ 16400c0e6195SKAMEZAWA Hiroyuki ret = do_migrate_range(pfn, end_pfn); 16410c0e6195SKAMEZAWA Hiroyuki goto repeat; 16420c0e6195SKAMEZAWA Hiroyuki } 164372b39cfcSMichal Hocko 1644c8721bbbSNaoya Horiguchi /* 1645c8721bbbSNaoya Horiguchi * Dissolve free hugepages in the memory block before actually 1646c8721bbbSNaoya Horiguchi * offlining, in order to keep hugetlbfs's object counting consistent. 1647c8721bbbSNaoya Horiguchi */ 1648082d5b6bSGerald Schaefer ret = dissolve_free_huge_pages(start_pfn, end_pfn); 1649082d5b6bSGerald Schaefer if (ret) 1650082d5b6bSGerald Schaefer goto failed_removal; 16510c0e6195SKAMEZAWA Hiroyuki /* check again */ 16520c0e6195SKAMEZAWA Hiroyuki offlined_pages = check_pages_isolated(start_pfn, end_pfn); 165372b39cfcSMichal Hocko if (offlined_pages < 0) 165472b39cfcSMichal Hocko goto repeat; 1655e33e33b4SChen Yucong pr_info("Offlined Pages %ld\n", offlined_pages); 1656b3834be5SAdam Buchbinder /* Ok, all of our target is isolated. 16570c0e6195SKAMEZAWA Hiroyuki We cannot do rollback at this point.
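   From here on we only tear things down: offline the isolated pages,
   fix up the zone and node accounting and finally notify with
   MEM_OFFLINE.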
*/ 16580c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(start_pfn, end_pfn); 1659dbc0e4ceSKAMEZAWA Hiroyuki /* reset pagetype flags and make migrate type MOVABLE */ 16600815f3d8SMichal Nazarewicz undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 16610c0e6195SKAMEZAWA Hiroyuki /* removal success */ 16623dcc0571SJiang Liu adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); 16630c0e6195SKAMEZAWA Hiroyuki zone->present_pages -= offlined_pages; 1664d702909fSCody P Schafer 1665d702909fSCody P Schafer pgdat_resize_lock(zone->zone_pgdat, &flags); 16660c0e6195SKAMEZAWA Hiroyuki zone->zone_pgdat->node_present_pages -= offlined_pages; 1667d702909fSCody P Schafer pgdat_resize_unlock(zone->zone_pgdat, &flags); 16687b78d335SYasunori Goto 16691b79acc9SKOSAKI Motohiro init_per_zone_wmark_min(); 16701b79acc9SKOSAKI Motohiro 16711e8537baSXishi Qiu if (!populated_zone(zone)) { 1672340175b7SJiang Liu zone_pcp_reset(zone); 167372675e13SMichal Hocko build_all_zonelists(NULL); 16741e8537baSXishi Qiu } else 16751e8537baSXishi Qiu zone_pcp_update(zone); 1676340175b7SJiang Liu 1677d9713679SLai Jiangshan node_states_clear_node(node, &arg); 1678698b1b30SVlastimil Babka if (arg.status_change_nid >= 0) { 16798fe23e05SDavid Rientjes kswapd_stop(node); 1680698b1b30SVlastimil Babka kcompactd_stop(node); 1681698b1b30SVlastimil Babka } 1682bce7394aSMinchan Kim 16830c0e6195SKAMEZAWA Hiroyuki vm_total_pages = nr_free_pagecache_pages(); 16840c0e6195SKAMEZAWA Hiroyuki writeback_set_ratelimit(); 16857b78d335SYasunori Goto 16867b78d335SYasunori Goto memory_notify(MEM_OFFLINE, &arg); 16870c0e6195SKAMEZAWA Hiroyuki return 0; 16880c0e6195SKAMEZAWA Hiroyuki 16890c0e6195SKAMEZAWA Hiroyuki failed_removal: 1690e33e33b4SChen Yucong pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n", 1691a62e2f4fSBjorn Helgaas (unsigned long long) start_pfn << PAGE_SHIFT, 1692a62e2f4fSBjorn Helgaas ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); 16937b78d335SYasunori Goto memory_notify(MEM_CANCEL_OFFLINE, &arg); 16940c0e6195SKAMEZAWA Hiroyuki /* push back to free area */ 16950815f3d8SMichal Nazarewicz undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 16960c0e6195SKAMEZAWA Hiroyuki return ret; 16970c0e6195SKAMEZAWA Hiroyuki } 169871088785SBadari Pulavarty 1699b93e0f32SMichal Hocko /* Must be protected by mem_hotplug_begin() or a device_lock */ 1700a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages) 1701a16cee10SWen Congyang { 1702ecde0f3eSMichal Hocko return __offline_pages(start_pfn, start_pfn + nr_pages); 1703a16cee10SWen Congyang } 1704e2ff3940SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */ 1705a16cee10SWen Congyang 1706bbc76be6SWen Congyang /** 1707bbc76be6SWen Congyang * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) 1708bbc76be6SWen Congyang * @start_pfn: start pfn of the memory range 1709e05c4bbfSToshi Kani * @end_pfn: end pfn of the memory range 1710bbc76be6SWen Congyang * @arg: argument passed to func 1711bbc76be6SWen Congyang * @func: callback for each memory section walked 1712bbc76be6SWen Congyang * 1713bbc76be6SWen Congyang * This function walks through all present mem sections in range 1714bbc76be6SWen Congyang * [start_pfn, end_pfn) and calls func on each mem section. 1715bbc76be6SWen Congyang * 1716bbc76be6SWen Congyang * Returns the return value of func. 1717bbc76be6SWen Congyang */ 1718e2ff3940SRafael J.
Wysocki int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 1719bbc76be6SWen Congyang void *arg, int (*func)(struct memory_block *, void *)) 172071088785SBadari Pulavarty { 1721e90bdb7fSWen Congyang struct memory_block *mem = NULL; 1722e90bdb7fSWen Congyang struct mem_section *section; 1723e90bdb7fSWen Congyang unsigned long pfn, section_nr; 1724e90bdb7fSWen Congyang int ret; 172571088785SBadari Pulavarty 1726e90bdb7fSWen Congyang for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 1727e90bdb7fSWen Congyang section_nr = pfn_to_section_nr(pfn); 1728e90bdb7fSWen Congyang if (!present_section_nr(section_nr)) 1729e90bdb7fSWen Congyang continue; 1730e90bdb7fSWen Congyang 1731e90bdb7fSWen Congyang section = __nr_to_section(section_nr); 1732e90bdb7fSWen Congyang /* same memblock? */ 1733e90bdb7fSWen Congyang if (mem) 1734e90bdb7fSWen Congyang if ((section_nr >= mem->start_section_nr) && 1735e90bdb7fSWen Congyang (section_nr <= mem->end_section_nr)) 1736e90bdb7fSWen Congyang continue; 1737e90bdb7fSWen Congyang 1738e90bdb7fSWen Congyang mem = find_memory_block_hinted(section, mem); 1739e90bdb7fSWen Congyang if (!mem) 1740e90bdb7fSWen Congyang continue; 1741e90bdb7fSWen Congyang 1742bbc76be6SWen Congyang ret = func(mem, arg); 1743e90bdb7fSWen Congyang if (ret) { 1744e90bdb7fSWen Congyang kobject_put(&mem->dev.kobj); 1745e90bdb7fSWen Congyang return ret; 1746e90bdb7fSWen Congyang } 1747e90bdb7fSWen Congyang } 1748e90bdb7fSWen Congyang 1749e90bdb7fSWen Congyang if (mem) 1750e90bdb7fSWen Congyang kobject_put(&mem->dev.kobj); 1751e90bdb7fSWen Congyang 1752bbc76be6SWen Congyang return 0; 1753bbc76be6SWen Congyang } 1754bbc76be6SWen Congyang 1755e2ff3940SRafael J. Wysocki #ifdef CONFIG_MEMORY_HOTREMOVE 1756d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) 1757bbc76be6SWen Congyang { 1758bbc76be6SWen Congyang int ret = !is_memblock_offlined(mem); 1759bbc76be6SWen Congyang 1760349daa0fSRandy Dunlap if (unlikely(ret)) { 1761349daa0fSRandy Dunlap phys_addr_t beginpa, endpa; 1762349daa0fSRandy Dunlap 1763349daa0fSRandy Dunlap beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); 1764349daa0fSRandy Dunlap endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; 1765756a025fSJoe Perches pr_warn("removing memory failed, because memory [%pa-%pa] is online\n", 1766349daa0fSRandy Dunlap &beginpa, &endpa); 1767349daa0fSRandy Dunlap } 1768bbc76be6SWen Congyang 1769bbc76be6SWen Congyang return ret; 1770bbc76be6SWen Congyang } 1771bbc76be6SWen Congyang 17720f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat) 177360a5a19eSTang Chen { 177460a5a19eSTang Chen int cpu; 177560a5a19eSTang Chen 177660a5a19eSTang Chen for_each_present_cpu(cpu) { 177760a5a19eSTang Chen if (cpu_to_node(cpu) == pgdat->node_id) 177860a5a19eSTang Chen /* 177960a5a19eSTang Chen * a cpu on this node hasn't been removed, so we can't 178060a5a19eSTang Chen * offline this node.
*/ 178260a5a19eSTang Chen return -EBUSY; 178360a5a19eSTang Chen } 178460a5a19eSTang Chen 178560a5a19eSTang Chen return 0; 178660a5a19eSTang Chen } 178760a5a19eSTang Chen 17880f1cfe9dSToshi Kani static void unmap_cpu_on_node(pg_data_t *pgdat) 1789e13fe869SWen Congyang { 1790e13fe869SWen Congyang #ifdef CONFIG_ACPI_NUMA 1791e13fe869SWen Congyang int cpu; 1792e13fe869SWen Congyang 1793e13fe869SWen Congyang for_each_possible_cpu(cpu) 1794e13fe869SWen Congyang if (cpu_to_node(cpu) == pgdat->node_id) 1795e13fe869SWen Congyang numa_clear_node(cpu); 1796e13fe869SWen Congyang #endif 1797e13fe869SWen Congyang } 1798e13fe869SWen Congyang 17990f1cfe9dSToshi Kani static int check_and_unmap_cpu_on_node(pg_data_t *pgdat) 1800e13fe869SWen Congyang { 18010f1cfe9dSToshi Kani int ret; 1802e13fe869SWen Congyang 18030f1cfe9dSToshi Kani ret = check_cpu_on_node(pgdat); 1804e13fe869SWen Congyang if (ret) 1805e13fe869SWen Congyang return ret; 1806e13fe869SWen Congyang 1807e13fe869SWen Congyang /* 1808e13fe869SWen Congyang * the node will be offlined when we come here, so we can clear 1809e13fe869SWen Congyang * the cpu_to_node() now. 1810e13fe869SWen Congyang */ 1811e13fe869SWen Congyang 18120f1cfe9dSToshi Kani unmap_cpu_on_node(pgdat); 1813e13fe869SWen Congyang return 0; 1814e13fe869SWen Congyang } 1815e13fe869SWen Congyang 18160f1cfe9dSToshi Kani /** 18170f1cfe9dSToshi Kani * try_offline_node 18180f1cfe9dSToshi Kani * 18190f1cfe9dSToshi Kani * Offline a node if all memory sections and cpus of the node are removed. 18200f1cfe9dSToshi Kani * 18210f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug 18220f1cfe9dSToshi Kani * and online/offline operations before this call. 18230f1cfe9dSToshi Kani */ 182490b30cdcSWen Congyang void try_offline_node(int nid) 182560a5a19eSTang Chen { 1826d822b86aSWen Congyang pg_data_t *pgdat = NODE_DATA(nid); 1827d822b86aSWen Congyang unsigned long start_pfn = pgdat->node_start_pfn; 1828d822b86aSWen Congyang unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; 182960a5a19eSTang Chen unsigned long pfn; 183060a5a19eSTang Chen 183160a5a19eSTang Chen for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 183260a5a19eSTang Chen unsigned long section_nr = pfn_to_section_nr(pfn); 183360a5a19eSTang Chen 183460a5a19eSTang Chen if (!present_section_nr(section_nr)) 183560a5a19eSTang Chen continue; 183660a5a19eSTang Chen 183760a5a19eSTang Chen if (pfn_to_nid(pfn) != nid) 183860a5a19eSTang Chen continue; 183960a5a19eSTang Chen 184060a5a19eSTang Chen /* 184160a5a19eSTang Chen * some memory sections of this node are not removed, and we 184260a5a19eSTang Chen * can't offline the node now. 184360a5a19eSTang Chen */ 184460a5a19eSTang Chen return; 184560a5a19eSTang Chen } 184660a5a19eSTang Chen 18470f1cfe9dSToshi Kani if (check_and_unmap_cpu_on_node(pgdat)) 184860a5a19eSTang Chen return; 184960a5a19eSTang Chen 185060a5a19eSTang Chen /* 185160a5a19eSTang Chen * all memory/cpus of this node are removed, so we can offline 185260a5a19eSTang Chen * this node now.
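 * (node_set_offline() below clears the node's online state and
 * unregister_one_node() removes its sysfs representation)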
185360a5a19eSTang Chen */ 185460a5a19eSTang Chen node_set_offline(nid); 185560a5a19eSTang Chen unregister_one_node(nid); 185660a5a19eSTang Chen } 185790b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node); 185860a5a19eSTang Chen 18590f1cfe9dSToshi Kani /** 18600f1cfe9dSToshi Kani * remove_memory 18610f1cfe9dSToshi Kani * 18620f1cfe9dSToshi Kani * NOTE: The caller must call lock_device_hotplug() to serialize hotplug 18630f1cfe9dSToshi Kani * and online/offline operations before this call, as required by 18640f1cfe9dSToshi Kani * try_offline_node(). 18650f1cfe9dSToshi Kani */ 1866242831ebSRafael J. Wysocki void __ref remove_memory(int nid, u64 start, u64 size) 1867bbc76be6SWen Congyang { 1868242831ebSRafael J. Wysocki int ret; 1869993c1aadSWen Congyang 187027356f54SToshi Kani BUG_ON(check_hotplug_memory_range(start, size)); 187127356f54SToshi Kani 1872bfc8c901SVladimir Davydov mem_hotplug_begin(); 18736677e3eaSYasuaki Ishimatsu 18746677e3eaSYasuaki Ishimatsu /* 1875242831ebSRafael J. Wysocki * All memory blocks must be offlined before removing memory. Check 1876242831ebSRafael J. Wysocki * whether all memory blocks in question are offline and trigger a BUG() 1877242831ebSRafael J. Wysocki * if this is not the case. 18786677e3eaSYasuaki Ishimatsu */ 1879242831ebSRafael J. Wysocki ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, 1880d6de9d53SXishi Qiu check_memblock_offlined_cb); 1881bfc8c901SVladimir Davydov if (ret) 1882242831ebSRafael J. Wysocki BUG(); 18836677e3eaSYasuaki Ishimatsu 188446c66c4bSYasuaki Ishimatsu /* remove memmap entry */ 188546c66c4bSYasuaki Ishimatsu firmware_map_remove(start, start + size, "System RAM"); 1886f9126ab9SXishi Qiu memblock_free(start, size); 1887f9126ab9SXishi Qiu memblock_remove(start, size); 188846c66c4bSYasuaki Ishimatsu 1889*da024512SChristoph Hellwig arch_remove_memory(start, size, NULL); 189024d335caSWen Congyang 189160a5a19eSTang Chen try_offline_node(nid); 189260a5a19eSTang Chen 1893bfc8c901SVladimir Davydov mem_hotplug_done(); 189471088785SBadari Pulavarty } 189571088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory); 1896aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */ 1897