/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM;
        if (request_resource(&iomem_resource, res) < 0) {
                printk("System RAM resource %llx - %llx cannot be added\n",
                       (unsigned long long)res->start,
                       (unsigned long long)res->end);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}


#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;

        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret = 0;
                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret < 0)
                        return ret;
        }
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        /* When initializing the mem_map, align the hot-added range to sections. */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the ioresource collision
                 * check; see add_memory() => register_memory_resource().
                 * A warning will be printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
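
/*
 * Illustrative sketch only (not built): roughly how an architecture back end
 * might wire its arch_add_memory() up to __add_pages().  The zone choice and
 * the omission of arch-specific setup (e.g. extending the kernel direct
 * mapping) are simplifying assumptions; see the real implementations under
 * arch/ for details.
 */
#if 0
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL; /* assumed zone */
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        /* Arch page-table / direct-mapping setup would normally go here. */

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif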

static void grow_zone_span(struct zone *zone,
                unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
                unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;
        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        online_page(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}

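/*
 * Note: online_pages() is normally reached from the memory sysfs code
 * (drivers/base/memory.c) when "online" is written to
 * /sys/devices/system/memory/memoryX/state, with the corresponding
 * memory_block's state_sem held.
 */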
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;

        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_sem.
         */
        zone = page_zone(pfn_to_page(pfn));
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, pfn, pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        /*
         * If this zone is not populated, then it is not in the zonelist.
         * This means the page allocator ignores this zone.
         * So, the zonelist must be updated after onlining.
         */
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;

        walk_memory_resource(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;

        setup_per_zone_pages_min();
        if (onlined_pages) {
                kswapd_run(zone_to_nid(zone));
                node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
        }

        if (need_zonelists_rebuild)
                build_all_zonelists();
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages. */
        free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
        return;
}


int add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (!res)
                return -EEXIST;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                if (!pgdat) {
                        ret = -ENOMEM;
                        goto error;
                }
                new_pgdat = 1;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);

        if (ret < 0)
                goto error;

        /* We online the node here.  We can't roll back from here. */
        node_set_online(nid);

        cpuset_track_online_nodes();

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file of the new node can't be created, cpus
                 * on the node can't be hot-added.  There is no rollback way
                 * now, so check it with BUG_ON() to catch it reluctantly..
                 */
                BUG_ON(ret);
        }

        return ret;
error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
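
/*
 * For reference, add_memory() is typically driven from outside this file,
 * for example (call sites paraphrased, details vary by tree):
 *
 *      - the ACPI memory hotplug driver (drivers/acpi/acpi_memhotplug.c):
 *              add_memory(nid, info->start_addr, info->length);
 *      - the sysfs "probe" interface (drivers/base/memory.c):
 *              add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
 *
 * The new memory must still be onlined afterwards (see online_pages() above).
 */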

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to the
 * same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += MAX_ORDER_NR_PAGES) {
                i = 0;
                /* This is just a CONFIG_HOLES_IN_ZONE check. */
                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
                        i++;
                if (i == MAX_ORDER_NR_PAGES)
                        continue;
                page = pfn_to_page(pfn + i);
                if (zone && page_zone(page) != zone)
                        return 0;
                zone = page_zone(page);
        }
        return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the pfn of the first LRU page
 * found (or 0 if there is none).
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                }
        }
        return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page,
                        unsigned long private,
                        int **x)
{
        /* This should be improved!! */
        return alloc_page(GFP_HIGHUSER_PAGECACHE);
}


#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                if (!page_count(page))
                        continue;
                /*
                 * We can skip free pages.  And we can only deal with pages
                 * on the LRU.
                 */
                ret = isolate_lru_page(page, &source);
                if (!ret) { /* Success */
                        move_pages--;
                } else {
                        /*
                         * Because we don't hold zone->lock across this scan,
                         * check the page count again here.
                         */
                        if (page_count(page))
                                not_managed++;
#ifdef CONFIG_DEBUG_VM
                        printk(KERN_INFO "removing from LRU failed"
                                         " %lx/%d/%lx\n",
                               pfn, page_count(page), page->flags);
#endif
                }
        }
        ret = -EBUSY;
        if (not_managed) {
                if (!list_empty(&source))
                        putback_lru_pages(&source);
                goto out;
        }
        ret = 0;
        if (list_empty(&source))
                goto out;
        /* this function returns # of failed pages */
        ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
        return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
                                offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
                        check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

extern void drain_all_local_pages(void);

int offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max;
        struct zone *zone;

        BUG_ON(start_pfn >= end_pfn);
        /* at least, alignment against pageblock is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /*
         * Requiring the whole range to be in a single zone makes hotplug
         * much easier (and more readable); we assume this for now.
         */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;
        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn);
        if (ret)
                return ret;
        nr_pages = end_pfn - start_pfn;
        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                flush_scheduled_work();
                cond_resched();
                drain_all_local_pages();
        }

        pfn = scan_lru_pages(start_pfn, end_pfn);
        if (pfn) { /* We have a page on the LRU */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain every zone's lru pagevecs, this is asynchronous... */
        lru_add_drain_all();
        flush_scheduled_work();
        yield();
        /* drain pcp pages, this is synchronous. */
        drain_all_local_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /*
         * Ok, all of our target is isolated.
         * We cannot do rollback at this point.
         */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags */
        undo_isolate_page_range(start_pfn, end_pfn);
        /* removal success */
        zone = page_zone(pfn_to_page(start_pfn));
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;
        num_physpages -= offlined_pages;
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining %lx to %lx failed\n",
                start_pfn, end_pfn);
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn);
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */