/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
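/*
 * unset_migratetype_isolate - revert a pageblock back to @migratetype.
 *
 * Undoes set_migratetype_isolate(): restores the pageblock's migratetype,
 * moves its free pages back to the matching freelists and fixes up the
 * freepage counters. A free page of order >= pageblock_order gets special
 * treatment so that it can merge with a buddy outside the isolated
 * pageblock (see the comment below).
 */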
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that there is a free buddy page.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it again will make it
	 * merge with its buddy.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * should be no other free page in the range, so we can avoid the
	 * costly pageblock scan for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() - mark the allocation type of a range of
 * pages as MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to restore on error recovery.
 *
 * Setting the allocation type to MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated: neither the pages that are free now
 * nor any pages freed into the range later.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
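/*
 * Illustrative sketch (not part of this file): a caller such as
 * alloc_contig_range() in mm/page_alloc.c drives the API above roughly
 * as follows -- isolate the range, migrate whatever is still in use,
 * verify that the range is fully free, and undo the isolation again:
 *
 *	ret = start_isolate_page_range(start, end, migratetype, false);
 *	if (ret)
 *		return ret;
 *	...migrate in-use pages out of [start, end)...
 *	if (test_pages_isolated(start, end, false)) {
 *		ret = -EBUSY;
 *		goto done;
 *	}
 *	...the range is now free and cannot be re-allocated...
 * done:
 *	undo_isolate_page_range(start, end, migratetype);
 */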
/*
 * Test whether all pages in the range are free (which here means
 * isolated). All pages in [start_pfn...end_pfn) must be in the same zone,
 * and zone->lock must be held by the caller.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: in general pageblock_nr_pages != MAX_ORDER, so chunks of
	 * free pages are not necessarily aligned to pageblock_nr_pages.
	 * Therefore, check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process,
	 * if possible. For now, as a simple work-around, we use the next
	 * node for the destination.
	 */
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node_in(page_to_nid(page),
							 node_online_map));

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
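/*
 * Usage note (a sketch, based on do_migrate_range() in
 * mm/memory_hotplug.c): alloc_migrate_target() is intended as the
 * new-page allocation callback for migrate_pages() while a range is
 * being emptied, e.g.:
 *
 *	ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */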