// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		drain_all_pages(zone);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is not allowed to merge (due to the freepage counting
	 * problem), an unmerged free buddy page may exist here.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and then freeing it will get it
	 * merged.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * should be no other free page in the range, so we can skip the
	 * costly pageblock scan for moving free pages.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
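
/*
 * Illustrative scenario for the merge handling above (the example numbers
 * are an assumption for exposition, not taken from this file): with
 * pageblock_order == 9 and MAX_ORDER - 1 == 10, a free order-10 page spans
 * two pageblocks. While one of them is isolated, the buddy allocator
 * refuses to merge across the boundary, so un-isolation can encounter a
 * PageBuddy page of order >= pageblock_order. Grabbing it with
 * __isolate_free_page() and returning it via __putback_isolated_page()
 * sends it through the freeing path, which performs the deferred merge.
 */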

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page-allocation type of the range MIGRATE_ISOLATE means that
 * free pages in the range will never be allocated. Any free pages and pages
 * freed in the future will not be allocated again. If the specified range
 * includes migrate types other than MOVABLE or CMA, this will fail with
 * -EBUSY. To finally isolate all pages in the range, the caller has to free
 * all pages in the range. test_pages_isolated() can be used to test for that.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This is detected in set_migratetype_isolate(), which returns an error.
 * We then clean up by restoring the migration type on pageblocks we may
 * have modified and return -EBUSY to the caller. This prevents two threads
 * from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * In some cases pages might still end up on pcp lists, which would allow them
 * to be allocated even though they are in fact already isolated. Depending on
 * how strong a guarantee the caller needs, drain_all_pages() might be needed
 * (e.g. __offline_pages() will need to call it after checking for an isolated
 * range before the next retry).
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
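
/*
 * Example usage (an illustrative sketch, not lifted from a real caller):
 * a user such as memory offlining brackets its work with this API roughly
 * as follows, with start_pfn/end_pfn pageblock-aligned:
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
 *				       MEMORY_OFFLINE | REPORT_FAILURE);
 *	if (ret)
 *		return ret;
 *	... migrate away or free all in-use pages in the range ...
 *	if (test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE))
 *		... drain pcp lists and retry ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */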

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}
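
/*
 * Concrete walk through the scan above (the example numbers are an
 * assumption, not derived from this file): with MAX_ORDER - 1 == 10, a
 * fully free, isolated order-10 buddy page lets the loop advance pfn by
 * 1 << 10 == 1024 in a single step instead of visiting every page, so a
 * clean range is verified in a handful of iterations.
 */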

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Just check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are either free or marked ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
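
/*
 * Note on retries (an illustrative sketch, not code from this file): as
 * documented at start_isolate_page_range(), freed pages may still sit on
 * pcp lists and make this check fail spuriously. A caller needing a strong
 * guarantee, e.g. __offline_pages(), typically drains after a failed check
 * and then tries again:
 *
 *	while (test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE)) {
 *		drain_all_pages(zone);
 *		... and give up after a bounded number of attempts ...
 *	}
 */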