// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Memory hotplug does not yet call shrink_slab() by itself,
	 * so we only check for MOVABLE pages here.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
									NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with an order above pageblock_order on an
	 * isolated pageblock is restricted from merging (due to the freepage
	 * counting problem), a free buddy page may still exist next to it.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it back lets the buddy
	 * allocator merge them.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}
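
	/*
	 * Worked example of the buddy lookup above (values hypothetical,
	 * assuming pageblock_order == 9): for a free page at pfn 0x3600 with
	 * order == 9, __find_buddy_pfn() just flips the order bit of the pfn,
	 * so buddy_pfn == 0x3600 ^ (1 << 9) == 0x3400. If that buddy is not
	 * isolated, __isolate_free_page() pulls the page off the freelist so
	 * that __putback_isolated_page() below can free it back and let the
	 * buddy allocator merge the pair into an order-10 page.
	 */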

	/*
	 * If we isolated a free page of more than pageblock_order, there
	 * should be no other free page in the range, so we can avoid the
	 * costly pageblock scan for moving free pages.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - mark the page-allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Setting the page-allocation type of the range to MIGRATE_ISOLATE means free
 * pages in the range will never be allocated. Any free pages and pages freed
 * in the future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages in
 * the range; test_pages_isolated() can be used to check whether this has
 * happened.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which then returns an error.
 * We clean up by restoring the migrate type on any pageblocks we may have
 * modified and return -EBUSY to the caller. This prevents two threads from
 * simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However,
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable() and
 * zone_pcp_enable() can be used to flush and disable the pcplists before the
 * isolation and to enable them again after the unisolation (see the
 * illustrative sketch after this function).
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
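
/*
 * Illustrative sketch of a typical caller, referenced from the kernel-doc
 * above. This is not part of this file's build; the pfn range and zone are
 * hypothetical and error handling is elided. Callers such as memory
 * offlining follow roughly this sequence:
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn,
 *				     MIGRATE_MOVABLE, MEMORY_OFFLINE))
 *		return -EBUSY;
 *	drain_all_pages(zone);	// flush freed pages off the pcp lists
 *
 *	// ... migrate or free all used pages in the range ...
 *
 *	if (test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE)) {
 *		// some pages could not be freed; give the range back
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *		return -EBUSY;
 *	}
 *	// the range is isolated: use it, then unisolate it again
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *
 * For a stronger guarantee against racy pcp allocations, zone_pcp_disable()
 * can be called before the isolation and zone_pcp_enable() after the
 * unisolation, as described in the kernel-doc above.
 */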

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e., isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free pages is
	 * not necessarily aligned to pageblock_nr_pages. Therefore, check the
	 * migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
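
/*
 * Illustrative only (hypothetical caller, not part of this file's build):
 * because freed pages may still sit on pcp lists (see the kernel-doc of
 * start_isolate_page_range()), a caller that did not use zone_pcp_disable()
 * may see test_pages_isolated() fail spuriously, and can drain and retry
 * before giving up:
 *
 *	drain_all_pages(zone);
 *	if (test_pages_isolated(start_pfn, end_pfn, 0)) {
 *		// still busy: retry the migration, or undo the isolation
 *		undo_isolate_page_range(start_pfn, end_pfn, migratetype);
 *		return -EBUSY;
 *	}
 */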
314