// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

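/*
 * Mark the pageblock containing @page MIGRATE_ISOLATE, provided the block
 * contains no unmovable pages. Returns 0 on success and -EBUSY if the
 * block cannot be isolated (or already is isolated).
 */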
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct page *unmovable = NULL;
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migratetype to isolate.
	 * If it is already set, then someone else must have raced and set
	 * it before us. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		ret = 0;
	}

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret) {
		drain_all_pages(zone);
	} else {
		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);

		if ((isol_flags & REPORT_FAILURE) && unmovable)
			/*
			 * printk() with zone->lock held will likely trigger a
			 * lockdep splat, so defer it here.
			 */
			dump_page(unmovable, "unmovable page");
	}

	return ret;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order greater than pageblock_order is
	 * restricted from merging onto an isolated pageblock (due to the
	 * freepage counting problem), it is possible that a free buddy page
	 * exists here. move_freepages_block() does not handle merging, so we
	 * need another approach to merge such pages: isolating and then
	 * freeing the page causes them to be merged.
	 */
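	/*
	 * Illustration (hypothetical numbers): with pageblock_order == 9,
	 * a free order-10 page at pfn 0x400 spans two pageblocks. Here
	 * __find_buddy_pfn(0x400, 10) == 0x400 ^ (1 << 10) == 0x0; if that
	 * buddy is not isolated, we pull the page off the free list with
	 * __isolate_free_page() and later give it back via
	 * __putback_isolated_page(), letting the buddy allocator re-merge
	 * it under the restored migratetype.
	 */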
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with order >= pageblock_order, there
	 * should be no other free pages in the pageblock, so we can avoid
	 * the costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

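/*
 * Return the first online page in [pfn, pfn + nr_pages), or NULL if the
 * whole range is offline or has no memmap.
 */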
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages
 * in the range. test_pages_isolated() can be used to test it.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which then returns an error.
 * We clean up by restoring the migratetype on any pageblocks we may have
 * modified and return -EBUSY to the caller. This prevents two threads from
 * simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
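
/*
 * Hypothetical caller sketch (not part of this file): the usual
 * isolate -> migrate -> test -> undo sequence, assuming a pageblock-aligned
 * [start_pfn, end_pfn) range within a single zone. The migration step is
 * elided and the function name is illustrative only.
 */
static int __maybe_unused example_offline_range(unsigned long start_pfn,
						unsigned long end_pfn)
{
	int ret;

	/* Mark every pageblock in the range MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE);
	if (ret < 0)
		return ret;	/* -EBUSY: part of the range is unmovable */

	/* ... migrate any remaining used pages out of the range ... */

	/* Returns 0 only once everything in the range is free and isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);

	/* Restore the previous migratetype whether or not the test passed. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}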

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}
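
/*
 * Example walk (hypothetical): a free order-3 buddy page advances the scan
 * by 1 << 3 = 8 pfns at once; with MEMORY_OFFLINE, a HWPoisoned page
 * advances it by one; anything else stops the scan early, which the caller
 * below reports as -EBUSY.
 */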

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages. Check the
	 * migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
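
/*
 * Hypothetical usage sketch: alloc_migrate_target() matches the new_page_t
 * callback type, so a caller that has collected in-use pages on a private
 * list (e.g. via isolate_lru_page()) could hand it to migrate_pages().
 * The list argument and the reason code are assumptions for illustration.
 */
static int __maybe_unused example_migrate_away(struct list_head *pagelist)
{
	return migrate_pages(pagelist, alloc_migrate_target, NULL, 0,
			     MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}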