xref: /openbmc/linux/mm/page_owner.c (revision c67e8ec0)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/debugfs.h>
3 #include <linux/mm.h>
4 #include <linux/slab.h>
5 #include <linux/uaccess.h>
6 #include <linux/memblock.h>
7 #include <linux/stacktrace.h>
8 #include <linux/page_owner.h>
9 #include <linux/jump_label.h>
10 #include <linux/migrate.h>
11 #include <linux/stackdepot.h>
12 #include <linux/seq_file.h>
13 
14 #include "internal.h"
15 
16 /*
17  * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
18  * to use off-stack temporary storage
19  */
20 #define PAGE_OWNER_STACK_DEPTH (16)
21 
22 struct page_owner {
23 	unsigned short order;
24 	short last_migrate_reason;
25 	gfp_t gfp_mask;
26 	depot_stack_handle_t handle;
27 };
28 
29 static bool page_owner_disabled = true;
30 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
31 
32 static depot_stack_handle_t dummy_handle;
33 static depot_stack_handle_t failure_handle;
34 static depot_stack_handle_t early_handle;
35 
36 static void init_early_allocated_pages(void);
37 
38 static int __init early_page_owner_param(char *buf)
39 {
40 	if (!buf)
41 		return -EINVAL;
42 
43 	if (strcmp(buf, "on") == 0)
44 		page_owner_disabled = false;
45 
46 	return 0;
47 }
48 early_param("page_owner", early_page_owner_param);
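/*
 * Usage note, not part of the original file: page_owner only collects data
 * when the kernel is built with CONFIG_PAGE_OWNER=y and booted with the
 * "page_owner=on" command line parameter handled above, e.g. in a GRUB-style
 * bootloader entry (illustrative):
 *
 *	linux /boot/vmlinuz root=/dev/sda1 page_owner=on
 *
 * Without the parameter, need_page_owner() below returns false and page_ext
 * never reserves space for struct page_owner.
 */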
49 
50 static bool need_page_owner(void)
51 {
52 	if (page_owner_disabled)
53 		return false;
54 
55 	return true;
56 }
57 
58 static __always_inline depot_stack_handle_t create_dummy_stack(void)
59 {
60 	unsigned long entries[4];
61 	struct stack_trace dummy;
62 
63 	dummy.nr_entries = 0;
64 	dummy.max_entries = ARRAY_SIZE(entries);
65 	dummy.entries = &entries[0];
66 	dummy.skip = 0;
67 
68 	save_stack_trace(&dummy);
69 	return depot_save_stack(&dummy, GFP_KERNEL);
70 }
71 
72 static noinline void register_dummy_stack(void)
73 {
74 	dummy_handle = create_dummy_stack();
75 }
76 
77 static noinline void register_failure_stack(void)
78 {
79 	failure_handle = create_dummy_stack();
80 }
81 
82 static noinline void register_early_stack(void)
83 {
84 	early_handle = create_dummy_stack();
85 }
86 
87 static void init_page_owner(void)
88 {
89 	if (page_owner_disabled)
90 		return;
91 
92 	register_dummy_stack();
93 	register_failure_stack();
94 	register_early_stack();
95 	static_branch_enable(&page_owner_inited);
96 	init_early_allocated_pages();
97 }
98 
99 struct page_ext_operations page_owner_ops = {
100 	.size = sizeof(struct page_owner),
101 	.need = need_page_owner,
102 	.init = init_page_owner,
103 };
104 
105 static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
106 {
107 	return (void *)page_ext + page_owner_ops.offset;
108 }
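/*
 * Background sketch of the page_ext mechanism, which lives in mm/page_ext.c
 * rather than in this file: page_owner_ops is registered in the page_ext_ops[]
 * table there. When need_page_owner() returns true, page_ext reserves .size
 * extra bytes for every page and records where that area starts in
 * page_owner_ops.offset, which is why get_page_owner() can simply add the
 * offset to the page_ext pointer. Conceptually:
 *
 *	per-page extension:  [ struct page_ext | other clients | struct page_owner ]
 *	                     |<-------- page_owner_ops.offset ------->|
 */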
109 
110 void __reset_page_owner(struct page *page, unsigned int order)
111 {
112 	int i;
113 	struct page_ext *page_ext;
114 
115 	for (i = 0; i < (1 << order); i++) {
116 		page_ext = lookup_page_ext(page + i);
117 		if (unlikely(!page_ext))
118 			continue;
119 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
120 	}
121 }
122 
123 static inline bool check_recursive_alloc(struct stack_trace *trace,
124 					unsigned long ip)
125 {
126 	int i;
127 
128 	if (!trace->nr_entries)
129 		return false;
130 
131 	for (i = 0; i < trace->nr_entries; i++) {
132 		if (trace->entries[i] == ip)
133 			return true;
134 	}
135 
136 	return false;
137 }
138 
139 static noinline depot_stack_handle_t save_stack(gfp_t flags)
140 {
141 	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
142 	struct stack_trace trace = {
143 		.nr_entries = 0,
144 		.entries = entries,
145 		.max_entries = PAGE_OWNER_STACK_DEPTH,
146 		.skip = 2
147 	};
148 	depot_stack_handle_t handle;
149 
150 	save_stack_trace(&trace);
151 	if (trace.nr_entries != 0 &&
152 	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
153 		trace.nr_entries--;
154 
155 	/*
156 	 * We need to check for recursion here because our request to stackdepot
157 	 * could trigger a memory allocation to save the new entry. That
158 	 * allocation would reach this point and call depot_save_stack() again
159 	 * if we didn't catch it; since stackdepot is still short of memory,
160 	 * it would keep trying to allocate and loop forever.
161 	 */
162 	if (check_recursive_alloc(&trace, _RET_IP_))
163 		return dummy_handle;
164 
165 	handle = depot_save_stack(&trace, flags);
166 	if (!handle)
167 		handle = failure_handle;
168 
169 	return handle;
170 }
171 
172 static inline void __set_page_owner_handle(struct page_ext *page_ext,
173 	depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
174 {
175 	struct page_owner *page_owner;
176 
177 	page_owner = get_page_owner(page_ext);
178 	page_owner->handle = handle;
179 	page_owner->order = order;
180 	page_owner->gfp_mask = gfp_mask;
181 	page_owner->last_migrate_reason = -1;
182 
183 	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
184 }
185 
186 noinline void __set_page_owner(struct page *page, unsigned int order,
187 					gfp_t gfp_mask)
188 {
189 	struct page_ext *page_ext = lookup_page_ext(page);
190 	depot_stack_handle_t handle;
191 
192 	if (unlikely(!page_ext))
193 		return;
194 
195 	handle = save_stack(gfp_mask);
196 	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
197 }
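/*
 * For context, a sketch of the caller side, which lives in
 * include/linux/page_owner.h rather than here: the allocator hooks are gated
 * behind the page_owner_inited static key enabled in init_page_owner(), so
 * the fast path costs a single patched branch while the feature is off.
 * Roughly:
 *
 *	static inline void set_page_owner(struct page *page,
 *			unsigned int order, gfp_t gfp_mask)
 *	{
 *		if (static_branch_unlikely(&page_owner_inited))
 *			__set_page_owner(page, order, gfp_mask);
 *	}
 */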
198 
199 void __set_page_owner_migrate_reason(struct page *page, int reason)
200 {
201 	struct page_ext *page_ext = lookup_page_ext(page);
202 	struct page_owner *page_owner;
203 
204 	if (unlikely(!page_ext))
205 		return;
206 
207 	page_owner = get_page_owner(page_ext);
208 	page_owner->last_migrate_reason = reason;
209 }
210 
211 void __split_page_owner(struct page *page, unsigned int order)
212 {
213 	int i;
214 	struct page_ext *page_ext = lookup_page_ext(page);
215 	struct page_owner *page_owner;
216 
217 	if (unlikely(!page_ext))
218 		return;
219 
220 	page_owner = get_page_owner(page_ext);
221 	page_owner->order = 0;
222 	for (i = 1; i < (1 << order); i++)
223 		__copy_page_owner(page, page + i);
224 }
225 
226 void __copy_page_owner(struct page *oldpage, struct page *newpage)
227 {
228 	struct page_ext *old_ext = lookup_page_ext(oldpage);
229 	struct page_ext *new_ext = lookup_page_ext(newpage);
230 	struct page_owner *old_page_owner, *new_page_owner;
231 
232 	if (unlikely(!old_ext || !new_ext))
233 		return;
234 
235 	old_page_owner = get_page_owner(old_ext);
236 	new_page_owner = get_page_owner(new_ext);
237 	new_page_owner->order = old_page_owner->order;
238 	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
239 	new_page_owner->last_migrate_reason =
240 		old_page_owner->last_migrate_reason;
241 	new_page_owner->handle = old_page_owner->handle;
242 
243 	/*
244 	 * We don't clear the bit on the oldpage as it's going to be freed
245 	 * after migration. Until then, the info can be useful in case of
246 	 * a bug, and the overall stats will be off a bit only temporarily.
247 	 * Also, migrate_misplaced_transhuge_page() can still fail the
248 	 * migration and then we want the oldpage to retain the info. But
249 	 * in that case we also don't need to explicitly clear the info from
250 	 * the new page, which will be freed.
251 	 */
252 	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
253 }
254 
255 void pagetypeinfo_showmixedcount_print(struct seq_file *m,
256 				       pg_data_t *pgdat, struct zone *zone)
257 {
258 	struct page *page;
259 	struct page_ext *page_ext;
260 	struct page_owner *page_owner;
261 	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
262 	unsigned long end_pfn = pfn + zone->spanned_pages;
263 	unsigned long count[MIGRATE_TYPES] = { 0, };
264 	int pageblock_mt, page_mt;
265 	int i;
266 
267 	/* Scan block by block. First and last block may be incomplete */
268 	pfn = zone->zone_start_pfn;
269 
270 	/*
271 	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
272 	 * a zone boundary, it will be double counted between zones. This does
273 	 * not matter as the mixed block count will still be correct
274 	 */
275 	for (; pfn < end_pfn; ) {
276 		if (!pfn_valid(pfn)) {
277 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
278 			continue;
279 		}
280 
281 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
282 		block_end_pfn = min(block_end_pfn, end_pfn);
283 
284 		page = pfn_to_page(pfn);
285 		pageblock_mt = get_pageblock_migratetype(page);
286 
287 		for (; pfn < block_end_pfn; pfn++) {
288 			if (!pfn_valid_within(pfn))
289 				continue;
290 
291 			page = pfn_to_page(pfn);
292 
293 			if (page_zone(page) != zone)
294 				continue;
295 
296 			if (PageBuddy(page)) {
297 				unsigned long freepage_order;
298 
299 				freepage_order = page_order_unsafe(page);
300 				if (freepage_order < MAX_ORDER)
301 					pfn += (1UL << freepage_order) - 1;
302 				continue;
303 			}
304 
305 			if (PageReserved(page))
306 				continue;
307 
308 			page_ext = lookup_page_ext(page);
309 			if (unlikely(!page_ext))
310 				continue;
311 
312 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
313 				continue;
314 
315 			page_owner = get_page_owner(page_ext);
316 			page_mt = gfpflags_to_migratetype(
317 					page_owner->gfp_mask);
318 			if (pageblock_mt != page_mt) {
319 				if (is_migrate_cma(pageblock_mt))
320 					count[MIGRATE_MOVABLE]++;
321 				else
322 					count[pageblock_mt]++;
323 
324 				pfn = block_end_pfn;
325 				break;
326 			}
327 			pfn += (1UL << page_owner->order) - 1;
328 		}
329 	}
330 
331 	/* Print counts */
332 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
333 	for (i = 0; i < MIGRATE_TYPES; i++)
334 		seq_printf(m, "%12lu ", count[i]);
335 	seq_putc(m, '\n');
336 }
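/*
 * Illustrative note: the per-zone counts computed above feed the
 * "Number of mixed blocks" section of /proc/pagetypeinfo (printed from
 * mm/vmstat.c when page_owner is enabled). With made-up numbers, a line
 * produced by the seq_printf() calls above looks roughly like:
 *
 *	Node 0, zone   Normal            1            0            2            0            0            0
 */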
337 
338 static ssize_t
339 print_page_owner(char __user *buf, size_t count, unsigned long pfn,
340 		struct page *page, struct page_owner *page_owner,
341 		depot_stack_handle_t handle)
342 {
343 	int ret;
344 	int pageblock_mt, page_mt;
345 	char *kbuf;
346 	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
347 	struct stack_trace trace = {
348 		.nr_entries = 0,
349 		.entries = entries,
350 		.max_entries = PAGE_OWNER_STACK_DEPTH,
351 		.skip = 0
352 	};
353 
354 	count = min_t(size_t, count, PAGE_SIZE);
355 	kbuf = kmalloc(count, GFP_KERNEL);
356 	if (!kbuf)
357 		return -ENOMEM;
358 
359 	ret = snprintf(kbuf, count,
360 			"Page allocated via order %u, mask %#x(%pGg)\n",
361 			page_owner->order, page_owner->gfp_mask,
362 			&page_owner->gfp_mask);
363 
364 	if (ret >= count)
365 		goto err;
366 
367 	/* Print information relevant to grouping pages by mobility */
368 	pageblock_mt = get_pageblock_migratetype(page);
369 	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
370 	ret += snprintf(kbuf + ret, count - ret,
371 			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
372 			pfn,
373 			migratetype_names[page_mt],
374 			pfn >> pageblock_order,
375 			migratetype_names[pageblock_mt],
376 			page->flags, &page->flags);
377 
378 	if (ret >= count)
379 		goto err;
380 
381 	depot_fetch_stack(handle, &trace);
382 	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
383 	if (ret >= count)
384 		goto err;
385 
386 	if (page_owner->last_migrate_reason != -1) {
387 		ret += snprintf(kbuf + ret, count - ret,
388 			"Page has been migrated, last migrate reason: %s\n",
389 			migrate_reason_names[page_owner->last_migrate_reason]);
390 		if (ret >= count)
391 			goto err;
392 	}
393 
394 	ret += snprintf(kbuf + ret, count - ret, "\n");
395 	if (ret >= count)
396 		goto err;
397 
398 	if (copy_to_user(buf, kbuf, ret))
399 		ret = -EFAULT;
400 
401 	kfree(kbuf);
402 	return ret;
403 
404 err:
405 	kfree(kbuf);
406 	return -ENOMEM;
407 }
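/*
 * Illustrative record assembled from the format strings above; the concrete
 * values and stack entries are made up. Each allocated page read from the
 * debugfs file is reported roughly as:
 *
 *	Page allocated via order 0, mask 0x6200ca(GFP_HIGHUSER_MOVABLE)
 *	PFN 180224 type Movable Block 352 type Movable Flags 0x8000000000000(...)
 *	 get_page_from_freelist+0x32e/0xbe0
 *	 __alloc_pages_nodemask+0x12a/0x2e0
 *	 ...
 *	Page has been migrated, last migrate reason: compaction
 *
 * followed by a blank line; the migration line only appears when
 * last_migrate_reason has been set.
 */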
408 
409 void __dump_page_owner(struct page *page)
410 {
411 	struct page_ext *page_ext = lookup_page_ext(page);
412 	struct page_owner *page_owner;
413 	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
414 	struct stack_trace trace = {
415 		.nr_entries = 0,
416 		.entries = entries,
417 		.max_entries = PAGE_OWNER_STACK_DEPTH,
418 		.skip = 0
419 	};
420 	depot_stack_handle_t handle;
421 	gfp_t gfp_mask;
422 	int mt;
423 
424 	if (unlikely(!page_ext)) {
425 		pr_alert("There is no page extension available.\n");
426 		return;
427 	}
428 
429 	page_owner = get_page_owner(page_ext);
430 	gfp_mask = page_owner->gfp_mask;
431 	mt = gfpflags_to_migratetype(gfp_mask);
432 
433 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
434 		pr_alert("page_owner info is not active (free page?)\n");
435 		return;
436 	}
437 
438 	handle = READ_ONCE(page_owner->handle);
439 	if (!handle) {
440 		pr_alert("page_owner info is not active (free page?)\n");
441 		return;
442 	}
443 
444 	depot_fetch_stack(handle, &trace);
445 	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
446 		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
447 	print_stack_trace(&trace, 0);
448 
449 	if (page_owner->last_migrate_reason != -1)
450 		pr_alert("page has been migrated, last migrate reason: %s\n",
451 			migrate_reason_names[page_owner->last_migrate_reason]);
452 }
453 
454 static ssize_t
455 read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
456 {
457 	unsigned long pfn;
458 	struct page *page;
459 	struct page_ext *page_ext;
460 	struct page_owner *page_owner;
461 	depot_stack_handle_t handle;
462 
463 	if (!static_branch_unlikely(&page_owner_inited))
464 		return -EINVAL;
465 
466 	page = NULL;
467 	pfn = min_low_pfn + *ppos;
468 
469 	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
470 	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
471 		pfn++;
472 
473 	drain_all_pages(NULL);
474 
475 	/* Find an allocated page */
476 	for (; pfn < max_pfn; pfn++) {
477 		/*
478 		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
479 		 * check that the area actually exists and skip it if not
480 		 */
481 		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
482 			pfn += MAX_ORDER_NR_PAGES - 1;
483 			continue;
484 		}
485 
486 		/* Check for holes within a MAX_ORDER area */
487 		if (!pfn_valid_within(pfn))
488 			continue;
489 
490 		page = pfn_to_page(pfn);
491 		if (PageBuddy(page)) {
492 			unsigned long freepage_order = page_order_unsafe(page);
493 
494 			if (freepage_order < MAX_ORDER)
495 				pfn += (1UL << freepage_order) - 1;
496 			continue;
497 		}
498 
499 		page_ext = lookup_page_ext(page);
500 		if (unlikely(!page_ext))
501 			continue;
502 
503 		/*
504 		 * Some pages could be missed by concurrent allocation or free,
505 		 * because we don't hold the zone lock.
506 		 */
507 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
508 			continue;
509 
510 		page_owner = get_page_owner(page_ext);
511 
512 		/*
513 		 * Access to page_owner->handle isn't synchronized, so we should
514 		 * be careful when reading it.
515 		 */
516 		handle = READ_ONCE(page_owner->handle);
517 		if (!handle)
518 			continue;
519 
520 		/* Record the next PFN to read in the file offset */
521 		*ppos = (pfn - min_low_pfn) + 1;
522 
523 		return print_page_owner(buf, count, pfn, page,
524 				page_owner, handle);
525 	}
526 
527 	return 0;
528 }
529 
530 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
531 {
532 	unsigned long pfn = zone->zone_start_pfn;
533 	unsigned long end_pfn = zone_end_pfn(zone);
534 	unsigned long count = 0;
535 
536 	/*
537 	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
538 	 * a zone boundary, it will be double counted between zones. This does
539 	 * not matter as the mixed block count will still be correct
540 	 */
541 	for (; pfn < end_pfn; ) {
542 		unsigned long block_end_pfn;
543 
544 		if (!pfn_valid(pfn)) {
545 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
546 			continue;
547 		}
548 
549 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
550 		block_end_pfn = min(block_end_pfn, end_pfn);
551 
552 		for (; pfn < block_end_pfn; pfn++) {
553 			struct page *page;
554 			struct page_ext *page_ext;
555 
556 			if (!pfn_valid_within(pfn))
557 				continue;
558 
559 			page = pfn_to_page(pfn);
560 
561 			if (page_zone(page) != zone)
562 				continue;
563 
564 			/*
565 			 * To avoid having to grab zone->lock, be a little
566 			 * careful when reading buddy page order. The only
567 			 * danger is that we skip too much and potentially miss
568 			 * some early allocated pages, which is better than
569 			 * heavy lock contention.
570 			 */
571 			if (PageBuddy(page)) {
572 				unsigned long order = page_order_unsafe(page);
573 
574 				if (order > 0 && order < MAX_ORDER)
575 					pfn += (1UL << order) - 1;
576 				continue;
577 			}
578 
579 			if (PageReserved(page))
580 				continue;
581 
582 			page_ext = lookup_page_ext(page);
583 			if (unlikely(!page_ext))
584 				continue;
585 
586 			/* Already set, possibly by walking an overlapping zone */
587 			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
588 				continue;
589 
590 			/* Found early allocated page */
591 			__set_page_owner_handle(page_ext, early_handle, 0, 0);
592 			count++;
593 		}
594 		cond_resched();
595 	}
596 
597 	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
598 		pgdat->node_id, zone->name, count);
599 }
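/*
 * Illustrative boot log line produced by the pr_info() above (the count is
 * made up):
 *
 *	Node 0, zone   Normal: page owner found early allocated 12601 pages
 */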
600 
601 static void init_zones_in_node(pg_data_t *pgdat)
602 {
603 	struct zone *zone;
604 	struct zone *node_zones = pgdat->node_zones;
605 
606 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
607 		if (!populated_zone(zone))
608 			continue;
609 
610 		init_pages_in_zone(pgdat, zone);
611 	}
612 }
613 
614 static void init_early_allocated_pages(void)
615 {
616 	pg_data_t *pgdat;
617 
618 	for_each_online_pgdat(pgdat)
619 		init_zones_in_node(pgdat);
620 }
621 
622 static const struct file_operations proc_page_owner_operations = {
623 	.read		= read_page_owner,
624 };
625 
626 static int __init pageowner_init(void)
627 {
628 	struct dentry *dentry;
629 
630 	if (!static_branch_unlikely(&page_owner_inited)) {
631 		pr_info("page_owner is disabled\n");
632 		return 0;
633 	}
634 
635 	dentry = debugfs_create_file("page_owner", 0400, NULL,
636 				     NULL, &proc_page_owner_operations);
637 
638 	return PTR_ERR_OR_ZERO(dentry);
639 }
640 late_initcall(pageowner_init)
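/*
 * Minimal userspace sketch, an assumption and not part of this file, showing
 * how the debugfs file created by pageowner_init() is consumed. A sequential
 * read is enough: read_page_owner() records the next PFN to scan in the file
 * offset, so each read() returns one allocation record until all of memory
 * has been walked. The kernel tree also ships tools/vm/page_owner_sort.c for
 * grouping identical stacks in the dumped output.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);

	if (fd < 0) {
		perror("open /sys/kernel/debug/page_owner");
		return 1;
	}

	/* Stream every record to stdout; redirect to a file for later sorting. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}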
641