/* xref: /openbmc/linux/mm/page_owner.c (revision 1cac4c07) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

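/*
 * Per-page tracking data stored in the page_ext area: allocation order,
 * gfp mask, stack depot handles for the allocation and free paths,
 * allocation/free timestamps and the allocating task's pid/tgid/comm.
 */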
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
};

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

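/*
 * Pre-registered stack depot handles: dummy_handle is returned when
 * save_stack() would recurse, failure_handle when stack_depot_save()
 * fails, and early_handle marks pages that were allocated before
 * page_owner was fully initialized.
 */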
static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

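/*
 * Handle the "page_owner=on" boot parameter. When enabled, request an
 * early stack depot init so stack traces can be saved as soon as
 * allocations start being tracked.
 */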
static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	/*
	 * Avoid recursion.
	 *
	 * Sometimes page metadata allocation tracking requires more
	 * memory to be allocated:
	 * - when new stack trace is saved to stack depot
	 * - when backtrace itself is calculated (ia64)
	 */
	if (current->in_page_owner)
		return dummy_handle;
	current->in_page_owner = 1;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	current->in_page_owner = 0;
	return handle;
}

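/*
 * Called when a block of pages is freed: record the freeing stack trace
 * and timestamp in every page_ext of the 1 << order block and clear the
 * "allocated" bit so the pages are reported as freed.
 */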
void __reset_page_owner(struct page *page, unsigned short order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

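/*
 * Fill in the page_owner data for every page of a 1 << order block and
 * mark the corresponding page_ext entries as owned and allocated.
 */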
static inline void __set_page_owner_handle(struct page_ext *page_ext,
					depot_stack_handle_t handle,
					unsigned short order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;
	u64 ts_nsec = local_clock();

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->tgid = current->tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, current->comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

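/*
 * Called from the page allocator: save the allocation stack trace first
 * (saving may itself allocate memory), then attach it to the page's
 * page_ext.
 */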
noinline void __set_page_owner(struct page *page, unsigned short order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
	page_ext_put(page_ext);
}

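/* Remember why the page was last migrated, for later reporting. */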
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

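/*
 * When a high-order page is split, reset the recorded order of each
 * constituent base page to 0.
 */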
void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

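/*
 * On folio migration, copy the page_owner data from the old folio to the
 * new one so the ownership information follows the data.
 */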
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner, *new_page_owner;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->tgid = old_page_owner->tgid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
	strcpy(new_page_owner->comm, old_page_owner->comm);

	/*
	 * We don't clear the bit on the old folio as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the old folio to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

/*
 * Look up any memcg information for the page and print it out.
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJCGS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

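/*
 * Format one page_owner record (allocation info, migratetype, stack trace
 * and optional memcg/migration details) into a kernel buffer and copy it
 * to userspace.
 */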
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec, page_owner->free_ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt  = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

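/*
 * Dump the page_owner information for a single page to the kernel log,
 * typically invoked via dump_page() when debugging.
 */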
void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free stack trace:\n");
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * This temporary page_owner is required so
		 * that we can avoid the context switches while holding
		 * the rcu lock and copying the page owner information to
		 * user through copy_to_user() or GFP_KERNEL allocations.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

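/*
 * llseek handler for the debugfs file; the file position is interpreted
 * as the next PFN to scan by read_page_owner().
 */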
static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle,
						0, 0);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
	.llseek		= lseek_page_owner,
};

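/*
 * Expose the collected data through /sys/kernel/debug/page_owner, e.g.
 *
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 */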
static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)