xref: /openbmc/linux/arch/s390/mm/page-states.c (revision b97d6790d03b763eca08847a9a5869a4291b9f9a)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
245e576b1SMartin Schwidefsky /*
345e576b1SMartin Schwidefsky  * Copyright IBM Corp. 2008
445e576b1SMartin Schwidefsky  *
545e576b1SMartin Schwidefsky  * Guest page hinting for unused pages.
645e576b1SMartin Schwidefsky  *
745e576b1SMartin Schwidefsky  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
845e576b1SMartin Schwidefsky  */
945e576b1SMartin Schwidefsky 
1045e576b1SMartin Schwidefsky #include <linux/kernel.h>
1145e576b1SMartin Schwidefsky #include <linux/errno.h>
1245e576b1SMartin Schwidefsky #include <linux/types.h>
1345e576b1SMartin Schwidefsky #include <linux/mm.h>
14c9b5ad54SMartin Schwidefsky #include <linux/memblock.h>
155a0e3ad6STejun Heo #include <linux/gfp.h>
1645e576b1SMartin Schwidefsky #include <linux/init.h>
17d09a307fSHeiko Carstens #include <asm/asm-extable.h>
18c9b5ad54SMartin Schwidefsky #include <asm/facility.h>
192d42f947SClaudio Imbrenda #include <asm/page-states.h>
2045e576b1SMartin Schwidefsky 
/*
 * CMMA state: 0 = disabled, 1 = ESSA available (basic CMMA),
 * 2 = no-DAT page states also available (facility 147, see cmma_init()).
 * Default is enabled; "cmma=" on the command line may override it.
 */
static int cmma_flag = 1;
2245e576b1SMartin Schwidefsky 
cmma(char * str)2345e576b1SMartin Schwidefsky static int __init cmma(char *str)
2445e576b1SMartin Schwidefsky {
255f0917a2SVasily Gorbik 	bool enabled;
262ddddf3eSHeiko Carstens 
275f0917a2SVasily Gorbik 	if (!kstrtobool(str, &enabled))
285f0917a2SVasily Gorbik 		cmma_flag = enabled;
2945e576b1SMartin Schwidefsky 	return 1;
3045e576b1SMartin Schwidefsky }
3145e576b1SMartin Schwidefsky __setup("cmma=", cmma);
3245e576b1SMartin Schwidefsky 
/*
 * Probe whether the ESSA instruction (opcode 0xb9ab) accepts the
 * ESSA_GET_STATE command on this machine.
 *
 * Returns 0 if the instruction executed, -EOPNOTSUPP if it faulted:
 * rc starts out as -EOPNOTSUPP and is only cleared by the "la" at
 * label 0; on a fault the exception table entry resumes execution at
 * label 1, so rc keeps its error value.
 */
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:     la      %[rc],0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}
48931641c6SHeiko Carstens 
cmma_init(void)49931641c6SHeiko Carstens void __init cmma_init(void)
50931641c6SHeiko Carstens {
51931641c6SHeiko Carstens 	if (!cmma_flag)
52931641c6SHeiko Carstens 		return;
53c9b5ad54SMartin Schwidefsky 	if (cmma_test_essa()) {
5445e576b1SMartin Schwidefsky 		cmma_flag = 0;
55c9b5ad54SMartin Schwidefsky 		return;
56c9b5ad54SMartin Schwidefsky 	}
57c9b5ad54SMartin Schwidefsky 	if (test_facility(147))
58c9b5ad54SMartin Schwidefsky 		cmma_flag = 2;
5945e576b1SMartin Schwidefsky }
6045e576b1SMartin Schwidefsky 
/*
 * Issue ESSA with the SET_UNUSED command for every 4K page in the
 * (1 << order) page block, hinting to the hypervisor that the page
 * content is no longer needed and the backing frame may be reclaimed.
 */
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
7145e576b1SMartin Schwidefsky 
/*
 * Issue ESSA with the SET_STABLE command for every 4K page in the
 * (1 << order) page block, marking the pages as stable (in use, may
 * be accessed via DAT).
 */
static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
82846955c8SHeiko Carstens 
/*
 * Issue ESSA with the SET_STABLE_NODAT command for every 4K page in the
 * (1 << order) page block. Requires cmma_flag >= 2 (facility 147); the
 * no-dat state tells the machine the page is not used as a DAT table.
 */
static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
93c9b5ad54SMartin Schwidefsky 
/*
 * Walk the pmd entries covering [addr, end) below @pud and set PG_arch_1
 * on the page holding each referenced pte table, so cmma_init_nodat()
 * can skip those pages when converting memory to stable/no-dat.
 * Empty and large (huge-mapping) pmds carry no pte table and are skipped.
 */
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		/* pmd value holds the physical origin of the pte table */
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}
109c9b5ad54SMartin Schwidefsky 
/*
 * Walk the pud entries covering [addr, end) below @p4d. For each present,
 * non-large pud, mark the 4 pages of the pmd table it references with
 * PG_arch_1 (s390 lower-level tables span 4 pages), then descend into the
 * pmd level. A folded pud shares its page with the level above, which is
 * (or will be) marked by the caller, so it is not marked here.
 */
static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
130c9b5ad54SMartin Schwidefsky 
/*
 * Walk the p4d entries covering [addr, end) below @pgd. For each present,
 * non-folded p4d, mark the 4 pages of the pud table it references with
 * PG_arch_1, then descend into the pud level. Folded p4ds share their
 * page with the level above and are marked there instead.
 */
static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
151c9b5ad54SMartin Schwidefsky 
/*
 * Walk the kernel page tables from address 0 up to MODULES_END and set
 * PG_arch_1 on every page that backs a page table, starting at the pgd
 * level. For each present, non-folded pgd the 4 pages of the referenced
 * p4d table are marked, then the lower levels are walked recursively.
 * The marked pages are later excluded from the stable/no-dat conversion
 * in cmma_init_nodat().
 */
static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}
173c9b5ad54SMartin Schwidefsky 
/*
 * For the extended (no-dat) page states the machine must know which
 * pages are used as DAT page tables. First mark every page backing a
 * kernel page table with PG_arch_1 — via the pgd walk plus the 4 pages
 * each of swapper_pg_dir and invalid_pg_dir — then convert all other
 * memblock-reported pages to the stable/no-dat state. The PG_arch_1
 * bit is consumed (test-and-clear) during the scan, so the marked
 * page-table pages stay in the default stable state.
 * Only runs when cmma_flag >= 2, i.e. facility 147 is installed.
 */
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();
	page = virt_to_page(&swapper_pg_dir);
	for (i = 0; i < 4; i++)
		set_bit(PG_arch_1, &page[i].flags);
	page = virt_to_page(&invalid_pg_dir);
	for (i = 0; i < 4; i++)
		set_bit(PG_arch_1, &page[i].flags);

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}
203c9b5ad54SMartin Schwidefsky 
arch_free_page(struct page * page,int order)204c9b5ad54SMartin Schwidefsky void arch_free_page(struct page *page, int order)
205c9b5ad54SMartin Schwidefsky {
206c9b5ad54SMartin Schwidefsky 	if (!cmma_flag)
207c9b5ad54SMartin Schwidefsky 		return;
208c9b5ad54SMartin Schwidefsky 	set_page_unused(page, order);
209c9b5ad54SMartin Schwidefsky }
210c9b5ad54SMartin Schwidefsky 
arch_alloc_page(struct page * page,int order)21145e576b1SMartin Schwidefsky void arch_alloc_page(struct page *page, int order)
21245e576b1SMartin Schwidefsky {
213846955c8SHeiko Carstens 	if (!cmma_flag)
214846955c8SHeiko Carstens 		return;
215c9b5ad54SMartin Schwidefsky 	if (cmma_flag < 2)
216c9b5ad54SMartin Schwidefsky 		set_page_stable_dat(page, order);
217c9b5ad54SMartin Schwidefsky 	else
218c9b5ad54SMartin Schwidefsky 		set_page_stable_nodat(page, order);
219c9b5ad54SMartin Schwidefsky }
220c9b5ad54SMartin Schwidefsky 
arch_set_page_dat(struct page * page,int order)221c9b5ad54SMartin Schwidefsky void arch_set_page_dat(struct page *page, int order)
222c9b5ad54SMartin Schwidefsky {
223c9b5ad54SMartin Schwidefsky 	if (!cmma_flag)
224c9b5ad54SMartin Schwidefsky 		return;
225c9b5ad54SMartin Schwidefsky 	set_page_stable_dat(page, order);
226c9b5ad54SMartin Schwidefsky }
227