1072e3d96SPavel Tatashin // SPDX-License-Identifier: GPL-2.0
2072e3d96SPavel Tatashin
3072e3d96SPavel Tatashin /*
4072e3d96SPavel Tatashin * Transitional page tables for kexec and hibernate
5072e3d96SPavel Tatashin *
6072e3d96SPavel Tatashin * This file derived from: arch/arm64/kernel/hibernate.c
7072e3d96SPavel Tatashin *
8788bfdd9SPasha Tatashin * Copyright (c) 2021, Microsoft Corporation.
9788bfdd9SPasha Tatashin * Pasha Tatashin <pasha.tatashin@soleen.com>
10072e3d96SPavel Tatashin *
11072e3d96SPavel Tatashin */
12072e3d96SPavel Tatashin
/*
 * Transitional tables are used during system transferring from one world to
 * another: such as during hibernate restore, and kexec reboots. During these
 * phases one cannot rely on page tables not being overwritten. This is because
 * hibernate and kexec can overwrite the current page tables during transition.
 */
19072e3d96SPavel Tatashin
20072e3d96SPavel Tatashin #include <asm/trans_pgd.h>
21072e3d96SPavel Tatashin #include <asm/pgalloc.h>
22072e3d96SPavel Tatashin #include <asm/pgtable.h>
23072e3d96SPavel Tatashin #include <linux/suspend.h>
24072e3d96SPavel Tatashin #include <linux/bug.h>
25072e3d96SPavel Tatashin #include <linux/mm.h>
26072e3d96SPavel Tatashin #include <linux/mmzone.h>
27*a8bd38dbSNikhil V #include <linux/kfence.h>
28072e3d96SPavel Tatashin
trans_alloc(struct trans_pgd_info * info)2950f53fb7SPavel Tatashin static void *trans_alloc(struct trans_pgd_info *info)
3050f53fb7SPavel Tatashin {
3150f53fb7SPavel Tatashin return info->trans_alloc_page(info->trans_alloc_arg);
3250f53fb7SPavel Tatashin }
3350f53fb7SPavel Tatashin
/*
 * Copy one source PTE into the transitional table, relaxing permissions so
 * the restore code can write through the temporary mapping.
 */
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite_novma(pte));
		return;
	}

	/* Empty entries need no copy. */
	if (pte_none(pte))
		return;

	/* Only debug_pagealloc/kfence leave populated-but-invalid PTEs. */
	if (!debug_pagealloc_enabled() && !is_kfence_address((void *)addr))
		return;

	/*
	 * debug_pagealloc will removed the PTE_VALID bit if
	 * the page isn't in use by the resume kernel. It may have
	 * been in use by the original kernel, in which case we need
	 * to put it back in our copy to do the restore.
	 *
	 * Before marking this entry valid, check the pfn should
	 * be mapped.
	 */
	BUG_ON(!pfn_valid(pte_pfn(pte)));
	set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
}
61072e3d96SPavel Tatashin
/*
 * Allocate a destination PTE table for [start, end) and copy every source
 * entry into it. Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
		    pmd_t *src_pmdp, unsigned long start, unsigned long end)
{
	unsigned long addr = start;
	pte_t *dst_ptep = trans_alloc(info);
	pte_t *src_ptep;

	if (!dst_ptep)
		return -ENOMEM;
	/* NULL mm: these tables are never user mappings. */
	pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);

	dst_ptep = pte_offset_kernel(dst_pmdp, addr);
	src_ptep = pte_offset_kernel(src_pmdp, addr);
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
		dst_ptep++;
		src_ptep++;
		addr += PAGE_SIZE;
	} while (addr != end);

	return 0;
}
82072e3d96SPavel Tatashin
/*
 * Copy the PMD level for [start, end): table entries recurse into copy_pte(),
 * block entries are copied directly with the read-only bit cleared.
 */
static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
		    pud_t *src_pudp, unsigned long start, unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;

	/* Populate the destination PUD slot on first use. */
	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = trans_alloc(info);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(NULL, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);
	src_pmdp = pmd_offset(src_pudp, start);

	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_none(pmd)) {
			if (pmd_table(pmd)) {
				if (copy_pte(info, dst_pmdp, src_pmdp, addr,
					     next))
					return -ENOMEM;
			} else {
				/* Writable copy of the section mapping. */
				set_pmd(dst_pmdp,
					__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
			}
		}
		dst_pmdp++;
		src_pmdp++;
		addr = next;
	} while (addr != end);

	return 0;
}
117072e3d96SPavel Tatashin
/*
 * Copy the PUD level for [start, end): table entries recurse into copy_pmd(),
 * block entries are copied directly with the read-only bit cleared.
 */
static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp,
		    p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pud_t *dst_pudp;
	pud_t *src_pudp;

	/* Populate the destination P4D slot on first use. */
	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = trans_alloc(info);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(NULL, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);
	src_pudp = pud_offset(src_p4dp, start);

	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (!pud_none(pud)) {
			if (pud_table(pud)) {
				if (copy_pmd(info, dst_pudp, src_pudp, addr,
					     next))
					return -ENOMEM;
			} else {
				/* Writable copy of the section mapping. */
				set_pud(dst_pudp,
					__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
			}
		}
		dst_pudp++;
		src_pudp++;
		addr = next;
	} while (addr != end);

	return 0;
}
153072e3d96SPavel Tatashin
/*
 * Copy the P4D level for [start, end). No allocation happens here: on arm64
 * the p4d is folded, so we simply walk and recurse into copy_pud().
 */
static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
		    pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	p4d_t *dst_p4dp = p4d_offset(dst_pgdp, start);
	p4d_t *src_p4dp = p4d_offset(src_pgdp, start);

	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_none(READ_ONCE(*src_p4dp)) &&
		    copy_pud(info, dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
		dst_p4dp++;
		src_p4dp++;
		addr = next;
	} while (addr != end);

	return 0;
}
175072e3d96SPavel Tatashin
/*
 * Walk the live kernel page tables for [start, end) and replicate every
 * populated entry into the transitional table rooted at dst_pgdp.
 */
static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp,
			    unsigned long start, unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (!pgd_none(READ_ONCE(*src_pgdp)) &&
		    copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
		dst_pgdp++;
		src_pgdp++;
		addr = next;
	} while (addr != end);

	return 0;
}
194072e3d96SPavel Tatashin
19589d1410fSPavel Tatashin /*
19689d1410fSPavel Tatashin * Create trans_pgd and copy linear map.
19789d1410fSPavel Tatashin * info: contains allocator and its argument
19889d1410fSPavel Tatashin * dst_pgdp: new page table that is created, and to which map is copied.
19989d1410fSPavel Tatashin * start: Start of the interval (inclusive).
20089d1410fSPavel Tatashin * end: End of the interval (exclusive).
20189d1410fSPavel Tatashin *
20289d1410fSPavel Tatashin * Returns 0 on success, and -ENOMEM on failure.
20389d1410fSPavel Tatashin */
int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
			  unsigned long start, unsigned long end)
{
	pgd_t *trans_pgd = trans_alloc(info);
	int rc;

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(info, trans_pgd, start, end);
	if (rc)
		return rc;

	/* Hand the new root back only on full success. */
	*dst_pgdp = trans_pgd;
	return 0;
}
221072e3d96SPavel Tatashin
22250f53fb7SPavel Tatashin /*
2237018d467SJames Morse * The page we want to idmap may be outside the range covered by VA_BITS that
2247018d467SJames Morse * can be built using the kernel's p?d_populate() helpers. As a one off, for a
2257018d467SJames Morse * single page, we build these page tables bottom up and just assume that will
2267018d467SJames Morse * need the maximum T0SZ.
2277018d467SJames Morse *
2287018d467SJames Morse * Returns 0 on success, and -ENOMEM on failure.
2297018d467SJames Morse * On success trans_ttbr0 contains page table with idmapped page, t0sz is set to
2307018d467SJames Morse * maximum T0SZ for this page.
2317018d467SJames Morse */
int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
			 unsigned long *t0sz, void *page)
{
	phys_addr_t dst_addr = virt_to_phys(page);
	unsigned long pfn = __phys_to_pfn(dst_addr);
	/* PA bits above 47 set => need the 52-bit address range (msb 51). */
	int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
	/*
	 * Address bits resolved per level, minus one: level_msb below is an
	 * inclusive bound, so [level_lsb, level_lsb + bits_mapped] spans
	 * PAGE_SHIFT - 3 bits (one 8-byte entry per PAGE_SIZE/8 slots).
	 */
	int bits_mapped = PAGE_SHIFT - 4;
	unsigned long level_mask, prev_level_entry, *levels[4];
	int this_level, index, level_lsb, level_msb;

	dst_addr &= PAGE_MASK;
	/* Leaf entry: idmap the page itself, read-only + executable. */
	prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));

	/* Build bottom-up: each iteration wraps the previous level in a table. */
	for (this_level = 3; this_level >= 0; this_level--) {
		levels[this_level] = trans_alloc(info);
		if (!levels[this_level])
			return -ENOMEM;

		level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
		level_msb = min(level_lsb + bits_mapped, max_msb);
		level_mask = GENMASK_ULL(level_msb, level_lsb);

		/* Slot within this table selected by dst_addr's bits. */
		index = (dst_addr & level_mask) >> level_lsb;
		*(levels[this_level] + index) = prev_level_entry;

		/* This table becomes the entry installed one level up. */
		pfn = virt_to_pfn(levels[this_level]);
		prev_level_entry = pte_val(pfn_pte(pfn,
						   __pgprot(PMD_TYPE_TABLE)));

		/* All address bits consumed: this table is the root. */
		if (level_msb == max_msb)
			break;
	}

	/* pfn still names the last table allocated, i.e. the root. */
	*trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
	*t0sz = TCR_T0SZ(max_msb + 1);

	return 0;
}
270788bfdd9SPasha Tatashin
271788bfdd9SPasha Tatashin /*
272788bfdd9SPasha Tatashin * Create a copy of the vector table so we can call HVC_SET_VECTORS or
273788bfdd9SPasha Tatashin * HVC_SOFT_RESTART from contexts where the table may be overwritten.
274788bfdd9SPasha Tatashin */
int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
			       phys_addr_t *el2_vectors)
{
	unsigned long stub_start, stub_end;
	void *hyp_stub = trans_alloc(info);

	if (!hyp_stub)
		return -ENOMEM;

	*el2_vectors = virt_to_phys(hyp_stub);
	memcpy(hyp_stub, &trans_pgd_stub_vectors, ARM64_VECTOR_TABLE_LEN);

	/* Make the copy visible to instruction fetches and to EL2. */
	stub_start = (unsigned long)hyp_stub;
	stub_end = stub_start + ARM64_VECTOR_TABLE_LEN;
	caches_clean_inval_pou(stub_start, stub_end);
	dcache_clean_inval_poc(stub_start, stub_end);

	return 0;
}
293