// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
#define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */

u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);

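/*
 * Descriptive note (added for readability): swapper_pg_dir is mapped
 * read-only in the kernel's own page tables, so a pgd entry cannot be
 * written through its ordinary kernel-virtual address. Instead, the pgd
 * page is temporarily mapped writable through the FIX_PGD fixmap slot
 * and the update is staged through that alias.
 */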
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

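/*
 * Pick the pgprot for mmap() of physical ranges (e.g. via /dev/mem):
 * pfns not covered by the linear map are mapped Device-nGnRnE
 * (pgprot_noncached); RAM opened with O_SYNC gets Normal-NC
 * (pgprot_writecombine); everything else keeps the caller's attributes.
 */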
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_is_map_memory(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

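/*
 * Allocate a page-table page from memblock: this runs before the buddy
 * allocator is up, and the new page is zeroed through the fixmap because
 * it may not be covered by the (still incomplete) linear map yet.
 */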
static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

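/*
 * Illustrative examples (not exhaustive): clearing PTE_RDONLY and setting
 * PTE_WRITE to make a live mapping writable only touches bits in 'mask'
 * below and is safe; changing the output address or the shareability of
 * a live entry differs outside 'mask', would need break-before-make, and
 * so trips the BUG_ON()s at the call sites.
 */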
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

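/*
 * Worked example, assuming a 4K granule: CONT_PTE_MASK covers a 16-page
 * (64K) naturally aligned window, so a [addr, next) range whose start,
 * end and physical address are all 64K-aligned gets its PTEs written
 * with PTE_CONT set, allowing the TLB to cache the whole run as a
 * single entry.
 */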
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
		phys_addr_t pte_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, pmdval);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
		phys_addr_t pmd_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pudval |= PUD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, pudval);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

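/*
 * Example: with 4K pages a PUD entry covers 1GB, so mapping a range
 * whose virtual, end and physical addresses are all 1GB-aligned can use
 * a single level-1 block entry. 16K and 64K granules do not offer this
 * block size, hence the PAGE_SHIFT != 12 check below.
 */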
static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
		phys_addr_t pud_phys;

		if (flags & NO_EXEC_MAPPINGS)
			p4dval |= P4D_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, p4dval);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	/*
	 * No need for locking during early boot. And it doesn't work as
	 * expected with KASLR enabled.
	 */
	if (system_state != SYSTEM_BOOTING)
		mutex_lock(&fixmap_lock);
	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
	if (system_state != SYSTEM_BOOTING)
		mutex_unlock(&fixmap_lock);
}

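/*
 * Core mapping routine: walks [virt, virt + size) one pgd entry at a
 * time and hands each chunk to alloc_init_pud(). A hypothetical call
 * (illustrative values only) such as
 *
 *	__create_pgd_mapping(init_mm.pgd, 0x80000000,
 *			     (unsigned long)__phys_to_virt(0x80000000),
 *			     SZ_2M, PAGE_KERNEL, early_pgtable_alloc, 0);
 *
 * would, with 4K pages, end up as a single 2MB section mapping in the
 * linear map, since the range is PMD-aligned and no NO_* flags are set.
 */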
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
			    (unsigned long)__init_begin - (unsigned long)_stext,
			    PAGE_KERNEL_RO);
}

static bool crash_mem_map __initdata;

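/*
 * Example: booting with "crashkernel=512M" (any value; proper parsing
 * happens later in reserve_crashkernel()) sets crash_mem_map, which
 * makes map_mem() below use page-granular mappings so the crashkernel
 * region can later be unmapped without splitting live block mappings.
 */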
static int __init enable_crash_mem_map(char *arg)
{
	/*
	 * Proper parameter parsing is done by reserve_crashkernel(). We only
	 * need to know if the linear map has to avoid block mappings so that
	 * the crashkernel reservations can be unmapped later.
	 */
	crash_mem_map = true;

	return 0;
}
early_param("crashkernel", enable_crash_mem_map);

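/*
 * Create the linear mapping for all usable memblock regions, taking
 * care to (temporarily) skip the kernel text/rodata so that no writable
 * alias of those sections is ever created.
 */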
static void __init map_mem(pgd_t *pgdp)
{
	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
	phys_addr_t kernel_start = __pa_symbol(_stext);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	int flags = NO_EXEC_MAPPINGS;
	u64 i;

	/*
	 * Setting hierarchical PXNTable attributes on table entries covering
	 * the linear region is only possible if it is guaranteed that no table
	 * entries at any level are being shared between the linear region and
	 * the vmalloc region. Check whether this is true for the PGD level, in
	 * which case it is guaranteed to be true for all other levels as well.
	 */
	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

	if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_stext, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

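/*
 * Map one kernel image segment with the given permissions and register
 * a vm_struct for it so the region is visible to the vmalloc machinery;
 * unless VM_NO_GUARD is passed, a guard page is accounted in the vm
 * area's size.
 */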
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

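/*
 * Accepted values (illustrative): "rodata=off" leaves kernel text
 * writable for external debuggers, "rodata=on" (or any strtobool()
 * spelling of true) write-protects text/rodata, and "rodata=full"
 * additionally maps the linear aliases at page granularity so their
 * permissions can be changed as well.
 */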
static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings, before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

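/*
 * Set up the kernel page tables: map_kernel() and map_mem() populate
 * swapper_pg_dir through the fixmap (it is not yet writable through the
 * linear map), TTBR1 is then switched over to swapper_pg_dir, and the
 * temporary init_pg_dir is freed back to memblock.
 */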
void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_phys_free(__pa_symbol(init_pg_dir),
			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

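/*
 * Worked example with mask = PMD_MASK and 4K pages (hypothetical
 * numbers): for start = 0x210000 and floor = 0x200000, start & PMD_MASK
 * gives 0x200000 >= floor, so the 2MB window holding the range may be
 * freed provided its end also fits below the (aligned) ceiling. Had
 * floor been above 0x200000, part of that window could still belong to
 * a neighbouring region and the page-table page must be kept.
 */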
821bbd6ec60SAnshuman Khandual static bool pgtable_range_aligned(unsigned long start, unsigned long end,
822bbd6ec60SAnshuman Khandual 				  unsigned long floor, unsigned long ceiling,
823bbd6ec60SAnshuman Khandual 				  unsigned long mask)
824bbd6ec60SAnshuman Khandual {
825bbd6ec60SAnshuman Khandual 	start &= mask;
826bbd6ec60SAnshuman Khandual 	if (start < floor)
827bbd6ec60SAnshuman Khandual 		return false;
828bbd6ec60SAnshuman Khandual 
829bbd6ec60SAnshuman Khandual 	if (ceiling) {
830bbd6ec60SAnshuman Khandual 		ceiling &= mask;
831bbd6ec60SAnshuman Khandual 		if (!ceiling)
832bbd6ec60SAnshuman Khandual 			return false;
833bbd6ec60SAnshuman Khandual 	}
834bbd6ec60SAnshuman Khandual 
835bbd6ec60SAnshuman Khandual 	if (end - 1 > ceiling - 1)
836bbd6ec60SAnshuman Khandual 		return false;
837bbd6ec60SAnshuman Khandual 	return true;
838bbd6ec60SAnshuman Khandual }
839bbd6ec60SAnshuman Khandual 
840bbd6ec60SAnshuman Khandual static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
841eee07935SAnshuman Khandual 				    unsigned long end, bool free_mapped,
842eee07935SAnshuman Khandual 				    struct vmem_altmap *altmap)
843bbd6ec60SAnshuman Khandual {
844bbd6ec60SAnshuman Khandual 	pte_t *ptep, pte;
845bbd6ec60SAnshuman Khandual 
846bbd6ec60SAnshuman Khandual 	do {
847bbd6ec60SAnshuman Khandual 		ptep = pte_offset_kernel(pmdp, addr);
848bbd6ec60SAnshuman Khandual 		pte = READ_ONCE(*ptep);
849bbd6ec60SAnshuman Khandual 		if (pte_none(pte))
850bbd6ec60SAnshuman Khandual 			continue;
851bbd6ec60SAnshuman Khandual 
852bbd6ec60SAnshuman Khandual 		WARN_ON(!pte_present(pte));
853bbd6ec60SAnshuman Khandual 		pte_clear(&init_mm, addr, ptep);
854bbd6ec60SAnshuman Khandual 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
855bbd6ec60SAnshuman Khandual 		if (free_mapped)
856eee07935SAnshuman Khandual 			free_hotplug_page_range(pte_page(pte),
857eee07935SAnshuman Khandual 						PAGE_SIZE, altmap);
858bbd6ec60SAnshuman Khandual 	} while (addr += PAGE_SIZE, addr < end);
859bbd6ec60SAnshuman Khandual }
860bbd6ec60SAnshuman Khandual 
861bbd6ec60SAnshuman Khandual static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
862eee07935SAnshuman Khandual 				    unsigned long end, bool free_mapped,
863eee07935SAnshuman Khandual 				    struct vmem_altmap *altmap)
864bbd6ec60SAnshuman Khandual {
865bbd6ec60SAnshuman Khandual 	unsigned long next;
866bbd6ec60SAnshuman Khandual 	pmd_t *pmdp, pmd;
867bbd6ec60SAnshuman Khandual 
868bbd6ec60SAnshuman Khandual 	do {
869bbd6ec60SAnshuman Khandual 		next = pmd_addr_end(addr, end);
870bbd6ec60SAnshuman Khandual 		pmdp = pmd_offset(pudp, addr);
871bbd6ec60SAnshuman Khandual 		pmd = READ_ONCE(*pmdp);
872bbd6ec60SAnshuman Khandual 		if (pmd_none(pmd))
873bbd6ec60SAnshuman Khandual 			continue;
874bbd6ec60SAnshuman Khandual 
875bbd6ec60SAnshuman Khandual 		WARN_ON(!pmd_present(pmd));
876bbd6ec60SAnshuman Khandual 		if (pmd_sect(pmd)) {
877bbd6ec60SAnshuman Khandual 			pmd_clear(pmdp);
878bbd6ec60SAnshuman Khandual 
879bbd6ec60SAnshuman Khandual 			/*
880bbd6ec60SAnshuman Khandual 			 * One TLBI should be sufficient here as the PMD_SIZE
881bbd6ec60SAnshuman Khandual 			 * range is mapped with a single block entry.
882bbd6ec60SAnshuman Khandual 			 */
883bbd6ec60SAnshuman Khandual 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
884bbd6ec60SAnshuman Khandual 			if (free_mapped)
885bbd6ec60SAnshuman Khandual 				free_hotplug_page_range(pmd_page(pmd),
886eee07935SAnshuman Khandual 							PMD_SIZE, altmap);
887bbd6ec60SAnshuman Khandual 			continue;
888bbd6ec60SAnshuman Khandual 		}
889bbd6ec60SAnshuman Khandual 		WARN_ON(!pmd_table(pmd));
890eee07935SAnshuman Khandual 		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
891bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
892bbd6ec60SAnshuman Khandual }
893bbd6ec60SAnshuman Khandual 
894bbd6ec60SAnshuman Khandual static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
895eee07935SAnshuman Khandual 				    unsigned long end, bool free_mapped,
896eee07935SAnshuman Khandual 				    struct vmem_altmap *altmap)
897bbd6ec60SAnshuman Khandual {
898bbd6ec60SAnshuman Khandual 	unsigned long next;
899bbd6ec60SAnshuman Khandual 	pud_t *pudp, pud;
900bbd6ec60SAnshuman Khandual 
901bbd6ec60SAnshuman Khandual 	do {
902bbd6ec60SAnshuman Khandual 		next = pud_addr_end(addr, end);
903bbd6ec60SAnshuman Khandual 		pudp = pud_offset(p4dp, addr);
904bbd6ec60SAnshuman Khandual 		pud = READ_ONCE(*pudp);
905bbd6ec60SAnshuman Khandual 		if (pud_none(pud))
906bbd6ec60SAnshuman Khandual 			continue;
907bbd6ec60SAnshuman Khandual 
908bbd6ec60SAnshuman Khandual 		WARN_ON(!pud_present(pud));
909bbd6ec60SAnshuman Khandual 		if (pud_sect(pud)) {
910bbd6ec60SAnshuman Khandual 			pud_clear(pudp);
911bbd6ec60SAnshuman Khandual 
912bbd6ec60SAnshuman Khandual 			/*
913bbd6ec60SAnshuman Khandual 			 * One TLBI should be sufficient here as the PUD_SIZE
914bbd6ec60SAnshuman Khandual 			 * range is mapped with a single block entry.
915bbd6ec60SAnshuman Khandual 			 */
916bbd6ec60SAnshuman Khandual 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
917bbd6ec60SAnshuman Khandual 			if (free_mapped)
918bbd6ec60SAnshuman Khandual 				free_hotplug_page_range(pud_page(pud),
919eee07935SAnshuman Khandual 							PUD_SIZE, altmap);
920bbd6ec60SAnshuman Khandual 			continue;
921bbd6ec60SAnshuman Khandual 		}
922bbd6ec60SAnshuman Khandual 		WARN_ON(!pud_table(pud));
923eee07935SAnshuman Khandual 		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
924bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
925bbd6ec60SAnshuman Khandual }
926bbd6ec60SAnshuman Khandual 
927bbd6ec60SAnshuman Khandual static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
928eee07935SAnshuman Khandual 				    unsigned long end, bool free_mapped,
929eee07935SAnshuman Khandual 				    struct vmem_altmap *altmap)
930bbd6ec60SAnshuman Khandual {
931bbd6ec60SAnshuman Khandual 	unsigned long next;
932bbd6ec60SAnshuman Khandual 	p4d_t *p4dp, p4d;
933bbd6ec60SAnshuman Khandual 
934bbd6ec60SAnshuman Khandual 	do {
935bbd6ec60SAnshuman Khandual 		next = p4d_addr_end(addr, end);
936bbd6ec60SAnshuman Khandual 		p4dp = p4d_offset(pgdp, addr);
937bbd6ec60SAnshuman Khandual 		p4d = READ_ONCE(*p4dp);
938bbd6ec60SAnshuman Khandual 		if (p4d_none(p4d))
939bbd6ec60SAnshuman Khandual 			continue;
940bbd6ec60SAnshuman Khandual 
941bbd6ec60SAnshuman Khandual 		WARN_ON(!p4d_present(p4d));
942eee07935SAnshuman Khandual 		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
943bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
944bbd6ec60SAnshuman Khandual }
945bbd6ec60SAnshuman Khandual 
946bbd6ec60SAnshuman Khandual static void unmap_hotplug_range(unsigned long addr, unsigned long end,
947eee07935SAnshuman Khandual 				bool free_mapped, struct vmem_altmap *altmap)
948bbd6ec60SAnshuman Khandual {
949bbd6ec60SAnshuman Khandual 	unsigned long next;
950bbd6ec60SAnshuman Khandual 	pgd_t *pgdp, pgd;
951bbd6ec60SAnshuman Khandual 
952eee07935SAnshuman Khandual 	/*
953eee07935SAnshuman Khandual 	 * altmap can only be used as vmemmap mapping backing memory.
954eee07935SAnshuman Khandual 	 * In case the backing memory itself is not being freed, then
955eee07935SAnshuman Khandual 	 * altmap is irrelevant. Warn about this inconsistency when
956eee07935SAnshuman Khandual 	 * encountered.
957eee07935SAnshuman Khandual 	 */
958eee07935SAnshuman Khandual 	WARN_ON(!free_mapped && altmap);
959eee07935SAnshuman Khandual 
960bbd6ec60SAnshuman Khandual 	do {
961bbd6ec60SAnshuman Khandual 		next = pgd_addr_end(addr, end);
962bbd6ec60SAnshuman Khandual 		pgdp = pgd_offset_k(addr);
963bbd6ec60SAnshuman Khandual 		pgd = READ_ONCE(*pgdp);
964bbd6ec60SAnshuman Khandual 		if (pgd_none(pgd))
965bbd6ec60SAnshuman Khandual 			continue;
966bbd6ec60SAnshuman Khandual 
967bbd6ec60SAnshuman Khandual 		WARN_ON(!pgd_present(pgd));
968eee07935SAnshuman Khandual 		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
969bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
970bbd6ec60SAnshuman Khandual }
971bbd6ec60SAnshuman Khandual 
972bbd6ec60SAnshuman Khandual static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
973bbd6ec60SAnshuman Khandual 				 unsigned long end, unsigned long floor,
974bbd6ec60SAnshuman Khandual 				 unsigned long ceiling)
975bbd6ec60SAnshuman Khandual {
976bbd6ec60SAnshuman Khandual 	pte_t *ptep, pte;
977bbd6ec60SAnshuman Khandual 	unsigned long i, start = addr;
978bbd6ec60SAnshuman Khandual 
979bbd6ec60SAnshuman Khandual 	do {
980bbd6ec60SAnshuman Khandual 		ptep = pte_offset_kernel(pmdp, addr);
981bbd6ec60SAnshuman Khandual 		pte = READ_ONCE(*ptep);
982bbd6ec60SAnshuman Khandual 
983bbd6ec60SAnshuman Khandual 		/*
984bbd6ec60SAnshuman Khandual 		 * This is just a sanity check here which verifies that
985bbd6ec60SAnshuman Khandual 		 * pte clearing has been done by earlier unmap loops.
986bbd6ec60SAnshuman Khandual 		 */
987bbd6ec60SAnshuman Khandual 		WARN_ON(!pte_none(pte));
988bbd6ec60SAnshuman Khandual 	} while (addr += PAGE_SIZE, addr < end);
989bbd6ec60SAnshuman Khandual 
990bbd6ec60SAnshuman Khandual 	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
991bbd6ec60SAnshuman Khandual 		return;
992bbd6ec60SAnshuman Khandual 
993bbd6ec60SAnshuman Khandual 	/*
994bbd6ec60SAnshuman Khandual 	 * Check whether we can free the pte page if the rest of the
995bbd6ec60SAnshuman Khandual 	 * entries are empty. Overlap with other regions have been
996bbd6ec60SAnshuman Khandual 	 * handled by the floor/ceiling check.
997bbd6ec60SAnshuman Khandual 	 */
998bbd6ec60SAnshuman Khandual 	ptep = pte_offset_kernel(pmdp, 0UL);
999bbd6ec60SAnshuman Khandual 	for (i = 0; i < PTRS_PER_PTE; i++) {
1000bbd6ec60SAnshuman Khandual 		if (!pte_none(READ_ONCE(ptep[i])))
1001bbd6ec60SAnshuman Khandual 			return;
1002bbd6ec60SAnshuman Khandual 	}
1003bbd6ec60SAnshuman Khandual 
1004bbd6ec60SAnshuman Khandual 	pmd_clear(pmdp);
1005bbd6ec60SAnshuman Khandual 	__flush_tlb_kernel_pgtable(start);
1006bbd6ec60SAnshuman Khandual 	free_hotplug_pgtable_page(virt_to_page(ptep));
1007bbd6ec60SAnshuman Khandual }
1008bbd6ec60SAnshuman Khandual 
1009bbd6ec60SAnshuman Khandual static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
1010bbd6ec60SAnshuman Khandual 				 unsigned long end, unsigned long floor,
1011bbd6ec60SAnshuman Khandual 				 unsigned long ceiling)
1012bbd6ec60SAnshuman Khandual {
1013bbd6ec60SAnshuman Khandual 	pmd_t *pmdp, pmd;
1014bbd6ec60SAnshuman Khandual 	unsigned long i, next, start = addr;
1015bbd6ec60SAnshuman Khandual 
1016bbd6ec60SAnshuman Khandual 	do {
1017bbd6ec60SAnshuman Khandual 		next = pmd_addr_end(addr, end);
1018bbd6ec60SAnshuman Khandual 		pmdp = pmd_offset(pudp, addr);
1019bbd6ec60SAnshuman Khandual 		pmd = READ_ONCE(*pmdp);
1020bbd6ec60SAnshuman Khandual 		if (pmd_none(pmd))
1021bbd6ec60SAnshuman Khandual 			continue;
1022bbd6ec60SAnshuman Khandual 
1023bbd6ec60SAnshuman Khandual 		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
1024bbd6ec60SAnshuman Khandual 		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
1025bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
1026bbd6ec60SAnshuman Khandual 
1027bbd6ec60SAnshuman Khandual 	if (CONFIG_PGTABLE_LEVELS <= 2)
1028bbd6ec60SAnshuman Khandual 		return;
1029bbd6ec60SAnshuman Khandual 
1030bbd6ec60SAnshuman Khandual 	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
1031bbd6ec60SAnshuman Khandual 		return;
1032bbd6ec60SAnshuman Khandual 
1033bbd6ec60SAnshuman Khandual 	/*
1034bbd6ec60SAnshuman Khandual 	 * Check whether we can free the pmd page if the rest of the
1035bbd6ec60SAnshuman Khandual 	 * entries are empty. Overlap with other regions have been
1036bbd6ec60SAnshuman Khandual 	 * handled by the floor/ceiling check.
1037bbd6ec60SAnshuman Khandual 	 */
1038bbd6ec60SAnshuman Khandual 	pmdp = pmd_offset(pudp, 0UL);
1039bbd6ec60SAnshuman Khandual 	for (i = 0; i < PTRS_PER_PMD; i++) {
1040bbd6ec60SAnshuman Khandual 		if (!pmd_none(READ_ONCE(pmdp[i])))
1041bbd6ec60SAnshuman Khandual 			return;
1042bbd6ec60SAnshuman Khandual 	}
1043bbd6ec60SAnshuman Khandual 
1044bbd6ec60SAnshuman Khandual 	pud_clear(pudp);
1045bbd6ec60SAnshuman Khandual 	__flush_tlb_kernel_pgtable(start);
1046bbd6ec60SAnshuman Khandual 	free_hotplug_pgtable_page(virt_to_page(pmdp));
1047bbd6ec60SAnshuman Khandual }
1048bbd6ec60SAnshuman Khandual 
1049bbd6ec60SAnshuman Khandual static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
1050bbd6ec60SAnshuman Khandual 				 unsigned long end, unsigned long floor,
1051bbd6ec60SAnshuman Khandual 				 unsigned long ceiling)
1052bbd6ec60SAnshuman Khandual {
1053bbd6ec60SAnshuman Khandual 	pud_t *pudp, pud;
1054bbd6ec60SAnshuman Khandual 	unsigned long i, next, start = addr;
1055bbd6ec60SAnshuman Khandual 
1056bbd6ec60SAnshuman Khandual 	do {
1057bbd6ec60SAnshuman Khandual 		next = pud_addr_end(addr, end);
1058bbd6ec60SAnshuman Khandual 		pudp = pud_offset(p4dp, addr);
1059bbd6ec60SAnshuman Khandual 		pud = READ_ONCE(*pudp);
1060bbd6ec60SAnshuman Khandual 		if (pud_none(pud))
1061bbd6ec60SAnshuman Khandual 			continue;
1062bbd6ec60SAnshuman Khandual 
1063bbd6ec60SAnshuman Khandual 		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
1064bbd6ec60SAnshuman Khandual 		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
1065bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
1066bbd6ec60SAnshuman Khandual 
1067bbd6ec60SAnshuman Khandual 	if (CONFIG_PGTABLE_LEVELS <= 3)
1068bbd6ec60SAnshuman Khandual 		return;
1069bbd6ec60SAnshuman Khandual 
1070bbd6ec60SAnshuman Khandual 	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
1071bbd6ec60SAnshuman Khandual 		return;
1072bbd6ec60SAnshuman Khandual 
1073bbd6ec60SAnshuman Khandual 	/*
1074bbd6ec60SAnshuman Khandual 	 * Check whether we can free the pud page if the rest of the
1075bbd6ec60SAnshuman Khandual 	 * entries are empty. Overlap with other regions has been
1076bbd6ec60SAnshuman Khandual 	 * handled by the floor/ceiling check.
1077bbd6ec60SAnshuman Khandual 	 */
1078bbd6ec60SAnshuman Khandual 	pudp = pud_offset(p4dp, 0UL);
1079bbd6ec60SAnshuman Khandual 	for (i = 0; i < PTRS_PER_PUD; i++) {
1080bbd6ec60SAnshuman Khandual 		if (!pud_none(READ_ONCE(pudp[i])))
1081bbd6ec60SAnshuman Khandual 			return;
1082bbd6ec60SAnshuman Khandual 	}
1083bbd6ec60SAnshuman Khandual 
1084bbd6ec60SAnshuman Khandual 	p4d_clear(p4dp);
1085bbd6ec60SAnshuman Khandual 	__flush_tlb_kernel_pgtable(start);
1086bbd6ec60SAnshuman Khandual 	free_hotplug_pgtable_page(virt_to_page(pudp));
1087bbd6ec60SAnshuman Khandual }
1088bbd6ec60SAnshuman Khandual 
1089bbd6ec60SAnshuman Khandual static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
1090bbd6ec60SAnshuman Khandual 				 unsigned long end, unsigned long floor,
1091bbd6ec60SAnshuman Khandual 				 unsigned long ceiling)
1092bbd6ec60SAnshuman Khandual {
1093bbd6ec60SAnshuman Khandual 	unsigned long next;
1094bbd6ec60SAnshuman Khandual 	p4d_t *p4dp, p4d;
1095bbd6ec60SAnshuman Khandual 
1096bbd6ec60SAnshuman Khandual 	do {
1097bbd6ec60SAnshuman Khandual 		next = p4d_addr_end(addr, end);
1098bbd6ec60SAnshuman Khandual 		p4dp = p4d_offset(pgdp, addr);
1099bbd6ec60SAnshuman Khandual 		p4d = READ_ONCE(*p4dp);
1100bbd6ec60SAnshuman Khandual 		if (p4d_none(p4d))
1101bbd6ec60SAnshuman Khandual 			continue;
1102bbd6ec60SAnshuman Khandual 
1103bbd6ec60SAnshuman Khandual 		WARN_ON(!p4d_present(p4d));
1104bbd6ec60SAnshuman Khandual 		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
1105bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
1106bbd6ec60SAnshuman Khandual }
1107bbd6ec60SAnshuman Khandual 
1108bbd6ec60SAnshuman Khandual static void free_empty_tables(unsigned long addr, unsigned long end,
1109bbd6ec60SAnshuman Khandual 			      unsigned long floor, unsigned long ceiling)
1110bbd6ec60SAnshuman Khandual {
1111bbd6ec60SAnshuman Khandual 	unsigned long next;
1112bbd6ec60SAnshuman Khandual 	pgd_t *pgdp, pgd;
1113bbd6ec60SAnshuman Khandual 
1114bbd6ec60SAnshuman Khandual 	do {
1115bbd6ec60SAnshuman Khandual 		next = pgd_addr_end(addr, end);
1116bbd6ec60SAnshuman Khandual 		pgdp = pgd_offset_k(addr);
1117bbd6ec60SAnshuman Khandual 		pgd = READ_ONCE(*pgdp);
1118bbd6ec60SAnshuman Khandual 		if (pgd_none(pgd))
1119bbd6ec60SAnshuman Khandual 			continue;
1120bbd6ec60SAnshuman Khandual 
1121bbd6ec60SAnshuman Khandual 		WARN_ON(!pgd_present(pgd));
1122bbd6ec60SAnshuman Khandual 		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
1123bbd6ec60SAnshuman Khandual 	} while (addr = next, addr < end);
1124bbd6ec60SAnshuman Khandual }
1125bbd6ec60SAnshuman Khandual #endif
1126bbd6ec60SAnshuman Khandual 
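/*
 * Editor's note (not from the original source): the vmemmap (the struct
 * page array) is populated with base pages whenever PMD block mappings
 * cannot be used for it; otherwise PMD-sized blocks are used, with a
 * per-chunk fallback to base pages whenever a PMD_SIZE allocation fails.
 */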
11272062d44dSAnshuman Khandual #if !ARM64_KERNEL_USES_PMD_MAPS
11287b73d978SChristoph Hellwig int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
11297b73d978SChristoph Hellwig 		struct vmem_altmap *altmap)
1130c1cc1552SCatalin Marinas {
1131edb739eeSAnshuman Khandual 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1132eee07935SAnshuman Khandual 	return vmemmap_populate_basepages(start, end, node, altmap);
1133c1cc1552SCatalin Marinas }
11342062d44dSAnshuman Khandual #else	/* !ARM64_KERNEL_USES_PMD_MAPS */
11357b73d978SChristoph Hellwig int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
11367b73d978SChristoph Hellwig 		struct vmem_altmap *altmap)
1137c1cc1552SCatalin Marinas {
11380aad818bSJohannes Weiner 	unsigned long addr = start;
1139c1cc1552SCatalin Marinas 	unsigned long next;
114020a004e7SWill Deacon 	pgd_t *pgdp;
1141e9f63768SMike Rapoport 	p4d_t *p4dp;
114220a004e7SWill Deacon 	pud_t *pudp;
114320a004e7SWill Deacon 	pmd_t *pmdp;
1144c1cc1552SCatalin Marinas 
1145edb739eeSAnshuman Khandual 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1146c1cc1552SCatalin Marinas 	do {
1147c1cc1552SCatalin Marinas 		next = pmd_addr_end(addr, end);
1148c1cc1552SCatalin Marinas 
114920a004e7SWill Deacon 		pgdp = vmemmap_pgd_populate(addr, node);
115020a004e7SWill Deacon 		if (!pgdp)
1151c1cc1552SCatalin Marinas 			return -ENOMEM;
1152c1cc1552SCatalin Marinas 
1153e9f63768SMike Rapoport 		p4dp = vmemmap_p4d_populate(pgdp, addr, node);
1154e9f63768SMike Rapoport 		if (!p4dp)
1155e9f63768SMike Rapoport 			return -ENOMEM;
1156e9f63768SMike Rapoport 
1157e9f63768SMike Rapoport 		pudp = vmemmap_pud_populate(p4dp, addr, node);
115820a004e7SWill Deacon 		if (!pudp)
1159c1cc1552SCatalin Marinas 			return -ENOMEM;
1160c1cc1552SCatalin Marinas 
116120a004e7SWill Deacon 		pmdp = pmd_offset(pudp, addr);
116220a004e7SWill Deacon 		if (pmd_none(READ_ONCE(*pmdp))) {
1163c1cc1552SCatalin Marinas 			void *p = NULL;
1164c1cc1552SCatalin Marinas 
1165eee07935SAnshuman Khandual 			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
11669f84f39fSSudarshan Rajagopalan 			if (!p) {
11679f84f39fSSudarshan Rajagopalan 				if (vmemmap_populate_basepages(addr, next, node, altmap))
1168c1cc1552SCatalin Marinas 					return -ENOMEM;
11699f84f39fSSudarshan Rajagopalan 				continue;
11709f84f39fSSudarshan Rajagopalan 			}
1171c1cc1552SCatalin Marinas 
117220a004e7SWill Deacon 			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
1173c1cc1552SCatalin Marinas 		} else {
117420a004e7SWill Deacon 			vmemmap_verify((pte_t *)pmdp, node, addr, next);
		}
1175c1cc1552SCatalin Marinas 	} while (addr = next, addr != end);
1176c1cc1552SCatalin Marinas 
1177c1cc1552SCatalin Marinas 	return 0;
1178c1cc1552SCatalin Marinas }
11792062d44dSAnshuman Khandual #endif	/* !ARM64_KERNEL_USES_PMD_MAPS */
118040221c73SAnshuman Khandual 
118140221c73SAnshuman Khandual #ifdef CONFIG_MEMORY_HOTPLUG
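/*
 * Editor's note (not from the original source): tearing the vmemmap down
 * for hot-removed memory is the reverse of vmemmap_populate(): unmap the
 * range (the 'true' argument asks for the backing pages to be freed as
 * well) and then release any page table pages that became empty, with
 * VMEMMAP_START/VMEMMAP_END as floor and ceiling so that tables shared
 * with neighbouring ranges survive.
 */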
118224b6d416SChristoph Hellwig void vmemmap_free(unsigned long start, unsigned long end,
118324b6d416SChristoph Hellwig 		struct vmem_altmap *altmap)
11840197518cSTang Chen {
1185bbd6ec60SAnshuman Khandual 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1186bbd6ec60SAnshuman Khandual 
1187eee07935SAnshuman Khandual 	unmap_hotplug_range(start, end, true, altmap);
1188bbd6ec60SAnshuman Khandual 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
11890197518cSTang Chen }
119040221c73SAnshuman Khandual #endif /* CONFIG_MEMORY_HOTPLUG */
1191af86e597SLaura Abbott 
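/*
 * Editor's note (not from the original source): the fixmap's page tables
 * (bm_pud, bm_pmd and bm_pte) are statically allocated in the kernel
 * image, so the walkers below can return pointers into them through the
 * kimage-based offset helpers even before the linear map is available.
 */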
1192af86e597SLaura Abbott static inline pud_t *fixmap_pud(unsigned long addr)
1193af86e597SLaura Abbott {
119420a004e7SWill Deacon 	pgd_t *pgdp = pgd_offset_k(addr);
1195e9f63768SMike Rapoport 	p4d_t *p4dp = p4d_offset(pgdp, addr);
1196e9f63768SMike Rapoport 	p4d_t p4d = READ_ONCE(*p4dp);
1197af86e597SLaura Abbott 
1198e9f63768SMike Rapoport 	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
1199af86e597SLaura Abbott 
1200e9f63768SMike Rapoport 	return pud_offset_kimg(p4dp, addr);
1201af86e597SLaura Abbott }
1202af86e597SLaura Abbott 
1203af86e597SLaura Abbott static inline pmd_t *fixmap_pmd(unsigned long addr)
1204af86e597SLaura Abbott {
120520a004e7SWill Deacon 	pud_t *pudp = fixmap_pud(addr);
120620a004e7SWill Deacon 	pud_t pud = READ_ONCE(*pudp);
1207af86e597SLaura Abbott 
120820a004e7SWill Deacon 	BUG_ON(pud_none(pud) || pud_bad(pud));
1209af86e597SLaura Abbott 
121020a004e7SWill Deacon 	return pmd_offset_kimg(pudp, addr);
1211af86e597SLaura Abbott }
1212af86e597SLaura Abbott 
1213af86e597SLaura Abbott static inline pte_t *fixmap_pte(unsigned long addr)
1214af86e597SLaura Abbott {
1215157962f5SArd Biesheuvel 	return &bm_pte[pte_index(addr)];
1216af86e597SLaura Abbott }
1217af86e597SLaura Abbott 
12182077be67SLaura Abbott /*
12192077be67SLaura Abbott  * The p*d_populate functions call virt_to_phys implicitly so they can't be used
12202077be67SLaura Abbott  * directly on kernel symbols (bm_p*d). This function is called too early to use
12212077be67SLaura Abbott  * lm_alias so __p*d_populate functions must be used to populate with the
12222077be67SLaura Abbott  * physical address from __pa_symbol.
12232077be67SLaura Abbott  */
1224af86e597SLaura Abbott void __init early_fixmap_init(void)
1225af86e597SLaura Abbott {
1226e9f63768SMike Rapoport 	pgd_t *pgdp;
1227e9f63768SMike Rapoport 	p4d_t *p4dp, p4d;
122820a004e7SWill Deacon 	pud_t *pudp;
122920a004e7SWill Deacon 	pmd_t *pmdp;
1230af86e597SLaura Abbott 	unsigned long addr = FIXADDR_START;
1231af86e597SLaura Abbott 
123220a004e7SWill Deacon 	pgdp = pgd_offset_k(addr);
1233e9f63768SMike Rapoport 	p4dp = p4d_offset(pgdp, addr);
1234e9f63768SMike Rapoport 	p4d = READ_ONCE(*p4dp);
1235f80fb3a3SArd Biesheuvel 	if (CONFIG_PGTABLE_LEVELS > 3 &&
1236e9f63768SMike Rapoport 	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
1237f9040773SArd Biesheuvel 		/*
1238f9040773SArd Biesheuvel 		 * We only end up here if the kernel mapping and the fixmap
1239f9040773SArd Biesheuvel 		 * share the top level pgd entry, which should only happen on
1240f9040773SArd Biesheuvel 		 * 16k/4-level configurations.
1241f9040773SArd Biesheuvel 		 */
1242f9040773SArd Biesheuvel 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
1243e9f63768SMike Rapoport 		pudp = pud_offset_kimg(p4dp, addr);
1244f9040773SArd Biesheuvel 	} else {
1245e9f63768SMike Rapoport 		if (p4d_none(p4d))
1246c1fd78a7SArd Biesheuvel 			__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
124720a004e7SWill Deacon 		pudp = fixmap_pud(addr);
1248f9040773SArd Biesheuvel 	}
124920a004e7SWill Deacon 	if (pud_none(READ_ONCE(*pudp)))
1250c1fd78a7SArd Biesheuvel 		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
125120a004e7SWill Deacon 	pmdp = fixmap_pmd(addr);
125220a004e7SWill Deacon 	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
1253af86e597SLaura Abbott 
1254af86e597SLaura Abbott 	/*
1255af86e597SLaura Abbott 	 * The boot-ioremap range spans multiple pmds, for which
1256157962f5SArd Biesheuvel 	 * we are not prepared:
1257af86e597SLaura Abbott 	 */
1258af86e597SLaura Abbott 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
1259af86e597SLaura Abbott 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
1260af86e597SLaura Abbott 
126120a004e7SWill Deacon 	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
126220a004e7SWill Deacon 	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
1263af86e597SLaura Abbott 		WARN_ON(1);
126420a004e7SWill Deacon 		pr_warn("pmdp %p != %p, %p\n",
126520a004e7SWill Deacon 			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
1266af86e597SLaura Abbott 			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
1267af86e597SLaura Abbott 		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
1268af86e597SLaura Abbott 			fix_to_virt(FIX_BTMAP_BEGIN));
1269af86e597SLaura Abbott 		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
1270af86e597SLaura Abbott 			fix_to_virt(FIX_BTMAP_END));
1271af86e597SLaura Abbott 
1272af86e597SLaura Abbott 		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
1273af86e597SLaura Abbott 		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
1274af86e597SLaura Abbott 	}
1275af86e597SLaura Abbott }
1276af86e597SLaura Abbott 
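/*
 * Editor's note (not from the original source): once early_fixmap_init()
 * has run, the fixmap translation path is fixed: every fixmap virtual
 * address resolves through bm_pud/bm_pmd into bm_pte, so __set_fixmap()
 * below only ever writes statically allocated pte entries and never has
 * to allocate page tables.
 */
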
127718b4b276SJames Morse /*
127818b4b276SJames Morse  * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
127918b4b276SJames Morse  * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
128018b4b276SJames Morse  */
1281af86e597SLaura Abbott void __set_fixmap(enum fixed_addresses idx,
1282af86e597SLaura Abbott 			       phys_addr_t phys, pgprot_t flags)
1283af86e597SLaura Abbott {
1284af86e597SLaura Abbott 	unsigned long addr = __fix_to_virt(idx);
128520a004e7SWill Deacon 	pte_t *ptep;
1286af86e597SLaura Abbott 
1287b63dbef9SMark Rutland 	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
1288af86e597SLaura Abbott 
128920a004e7SWill Deacon 	ptep = fixmap_pte(addr);
1290af86e597SLaura Abbott 
1291af86e597SLaura Abbott 	if (pgprot_val(flags)) {
129220a004e7SWill Deacon 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
1293af86e597SLaura Abbott 	} else {
129420a004e7SWill Deacon 		pte_clear(&init_mm, addr, ptep);
1295af86e597SLaura Abbott 		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
1296af86e597SLaura Abbott 	}
1297af86e597SLaura Abbott }
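/*
 * Editor's note (not from the original source): illustrative usage via
 * the asm-generic set_fixmap()/clear_fixmap() wrappers around the
 * function above; FIX_SOMETHING stands in for a real fixed_addresses
 * slot and phys for a real physical address:
 *
 *	set_fixmap(FIX_SOMETHING, phys);
 *	ptr = (void *)fix_to_virt(FIX_SOMETHING);
 *	...access the page through ptr...
 *	clear_fixmap(FIX_SOMETHING);
 *
 * clear_fixmap() passes an empty pgprot, taking the branch above that
 * clears the pte and flushes the TLB for that page.
 */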
129861bd93ceSArd Biesheuvel 
1299e112b032SHsin-Yi Wang void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
130061bd93ceSArd Biesheuvel {
130161bd93ceSArd Biesheuvel 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
1302f80fb3a3SArd Biesheuvel 	int offset;
130361bd93ceSArd Biesheuvel 	void *dt_virt;
130461bd93ceSArd Biesheuvel 
130561bd93ceSArd Biesheuvel 	/*
130661bd93ceSArd Biesheuvel 	 * Check whether the physical FDT address is set and meets the minimum
130761bd93ceSArd Biesheuvel 	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least
130804a84810SArd Biesheuvel 	 * 8 bytes so that we can always access the magic and size fields
130904a84810SArd Biesheuvel 	 * of the FDT header after mapping the first chunk; double-check
131004a84810SArd Biesheuvel 	 * here that this is indeed the case.
131161bd93ceSArd Biesheuvel 	 */
131261bd93ceSArd Biesheuvel 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
131361bd93ceSArd Biesheuvel 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
131461bd93ceSArd Biesheuvel 		return NULL;
131561bd93ceSArd Biesheuvel 
131661bd93ceSArd Biesheuvel 	/*
131761bd93ceSArd Biesheuvel 	 * Make sure that the FDT region can be mapped without the need to
131861bd93ceSArd Biesheuvel 	 * allocate additional translation table pages, so that it is safe
1319132233a7SLaura Abbott 	 * to call create_mapping_noalloc() this early.
132061bd93ceSArd Biesheuvel 	 *
132161bd93ceSArd Biesheuvel 	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
132261bd93ceSArd Biesheuvel 	 * be in the same PMD as the rest of the fixmap.
132361bd93ceSArd Biesheuvel 	 * On 4k pages, we'll use section mappings for the FDT so we only
132461bd93ceSArd Biesheuvel 	 * have to be in the same PUD.
132561bd93ceSArd Biesheuvel 	 */
132661bd93ceSArd Biesheuvel 	BUILD_BUG_ON(dt_virt_base % SZ_2M);
132761bd93ceSArd Biesheuvel 
1328b433dce0SSuzuki K. Poulose 	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
1329b433dce0SSuzuki K. Poulose 		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
133061bd93ceSArd Biesheuvel 
1331b433dce0SSuzuki K. Poulose 	offset = dt_phys % SWAPPER_BLOCK_SIZE;
133261bd93ceSArd Biesheuvel 	dt_virt = (void *)dt_virt_base + offset;
133361bd93ceSArd Biesheuvel 
133461bd93ceSArd Biesheuvel 	/* map the first chunk so we can read the size from the header */
1335132233a7SLaura Abbott 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
1336132233a7SLaura Abbott 			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
133761bd93ceSArd Biesheuvel 
133804a84810SArd Biesheuvel 	if (fdt_magic(dt_virt) != FDT_MAGIC)
133961bd93ceSArd Biesheuvel 		return NULL;
134061bd93ceSArd Biesheuvel 
1341f80fb3a3SArd Biesheuvel 	*size = fdt_totalsize(dt_virt);
1342f80fb3a3SArd Biesheuvel 	if (*size > MAX_FDT_SIZE)
134361bd93ceSArd Biesheuvel 		return NULL;
134461bd93ceSArd Biesheuvel 
1345f80fb3a3SArd Biesheuvel 	if (offset + *size > SWAPPER_BLOCK_SIZE)
1346132233a7SLaura Abbott 		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
1347f80fb3a3SArd Biesheuvel 			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
1348f80fb3a3SArd Biesheuvel 
1349f80fb3a3SArd Biesheuvel 	return dt_virt;
1350f80fb3a3SArd Biesheuvel }
1351f80fb3a3SArd Biesheuvel 
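/*
 * Editor's note (not from the original source): pud_set_huge() and
 * pmd_set_huge() install section mappings for suitably aligned huge
 * ioremap/vmap requests. Changes to a live valid entry are limited to
 * permission updates: pgattr_change_is_safe() rejects any transition
 * (e.g. a new output address or memory type) that would need a
 * break-before-make sequence.
 */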
135220a004e7SWill Deacon int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
1353324420bfSArd Biesheuvel {
1354f7f0097aSAnshuman Khandual 	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
135515122ee2SWill Deacon 
135682034c23SLaura Abbott 	/* Only allow permission changes for now */
135782034c23SLaura Abbott 	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
135882034c23SLaura Abbott 				   pud_val(new_pud)))
135915122ee2SWill Deacon 		return 0;
136015122ee2SWill Deacon 
136187dedf7cSAnshuman Khandual 	VM_BUG_ON(phys & ~PUD_MASK);
136282034c23SLaura Abbott 	set_pud(pudp, new_pud);
1363324420bfSArd Biesheuvel 	return 1;
1364324420bfSArd Biesheuvel }
1365324420bfSArd Biesheuvel 
136620a004e7SWill Deacon int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
1367324420bfSArd Biesheuvel {
1368f7f0097aSAnshuman Khandual 	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
136915122ee2SWill Deacon 
137082034c23SLaura Abbott 	/* Only allow permission changes for now */
137182034c23SLaura Abbott 	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
137282034c23SLaura Abbott 				   pmd_val(new_pmd)))
137315122ee2SWill Deacon 		return 0;
137415122ee2SWill Deacon 
137587dedf7cSAnshuman Khandual 	VM_BUG_ON(phys & ~PMD_MASK);
137682034c23SLaura Abbott 	set_pmd(pmdp, new_pmd);
1377324420bfSArd Biesheuvel 	return 1;
1378324420bfSArd Biesheuvel }
1379324420bfSArd Biesheuvel 
1380d8a71905SJonathan Marek int pud_clear_huge(pud_t *pudp)
1381d8a71905SJonathan Marek {
1382d8a71905SJonathan Marek 	if (!pud_sect(READ_ONCE(*pudp)))
1383d8a71905SJonathan Marek 		return 0;
1384d8a71905SJonathan Marek 	pud_clear(pudp);
1385d8a71905SJonathan Marek 	return 1;
1386d8a71905SJonathan Marek }
1387d8a71905SJonathan Marek 
138820a004e7SWill Deacon int pmd_clear_huge(pmd_t *pmdp)
1389324420bfSArd Biesheuvel {
139020a004e7SWill Deacon 	if (!pmd_sect(READ_ONCE(*pmdp)))
1391324420bfSArd Biesheuvel 		return 0;
139220a004e7SWill Deacon 	pmd_clear(pmdp);
1393324420bfSArd Biesheuvel 	return 1;
1394324420bfSArd Biesheuvel }
1395b6bdb751SToshi Kani 
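/*
 * Editor's note (not from the original source): the two helpers below
 * are the cleanup side of huge kernel mappings: when a table entry is
 * about to be replaced with a block mapping, the now-unused lower-level
 * table is detached from the entry above it, the walk cache is flushed,
 * and the table page(s) are freed.
 */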
1396ec28bb9cSChintan Pandya int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
1397b6bdb751SToshi Kani {
1398ec28bb9cSChintan Pandya 	pte_t *table;
1399ec28bb9cSChintan Pandya 	pmd_t pmd;
1400ec28bb9cSChintan Pandya 
1401ec28bb9cSChintan Pandya 	pmd = READ_ONCE(*pmdp);
1402ec28bb9cSChintan Pandya 
1403fac880c7SMark Rutland 	if (!pmd_table(pmd)) {
14049c006972SWill Deacon 		VM_WARN_ON(1);
1405ec28bb9cSChintan Pandya 		return 1;
1406b6bdb751SToshi Kani 	}
1407b6bdb751SToshi Kani 
1408ec28bb9cSChintan Pandya 	table = pte_offset_kernel(pmdp, addr);
1409ec28bb9cSChintan Pandya 	pmd_clear(pmdp);
1410ec28bb9cSChintan Pandya 	__flush_tlb_kernel_pgtable(addr);
1411ec28bb9cSChintan Pandya 	pte_free_kernel(NULL, table);
1412ec28bb9cSChintan Pandya 	return 1;
1413ec28bb9cSChintan Pandya }
1414ec28bb9cSChintan Pandya 
1415ec28bb9cSChintan Pandya int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
1416b6bdb751SToshi Kani {
1417ec28bb9cSChintan Pandya 	pmd_t *table;
1418ec28bb9cSChintan Pandya 	pmd_t *pmdp;
1419ec28bb9cSChintan Pandya 	pud_t pud;
1420ec28bb9cSChintan Pandya 	unsigned long next, end;
1421ec28bb9cSChintan Pandya 
1422ec28bb9cSChintan Pandya 	pud = READ_ONCE(*pudp);
1423ec28bb9cSChintan Pandya 
1424fac880c7SMark Rutland 	if (!pud_table(pud)) {
14259c006972SWill Deacon 		VM_WARN_ON(1);
1426ec28bb9cSChintan Pandya 		return 1;
1427ec28bb9cSChintan Pandya 	}
1428ec28bb9cSChintan Pandya 
1429ec28bb9cSChintan Pandya 	table = pmd_offset(pudp, addr);
1430ec28bb9cSChintan Pandya 	pmdp = table;
1431ec28bb9cSChintan Pandya 	next = addr;
1432ec28bb9cSChintan Pandya 	end = addr + PUD_SIZE;
1433ec28bb9cSChintan Pandya 	do {
1434ec28bb9cSChintan Pandya 		pmd_free_pte_page(pmdp, next);
1435ec28bb9cSChintan Pandya 	} while (pmdp++, next += PMD_SIZE, next != end);
1436ec28bb9cSChintan Pandya 
1437ec28bb9cSChintan Pandya 	pud_clear(pudp);
1438ec28bb9cSChintan Pandya 	__flush_tlb_kernel_pgtable(addr);
1439ec28bb9cSChintan Pandya 	pmd_free(NULL, table);
1440ec28bb9cSChintan Pandya 	return 1;
1441b6bdb751SToshi Kani }
14424ab21506SRobin Murphy 
14434ab21506SRobin Murphy #ifdef CONFIG_MEMORY_HOTPLUG
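/*
 * Editor's note (not from the original source): __remove_pgd_mapping()
 * undoes the linear map side of arch_add_memory(): unmap the range
 * without freeing the backing pages themselves (hence the 'false'
 * argument), then free any page table pages that became empty, clamped
 * to the [PAGE_OFFSET, PAGE_END) linear region.
 */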
1444bbd6ec60SAnshuman Khandual static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
1445bbd6ec60SAnshuman Khandual {
1446bbd6ec60SAnshuman Khandual 	unsigned long end = start + size;
1447bbd6ec60SAnshuman Khandual 
1448bbd6ec60SAnshuman Khandual 	WARN_ON(pgdir != init_mm.pgd);
1449bbd6ec60SAnshuman Khandual 	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
1450bbd6ec60SAnshuman Khandual 
1451eee07935SAnshuman Khandual 	unmap_hotplug_range(start, end, false, NULL);
1452bbd6ec60SAnshuman Khandual 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
1453bbd6ec60SAnshuman Khandual }
1454bbd6ec60SAnshuman Khandual 
145503aaf83fSAnshuman Khandual struct range arch_get_mappable_range(void)
145658284a90SAnshuman Khandual {
145703aaf83fSAnshuman Khandual 	struct range mhp_range;
1458ee7febceSPavel Tatashin 	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
1459ee7febceSPavel Tatashin 	u64 end_linear_pa = __pa(PAGE_END - 1);
1460ee7febceSPavel Tatashin 
1461ee7febceSPavel Tatashin 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
1462ee7febceSPavel Tatashin 		/*
1463ee7febceSPavel Tatashin 		 * Check for a wrap: because the linear mapping is randomized,
1464ee7febceSPavel Tatashin 		 * the start physical address can actually be bigger than the
1465ee7febceSPavel Tatashin 		 * end physical address. In this case set start to zero,
1466ee7febceSPavel Tatashin 		 * because the [0, end_linear_pa] range must still be able to
1467ee7febceSPavel Tatashin 		 * cover all addressable physical addresses.
1468ee7febceSPavel Tatashin 		 */
1469ee7febceSPavel Tatashin 		if (start_linear_pa > end_linear_pa)
1470ee7febceSPavel Tatashin 			start_linear_pa = 0;
1471ee7febceSPavel Tatashin 	}
1472ee7febceSPavel Tatashin 
1473ee7febceSPavel Tatashin 	WARN_ON(start_linear_pa > end_linear_pa);
147403aaf83fSAnshuman Khandual 
147558284a90SAnshuman Khandual 	/*
147658284a90SAnshuman Khandual 	 * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)],
147758284a90SAnshuman Khandual 	 * accommodating both of its ends but excluding PAGE_END. The maximum
147858284a90SAnshuman Khandual 	 * physical range that can be mapped inside this linear mapping
147958284a90SAnshuman Khandual 	 * range must also be derived from its end points.
148058284a90SAnshuman Khandual 	 */
1481ee7febceSPavel Tatashin 	mhp_range.start = start_linear_pa;
1482ee7febceSPavel Tatashin 	mhp_range.end =  end_linear_pa;
1483ee7febceSPavel Tatashin 
148403aaf83fSAnshuman Khandual 	return mhp_range;
148558284a90SAnshuman Khandual }
148658284a90SAnshuman Khandual 
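/*
 * Editor's note (not from the original source): the range returned above
 * feeds the generic mhp_range_allowed() check, which the VM_BUG_ON() in
 * arch_add_memory() below relies on to reject any hot-add request that
 * the linear map could never cover.
 */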
1487940519f0SMichal Hocko int arch_add_memory(int nid, u64 start, u64 size,
1488f5637d3bSLogan Gunthorpe 		    struct mhp_params *params)
14894ab21506SRobin Murphy {
149087143f40SArd Biesheuvel 	int ret, flags = NO_EXEC_MAPPINGS;
14914ab21506SRobin Murphy 
149203aaf83fSAnshuman Khandual 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
1493840b2398SMarco Elver 
1494840b2398SMarco Elver 	/*
1495840b2398SMarco Elver 	 * KFENCE requires linear map to be mapped at page granularity, so that
1496840b2398SMarco Elver 	 * it is possible to protect/unprotect single pages in the KFENCE pool.
1497840b2398SMarco Elver 	 */
14986d47c23bSMike Rapoport 	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
149987143f40SArd Biesheuvel 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
15004ab21506SRobin Murphy 
15014ab21506SRobin Murphy 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
1502bfeb022fSLogan Gunthorpe 			     size, params->pgprot, __pgd_pgtable_alloc,
1503bfeb022fSLogan Gunthorpe 			     flags);
15044ab21506SRobin Murphy 
150516993c0fSDan Williams 	memblock_clear_nomap(start, size);
150616993c0fSDan Williams 
1507bbd6ec60SAnshuman Khandual 	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
1508f5637d3bSLogan Gunthorpe 			   params);
1509bbd6ec60SAnshuman Khandual 	if (ret)
1510bbd6ec60SAnshuman Khandual 		__remove_pgd_mapping(swapper_pg_dir,
1511bbd6ec60SAnshuman Khandual 				     __phys_to_virt(start), size);
15128fac67caSSudarshan Rajagopalan 	else {
15138fac67caSSudarshan Rajagopalan 		max_pfn = PFN_UP(start + size);
15148fac67caSSudarshan Rajagopalan 		max_low_pfn = max_pfn;
15158fac67caSSudarshan Rajagopalan 	}
15168fac67caSSudarshan Rajagopalan 
1517bbd6ec60SAnshuman Khandual 	return ret;
15184ab21506SRobin Murphy }
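/*
 * Editor's note (not from the original source): a hot-add is thus three
 * steps: map the new range in the linear map (page granular whenever
 * can_set_direct_map() or KFENCE requires the linear map to remain
 * splittable), hand the pages to the core via __add_pages(), and on
 * failure tear the new linear mapping back down so that no stale
 * entries are left behind.
 */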
1519bbd6ec60SAnshuman Khandual 
152065a2aa5fSDavid Hildenbrand void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
152122eb6346SDavid Hildenbrand {
152222eb6346SDavid Hildenbrand 	unsigned long start_pfn = start >> PAGE_SHIFT;
152322eb6346SDavid Hildenbrand 	unsigned long nr_pages = size >> PAGE_SHIFT;
152422eb6346SDavid Hildenbrand 
1525feee6b29SDavid Hildenbrand 	__remove_pages(start_pfn, nr_pages, altmap);
1526bbd6ec60SAnshuman Khandual 	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
152722eb6346SDavid Hildenbrand }
1528bbd6ec60SAnshuman Khandual 
1529bbd6ec60SAnshuman Khandual /*
1530bbd6ec60SAnshuman Khandual  * This memory hotplug notifier helps prevent boot memory from being
1531bbd6ec60SAnshuman Khandual  * inadvertently removed, as it blocks the pfn range offlining process
1532bbd6ec60SAnshuman Khandual  * in __offline_pages(). Hence it prevents both the offlining and the
1533bbd6ec60SAnshuman Khandual  * removal of boot memory, which is initially always online.
1534bbd6ec60SAnshuman Khandual  * If and when boot memory can be removed in the future, this notifier
1535bbd6ec60SAnshuman Khandual  * should be dropped and free_hotplug_page_range() should handle any
1536bbd6ec60SAnshuman Khandual  * reserved pages allocated during boot.
1537bbd6ec60SAnshuman Khandual  */
1538bbd6ec60SAnshuman Khandual static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
1539bbd6ec60SAnshuman Khandual 					   unsigned long action, void *data)
1540bbd6ec60SAnshuman Khandual {
1541bbd6ec60SAnshuman Khandual 	struct mem_section *ms;
1542bbd6ec60SAnshuman Khandual 	struct memory_notify *arg = data;
1543bbd6ec60SAnshuman Khandual 	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
1544bbd6ec60SAnshuman Khandual 	unsigned long pfn = arg->start_pfn;
1545bbd6ec60SAnshuman Khandual 
15469fb3d4a3SAnshuman Khandual 	if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
1547bbd6ec60SAnshuman Khandual 		return NOTIFY_OK;
1548bbd6ec60SAnshuman Khandual 
1549bbd6ec60SAnshuman Khandual 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
15509fb3d4a3SAnshuman Khandual 		unsigned long start = PFN_PHYS(pfn);
15519fb3d4a3SAnshuman Khandual 		unsigned long end = start + (1UL << PA_SECTION_SHIFT);
15529fb3d4a3SAnshuman Khandual 
1553bbd6ec60SAnshuman Khandual 		ms = __pfn_to_section(pfn);
15549fb3d4a3SAnshuman Khandual 		if (!early_section(ms))
15559fb3d4a3SAnshuman Khandual 			continue;
15569fb3d4a3SAnshuman Khandual 
15579fb3d4a3SAnshuman Khandual 		if (action == MEM_GOING_OFFLINE) {
15589fb3d4a3SAnshuman Khandual 			/*
15599fb3d4a3SAnshuman Khandual 			 * Boot memory removal is not supported. Prevent
15609fb3d4a3SAnshuman Khandual 			 * it by blocking any attempted offline request
15619fb3d4a3SAnshuman Khandual 			 * for the boot memory and just report it.
15629fb3d4a3SAnshuman Khandual 			 */
15639fb3d4a3SAnshuman Khandual 			pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
1564bbd6ec60SAnshuman Khandual 			return NOTIFY_BAD;
15659fb3d4a3SAnshuman Khandual 		} else if (action == MEM_OFFLINE) {
15669fb3d4a3SAnshuman Khandual 			/*
15679fb3d4a3SAnshuman Khandual 			 * This should never have happened. Boot memory
15689fb3d4a3SAnshuman Khandual 			 * offlining should have been prevented by this
15699fb3d4a3SAnshuman Khandual 			 * very notifier. Some memory removal procedure
15709fb3d4a3SAnshuman Khandual 			 * has probably changed, which would then require
15719fb3d4a3SAnshuman Khandual 			 * further debugging.
15729fb3d4a3SAnshuman Khandual 			 */
15739fb3d4a3SAnshuman Khandual 			pr_err("Boot memory [%lx %lx] offlined\n", start, end);
15749fb3d4a3SAnshuman Khandual 
15759fb3d4a3SAnshuman Khandual 			/*
15769fb3d4a3SAnshuman Khandual 			 * Core memory hotplug does not process a return
15779fb3d4a3SAnshuman Khandual 			 * code from the notifier for MEM_OFFLINE events.
15789fb3d4a3SAnshuman Khandual 			 * The error condition has been reported. Return
15799fb3d4a3SAnshuman Khandual 			 * from here as if ignored.
15809fb3d4a3SAnshuman Khandual 			 * from here as if the event were ignored.
15819fb3d4a3SAnshuman Khandual 			return NOTIFY_DONE;
15829fb3d4a3SAnshuman Khandual 		}
1583bbd6ec60SAnshuman Khandual 	}
1584bbd6ec60SAnshuman Khandual 	return NOTIFY_OK;
1585bbd6ec60SAnshuman Khandual }
1586bbd6ec60SAnshuman Khandual 
1587bbd6ec60SAnshuman Khandual static struct notifier_block prevent_bootmem_remove_nb = {
1588bbd6ec60SAnshuman Khandual 	.notifier_call = prevent_bootmem_remove_notifier,
1589bbd6ec60SAnshuman Khandual };
1590bbd6ec60SAnshuman Khandual 
1591fdd99a41SAnshuman Khandual /*
1592fdd99a41SAnshuman Khandual  * This ensures that boot memory sections on the platform are online
1593fdd99a41SAnshuman Khandual  * from early boot. A memory section cannot be prevented from being
1594fdd99a41SAnshuman Khandual  * offlined or removed if, for some reason, it is not online to begin
1595fdd99a41SAnshuman Khandual  * with. This helps validate the basic assumption on which the above
1596fdd99a41SAnshuman Khandual  * memory event notifier works to prevent boot memory section offlining
1597fdd99a41SAnshuman Khandual  * and its possible removal.
1598fdd99a41SAnshuman Khandual  */
1599fdd99a41SAnshuman Khandual static void validate_bootmem_online(void)
1600fdd99a41SAnshuman Khandual {
1601fdd99a41SAnshuman Khandual 	phys_addr_t start, end, addr;
1602fdd99a41SAnshuman Khandual 	struct mem_section *ms;
1603fdd99a41SAnshuman Khandual 	u64 i;
1604fdd99a41SAnshuman Khandual 
1605fdd99a41SAnshuman Khandual 	/*
1606fdd99a41SAnshuman Khandual 	 * Scanning across all memblock regions might be expensive
1607fdd99a41SAnshuman Khandual 	 * on some big memory systems. Hence enable this
1608fdd99a41SAnshuman Khandual 	 * validation only with DEBUG_VM.
1609fdd99a41SAnshuman Khandual 	 */
1610fdd99a41SAnshuman Khandual 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
1611fdd99a41SAnshuman Khandual 		return;
1612fdd99a41SAnshuman Khandual 
1613fdd99a41SAnshuman Khandual 	for_each_mem_range(i, &start, &end) {
1614fdd99a41SAnshuman Khandual 		for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
1615fdd99a41SAnshuman Khandual 			ms = __pfn_to_section(PHYS_PFN(addr));
1616fdd99a41SAnshuman Khandual 
1617fdd99a41SAnshuman Khandual 			/*
1618fdd99a41SAnshuman Khandual 			 * All memory ranges in the system at this point
1619fdd99a41SAnshuman Khandual 			 * should have been marked as early sections.
1620fdd99a41SAnshuman Khandual 			 */
1621fdd99a41SAnshuman Khandual 			WARN_ON(!early_section(ms));
1622fdd99a41SAnshuman Khandual 
1623fdd99a41SAnshuman Khandual 			/*
1624fdd99a41SAnshuman Khandual 			 * The memory notifier mechanism used here to prevent
1625fdd99a41SAnshuman Khandual 			 * boot memory offlining depends on the fact that each
1626fdd99a41SAnshuman Khandual 			 * early memory section on the system is initially
1627fdd99a41SAnshuman Khandual 			 * online. Otherwise a given memory section which
1628fdd99a41SAnshuman Khandual 			 * is already offline will be overlooked and can
1629fdd99a41SAnshuman Khandual 			 * be removed completely. Call out such sections.
1630fdd99a41SAnshuman Khandual 			 */
1631fdd99a41SAnshuman Khandual 			if (!online_section(ms))
1632fdd99a41SAnshuman Khandual 				pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
1633fdd99a41SAnshuman Khandual 					addr, addr + (1UL << PA_SECTION_SHIFT));
1634fdd99a41SAnshuman Khandual 		}
1635fdd99a41SAnshuman Khandual 	}
1636fdd99a41SAnshuman Khandual }
1637fdd99a41SAnshuman Khandual 
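/*
 * Editor's note (not from the original source): registered via
 * early_initcall() so that the notifier is in place before userspace
 * can issue any memory block offline requests.
 */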
1638bbd6ec60SAnshuman Khandual static int __init prevent_bootmem_remove_init(void)
1639bbd6ec60SAnshuman Khandual {
1640cb45babeSAnshuman Khandual 	int ret = 0;
1641cb45babeSAnshuman Khandual 
1642cb45babeSAnshuman Khandual 	if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
1643cb45babeSAnshuman Khandual 		return ret;
1644cb45babeSAnshuman Khandual 
1645fdd99a41SAnshuman Khandual 	validate_bootmem_online();
1646cb45babeSAnshuman Khandual 	ret = register_memory_notifier(&prevent_bootmem_remove_nb);
1647cb45babeSAnshuman Khandual 	if (ret)
1648cb45babeSAnshuman Khandual 		pr_err("%s: Notifier registration failed %d\n", __func__, ret);
1649cb45babeSAnshuman Khandual 
1650cb45babeSAnshuman Khandual 	return ret;
1651bbd6ec60SAnshuman Khandual }
1652cb45babeSAnshuman Khandual early_initcall(prevent_bootmem_remove_init);
165322eb6346SDavid Hildenbrand #endif
1654