/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel page table mapping
 *
 * Copyright (C) 2015 ARM Ltd.
 */

#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

/*
 * The linear mapping and the start of memory are both 2M aligned (per
 * the arm64 booting.txt requirements). Hence we can use section mapping
 * with 4K (section size = 2M) but not with 16K (section size = 32M) or
 * 64K (section size = 512M).
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define ARM64_KERNEL_USES_PMD_MAPS 1
#else
#define ARM64_KERNEL_USES_PMD_MAPS 0
#endif
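
/*
 * For reference, the section (PMD-level block) size is 1 << PMD_SHIFT:
 *   4K granule:  PMD_SHIFT = 21  ->  2M   (fits the 2M alignment guarantee)
 *   16K granule: PMD_SHIFT = 25  ->  32M  (exceeds the 2M guarantee)
 *   64K granule: PMD_SHIFT = 29  ->  512M (exceeds the 2M guarantee)
 */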

/*
 * The idmap and swapper page tables need some space reserved in the kernel
 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
 * map the kernel. With the 64K page configuration, the swapper and idmap
 * need to map down to the pte level. The swapper also maps the FDT (see
 * __create_page_tables for more information). Note that the number of ID
 * map translation levels could be increased on the fly if system RAM is
 * out of reach for the default VA range, so the pages required to map the
 * highest possible PA are reserved in all cases.
 */
#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
#else
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
#endif
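
/*
 * Worked example: with 4K pages and a 48-bit VA (CONFIG_PGTABLE_LEVELS = 4),
 * the swapper stops at the PMD level because sections map the kernel, so it
 * needs 4 - 1 = 3 levels of tables (pgd, pud, pmd). With 64K pages and a
 * 48-bit VA (CONFIG_PGTABLE_LEVELS = 3), no section maps are used, so all
 * three levels (pgd, pmd, pte) are required.
 */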

/*
 * If KASLR is enabled, then an offset K is added to the kernel address
 * space. The bottom 21 bits of this offset are zero to guarantee 2MB
 * alignment for PA and VA.
 *
 * For each pagetable level of the swapper, we know that the shift will
 * be larger than 21 (for the 4KB granule case we use section maps, so
 * the smallest shift is actually 30). There is therefore the possibility
 * that KASLR can increase the number of pagetable entries by 1, so we
 * make room for this extra entry.
 *
 * Note that KASLR cannot increase the number of required entries for a
 * level by more than one, because it increments both the virtual start
 * and end addresses equally (the extra entry comes from the case where
 * the end address is just pushed over a boundary and the start address
 * isn't).
 */

#ifdef CONFIG_RANDOMIZE_BASE
#define EARLY_KASLR	(1)
#else
#define EARLY_KASLR	(0)
#endif

#define EARLY_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
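
/*
 * Worked example (values are illustrative): with shift = 30 (1GB covered
 * per entry), a range from vstart = 0x40000000 to vend = 0x40200000 stays
 * within a single 1GB-indexed entry, so
 * ((vend - 1) >> 30) - (vstart >> 30) + 1 = 1 - 1 + 1 = 1. With
 * CONFIG_RANDOMIZE_BASE, EARLY_KASLR adds one more entry for the case where
 * randomization pushes the end of the range across a 1GB boundary, giving 2.
 */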

#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))

#if SWAPPER_PGTABLE_LEVELS > 3
#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
#else
#define EARLY_PUDS(vstart, vend) (0)
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
#else
#define EARLY_PMDS(vstart, vend) (0)
#endif

#define EARLY_PAGES(vstart, vend) (1			/* PGDIR page */				\
			+ EARLY_PGDS((vstart), (vend))	/* each PGDIR needs a next level page table */	\
			+ EARLY_PUDS((vstart), (vend))	/* each PUD needs a next level page table */	\
			+ EARLY_PMDS((vstart), (vend)))	/* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
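
/*
 * Illustrative sizing (4K pages, 48-bit VA, so SWAPPER_PGTABLE_LEVELS = 3):
 * for a kernel image that crosses no PGD or PUD boundary, EARLY_PGDS() and
 * EARLY_PMDS() are each 1 and EARLY_PUDS() is 0, so EARLY_PAGES() is
 * 1 + 1 + 0 + 1 = 3 pages (12K). CONFIG_RANDOMIZE_BASE bumps each non-zero
 * per-level count by one, giving 1 + 2 + 0 + 2 = 5 pages (20K).
 */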

/* the initial ID map may need two extra pages if it needs to be extended */
#if VA_BITS < 48
#define INIT_IDMAP_DIR_SIZE	(INIT_DIR_SIZE + (2 * PAGE_SIZE))
#else
#define INIT_IDMAP_DIR_SIZE	INIT_DIR_SIZE
#endif
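
/*
 * A sketch of the worst case: on a VA_BITS < 48 configuration, RAM may sit
 * above what the default page table depth can reach, in which case the
 * early boot code extends the ID map; the two spare pages reserved here
 * bound what that extension can consume.
 */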

/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
#define SWAPPER_BLOCK_SIZE	PMD_SIZE
#define SWAPPER_TABLE_SHIFT	PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
#define SWAPPER_TABLE_SHIFT	PMD_SHIFT
#endif
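
/*
 * In other words: with 4K pages the swapper's block mappings are 2M PMD
 * sections described by PUD-level tables, while with 16K or 64K pages the
 * kernel is mapped with ordinary page-sized entries described by PMD-level
 * tables.
 */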

/*
 * Initial memory map attributes.
 */
#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
#else
#define SWAPPER_RW_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
#endif
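
/*
 * Note: the access flag (PTE_AF/PMD_SECT_AF) is pre-set in these attributes
 * so the early mappings never raise access flag faults, which nothing is
 * ready to handle that early in boot. The RX variants simply add the
 * read-only bit on top of the RW attributes.
 */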

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule).
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
#endif
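
/*
 * The resulting rounding granules (assuming the default configurations):
 *   4K granule:  PUD_SHIFT = 30       ->  1G
 *   16K granule: CONT_PMD_SHIFT = 30  ->  1G (32 contiguous 32M PMDs)
 *   64K granule: PMD_SHIFT = 29       ->  512M
 */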

/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, due to the fact that the base of the vmemmap region has a
 * direct correspondence with the base of the linear region, and thus needs
 * to appear sufficiently aligned in the virtual address space.
 */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif
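
/*
 * Illustration: each 1 << SECTION_SIZE_BITS bytes of RAM is described by a
 * fixed-size slice of the vmemmap, so if memstart_addr were aligned to less
 * than a section, the first struct page slice would start at a misaligned
 * vmemmap address. Taking the larger of the two shifts satisfies both the
 * block mapping and the vmemmap constraints.
 */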

#endif	/* __ASM_KERNEL_PGTABLE_H */