/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * Helpers for implementing paging levels
 */

#ifndef _ASM_ARC_PGTABLE_LEVELS_H
#define _ASM_ARC_PGTABLE_LEVELS_H

#if CONFIG_PGTABLE_LEVELS == 2

/*
 * 2 level paging setup for software walked MMUv3 (ARC700) and MMUv4 (HS)
 *
 * [31]            32 bit virtual address              [0]
 * -------------------------------------------------------
 * |               | <---------- PGDIR_SHIFT ----------> |
 * |               |                | <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * Given the software walk, the vaddr split is arbitrarily set to 11:8:13
 * However, enabling super pages in a 2 level regime pegs PGDIR_SHIFT to the
 * super page size.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT		24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT		21
#else
/*
 * No Super page case
 * Default values provide 11:8:13 (8K) and 10:10:12 (4K)
 * Limit imposed by pgtable_t being only PAGE_SIZE long
 * (so a 4K page can only hold 1K entries, i.e. 10 bits)
 */
#ifdef CONFIG_ARC_PAGE_SIZE_4K
#define PGDIR_SHIFT		22
#else
#define PGDIR_SHIFT		21
#endif

#endif
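
/*
 * Illustrative sketch only (not from the original header): assuming the
 * default 8K page, no super page config above (PGDIR_SHIFT = 21,
 * PAGE_SHIFT = 13), a 32-bit vaddr splits 11:8:13. E.g. vaddr 0x80426ABC:
 *
 *   pgd index = 0x80426ABC >> 21          = 0x402 (top 11 bits)
 *   pte index = (0x80426ABC >> 13) & 0xff = 0x13  (middle 8 bits)
 *   page off  = 0x80426ABC & 0x1fff       = 0xabc (low 13 bits)
 */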

#else /* CONFIG_PGTABLE_LEVELS != 2 */

/*
 * A default 3 level paging testing setup in software walked MMU
 *   MMUv4 (8K page): <4> : <7> : <8> : <13>
 * A default 4 level paging testing setup in software walked MMU
 *   MMUv4 (8K page): <4> : <3> : <4> : <8> : <13>
 */
#define PGDIR_SHIFT		28
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		25
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		21
#endif
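
/*
 * Illustrative arithmetic only (not from the original header): with the
 * shifts above and an 8K page (PAGE_SHIFT = 13), the index widths work out
 * to the splits quoted in the comment:
 *
 *   3 levels: 32-28 : 28-21 : 21-13 : 13         =  4 : 7 : 8 : 13
 *   4 levels: 32-28 : 28-25 : 25-21 : 21-13 : 13 =  4 : 3 : 4 : 8 : 13
 */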

#endif /* CONFIG_PGTABLE_LEVELS */

#define PGDIR_SIZE		BIT(PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD		BIT(32 - PGDIR_SHIFT)

#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SIZE		BIT(PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE - 1))
#define PTRS_PER_PUD		BIT(PGDIR_SHIFT - PUD_SHIFT)
#endif

#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SIZE		BIT(PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE - 1))
#define PTRS_PER_PMD		BIT(PUD_SHIFT - PMD_SHIFT)
#endif

#define PTRS_PER_PTE		BIT(PMD_SHIFT - PAGE_SHIFT)
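
/*
 * Illustrative sketch only (not from the original header): for the default
 * 2 level, 8K page config (PGDIR_SHIFT = 21, PAGE_SHIFT = 13, and PMD_SHIFT
 * folded down to PGDIR_SHIFT by the asm-generic nop*d headers below), the
 * table sizes evaluate to:
 *
 *   PTRS_PER_PGD = BIT(32 - 21) = 2048 entries
 *   PTRS_PER_PTE = BIT(21 - 13) =  256 entries
 *
 * i.e. the 11:8:13 split described for the 2 level setup above.
 */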

#ifndef __ASSEMBLY__

#if CONFIG_PGTABLE_LEVELS > 3
#include <asm-generic/pgtable-nop4d.h>
#elif CONFIG_PGTABLE_LEVELS > 2
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nopmd.h>
#endif

/*
 * 1st level paging: pgd
 */
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#if CONFIG_PGTABLE_LEVELS > 3

/* In 4 level paging, p4d_* macros work on pgd */
#define p4d_none(x)		(!p4d_val(x))
#define p4d_bad(x)		((p4d_val(x) & ~PAGE_MASK))
#define p4d_present(x)		(p4d_val(x))
#define p4d_clear(xp)		do { p4d_val(*(xp)) = 0; } while (0)
#define p4d_pgtable(p4d)	((pud_t *)(p4d_val(p4d) & PAGE_MASK))
#define p4d_page(p4d)		virt_to_page(p4d_pgtable(p4d))
#define set_p4d(p4dp, p4d)	(*(p4dp) = p4d)

/*
 * 2nd level paging: pud
 */
#define pud_ERROR(e) \
	pr_crit("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

#endif

#if CONFIG_PGTABLE_LEVELS > 2

/*
 * In 3 level paging, pud_* macros work on pgd
 * In 4 level paging, pud_* macros work on pud
 */
#define pud_none(x)		(!pud_val(x))
#define pud_bad(x)		((pud_val(x) & ~PAGE_MASK))
#define pud_present(x)		(pud_val(x))
#define pud_clear(xp)		do { pud_val(*(xp)) = 0; } while (0)
#define pud_pgtable(pud)	((pmd_t *)(pud_val(pud) & PAGE_MASK))
#define pud_page(pud)		virt_to_page(pud_pgtable(pud))
#define set_pud(pudp, pud)	(*(pudp) = pud)

/*
 * 3rd level paging: pmd
 */
#define pmd_ERROR(e) \
	pr_crit("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))

#define pmd_pfn(pmd)		((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#endif

/*
 * Due to the strange way generic pgtable level folding works, the pmd_* macros
 *  - are valid even for 2 levels (which supposedly only has pgd -> pte)
 *  - behave differently for 2 vs. 3+ levels
 * In 2  level paging        (pgd -> pte), pmd_* macros work on pgd
 * In 3+ level paging (pgd -> pmd -> pte), pmd_* macros work on pmd
 */
#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)
#define pmd_pfn(pmd)		((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))
#define set_pmd(pmdp, pmd)	(*(pmdp) = pmd)
#define pmd_pgtable(pmd)	((pgtable_t) pmd_page(pmd))
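
/*
 * Illustrative sketch only (not part of the original header): a generic
 * software walk built on these macros (pgd_offset() etc. come from
 * linux/pgtable.h, shown here purely as a hypothetical usage example):
 *
 *   pgd_t *pgd = pgd_offset(mm, addr);
 *   pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd, addr), addr), addr);
 *   pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With CONFIG_PGTABLE_LEVELS == 2 the p4d/pud/pmd steps are folded away by
 * the asm-generic nop*d headers, so the pmd_* macros above end up operating
 * directly on the pgd entry, exactly as the comment above describes.
 */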

/*
 * 4th level paging: pte
 */
#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))

#define PFN_PTE_SHIFT		PAGE_SHIFT
#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,ptep)	set_pte_at(mm, addr, ptep, __pte(0))
#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define set_pte(ptep, pte)	((*(ptep)) = (pte))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
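
/*
 * Illustrative arithmetic only (not from the original header), assuming an
 * 8K page (PAGE_SHIFT = 13) and that the protection bits all sit below
 * PAGE_SHIFT, so pfn_pte()/pte_pfn() round-trip:
 *
 *   pfn_pte(0x10000, prot) -> pte_val = (0x10000 << 13) | pgprot_val(prot)
 *                                     = 0x20000000 | prot bits
 *   pte_pfn(that pte)      -> 0x20000000 >> 13 = 0x10000 (prot bits drop out)
 */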

#ifdef CONFIG_ISA_ARCV2
#define pmd_leaf(x)		(pmd_val(x) & _PAGE_HW_SZ)
#endif

#endif	/* !__ASSEMBLY__ */

#endif