/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * Helpers for implementing paging levels
 */

#ifndef _ASM_ARC_PGTABLE_LEVELS_H
#define _ASM_ARC_PGTABLE_LEVELS_H

#if CONFIG_PGTABLE_LEVELS == 2

/*
 * 2 level paging setup for software walked MMUv3 (ARC700) and MMUv4 (HS)
 *
 * [31]            32 bit virtual address              [0]
 * -------------------------------------------------------
 * |               | <---------- PGDIR_SHIFT ----------> |
 * |               |                | <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * Given software walk, the vaddr split is arbitrarily set to 11:8:13
 * However enabling of super page in a 2 level regime pegs PGDIR_SHIFT to
 * super page size.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT		24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT		21
#else
/*
 * No Super page case
 * Default value provides 11:8:13 (8K), 10:10:12 (4K)
 * Limits imposed by pgtable_t only PAGE_SIZE long
 * (so 4K page can only have 1K entries: or 10 bits)
 */
#ifdef CONFIG_ARC_PAGE_SIZE_4K
#define PGDIR_SHIFT		22
#else
#define PGDIR_SHIFT		21
#endif

#endif

#else /* CONFIG_PGTABLE_LEVELS != 2 */

/*
 * A default 3 level paging testing setup in software walked MMU
 *   MMUv4 (8K page): <4> : <7> : <8> : <13>
 * A default 4 level paging testing setup in software walked MMU
 *   MMUv4 (8K page): <4> : <3> : <4> : <8> : <13>
 */
#define PGDIR_SHIFT		28
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		25
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		21
#endif

#endif /* CONFIG_PGTABLE_LEVELS */

#define PGDIR_SIZE		BIT(PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD		BIT(32 - PGDIR_SHIFT)

#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SIZE		BIT(PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE - 1))
#define PTRS_PER_PUD		BIT(PGDIR_SHIFT - PUD_SHIFT)
#endif

#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SIZE		BIT(PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE - 1))
#define PTRS_PER_PMD		BIT(PUD_SHIFT - PMD_SHIFT)
#endif

/* With < 3 levels, PMD_SHIFT is folded to PGDIR_SHIFT by asm-generic below */
#define PTRS_PER_PTE		BIT(PMD_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__

/*
 * Pull in the generic folding header for the levels NOT implemented, so the
 * unimplemented level's accessors collapse onto the level above.
 */
#if CONFIG_PGTABLE_LEVELS > 3
#include <asm-generic/pgtable-nop4d.h>
#elif CONFIG_PGTABLE_LEVELS > 2
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nopmd.h>
#endif

/*
 * 1st level paging: pgd
 */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#if CONFIG_PGTABLE_LEVELS > 3

/* In 4 level paging, p4d_* macros work on pgd */
#define p4d_none(x)		(!p4d_val(x))
#define p4d_bad(x)		((p4d_val(x) & ~PAGE_MASK))
#define p4d_present(x)		(p4d_val(x))
#define p4d_clear(xp)		do { p4d_val(*(xp)) = 0; } while (0)
#define p4d_pgtable(p4d)	((pud_t *)(p4d_val(p4d) & PAGE_MASK))
#define p4d_page(p4d)		virt_to_page(p4d_pgtable(p4d))
#define set_p4d(p4dp, p4d)	(*(p4dp) = p4d)

/*
 * 2nd level paging: pud
 */
#define pud_ERROR(e) \
	pr_crit("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

#endif

#if CONFIG_PGTABLE_LEVELS > 2

/*
 * In 3 level paging, pud_* macros work on pgd
 * In 4 level paging, pud_* macros work on pud
 */
#define pud_none(x)		(!pud_val(x))
#define pud_bad(x)		((pud_val(x) & ~PAGE_MASK))
#define pud_present(x)		(pud_val(x))
#define pud_clear(xp)		do { pud_val(*(xp)) = 0; } while (0)
#define pud_pgtable(pud)	((pmd_t *)(pud_val(pud) & PAGE_MASK))
#define pud_page(pud)		virt_to_page(pud_pgtable(pud))
#define set_pud(pudp, pud)	(*(pudp) = pud)

/*
 * 3rd level paging: pmd
 */
#define pmd_ERROR(e) \
	pr_crit("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))

#define pmd_pfn(pmd)		((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#endif

/*
 * Due to the strange way generic pgtable level folding works, the pmd_* macros
 *  - are valid even for 2 levels (which supposedly only has pgd - pte)
 *  - behave differently for 2 vs. 3
 * In 2 level paging (pgd -> pte), pmd_* macros work on pgd
 * In 3+ level paging (pgd -> pmd -> pte), pmd_* macros work on pmd
 */
#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))
#define set_pmd(pmdp, pmd)	(*(pmdp) = pmd)
#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))

/*
 * 4th level paging: pte
 */
#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,ptep)	set_pte_at(mm, addr, ptep, __pte(0))
#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define set_pte(ptep, pte)	((*(ptep)) = (pte))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

#ifdef CONFIG_ISA_ARCV2
/* _PAGE_HW_SZ set means a hardware-assisted super page, i.e. a leaf pmd */
#define pmd_leaf(x)		(pmd_val(x) & _PAGE_HW_SZ)
#endif

#endif /* !__ASSEMBLY__ */

#endif