/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/* Note due to the way vm flags are laid out, the bits are XWR */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

#ifndef pmd_page_vaddr
/* Kernel virtual address of the page table page this PMD entry points to */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

extern unsigned long ioremap_bot;

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
/* PTE fragments are not used: a PTE page is a full page */
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

/*
 * A leaf entry maps a (huge) page directly instead of pointing to a
 * lower-level table.  Default to "never a leaf" unless the subarch
 * provides its own implementation.
 */
#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
/* True if @x lies within the kernel's ioremap region */
static inline bool is_ioremap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= IOREMAP_BASE && addr < IOREMAP_END;
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */