/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Keep module code happy; we don't set it read-only yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
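
/*
 * Example (a sketch; new_pfn and old_pte are hypothetical): rebuild a
 * PTE at a different pfn while preserving its protection bits:
 *
 *	pte_t moved = pfn_pte(new_pfn, pte_pgprot(old_pte));
 */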

#ifndef pmd_page_vaddr
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
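
/*
 * Example (illustrative, not from this file): generic mm code maps the
 * shared zero page on a read fault of anonymous memory, roughly:
 *
 *	struct page *zp = ZERO_PAGE(vmf->address);
 */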

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
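
/*
 * Example (illustrative): resolve the physical address backing a
 * vmalloc'ed buffer, e.g. before handing it to firmware:
 *
 *	unsigned long pa = vmalloc_to_phys(buf);
 */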

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When PTE fragments are used, PTE_FRAG_NR is defined in the subarch
 * pgtable.h, so it is guaranteed to be visible by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
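
/*
 * Illustrative use (a sketch, not a caller in this header): the PTE
 * fragment allocator caches the current partially-used page in the mm
 * context, roughly:
 *
 *	void *frag = pte_frag_get(&mm->context);
 *	if (frag)
 *		pte_frag_set(&mm->context, frag + PTE_FRAG_SIZE);
 */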

#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif
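
/*
 * Example (a sketch of a generic walker, not code from this file): stop
 * descending once an entry maps a huge page directly:
 *
 *	if (pmd_is_leaf(*pmd))
 *		return pmd_page(*pmd);
 */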

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}
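
/*
 * Example (hypothetical teardown path): grab the PTE page under a pmd
 * so generic code can free it:
 *
 *	pgtable_t table = pmd_pgtable(*pmd);
 *	pte_free_tlb(tlb, table, addr);
 */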

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into detail
 * on some of the restrictions. We don't check for PMD_SIZE because our
 * vmemmap allocation code can fall back correctly. The pageblock
 * alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, we can align things
	 * better for memory block sizes starting from 128MB. Hence
	 * require the vmemmap size to be PMD_SIZE aligned.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}
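
/*
 * Simplified sketch of the generic caller in mm/memory_hotplug.c, for
 * illustration only:
 *
 *	if (!arch_supports_memmap_on_memory(vmemmap_size))
 *		return false;
 */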

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */