xref: /openbmc/linux/arch/powerpc/mm/nohash/8xx.c (revision 79e790ff)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

#include <mm/mmu_decl.h>

/* Size of the IMMR area to map, as covered by the fixmap */
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

/* Top of the RAM area covered by the early block (LTLB) mapping */
static unsigned long block_mapped_ram;
/*
 * Return the PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, return 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return the VA for a given PA if it is mapped with LTLBs or fixmap.
 * Return 0 if not mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}
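
/*
 * Illustrative example (addresses depend on the configuration): with
 * PAGE_OFFSET at 0xc0000000 and block_mapped_ram covering 32M,
 * v_block_mapped(0xc1000000) returns 0x01000000 and
 * p_block_mapped(0x01000000) returns 0xc1000000, while an address
 * outside the IMMR window and the block mapped RAM yields 0.
 */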

static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		/*
		 * An 8M page can span two PGD entries, so populate both
		 * with the same huge page directory.
		 */
		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/*
	 * The PTE should never be already present; setting a
	 * protection-less entry (pgprot 0, used to invalidate a
	 * mapping) is tolerated.
	 */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

	return 0;
}
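
/*
 * The helpers below build the early linear mapping on top of this
 * function: mmu_mapin_immr() installs a single non-cached guarded 512K
 * page over the IMMR area, and mmu_mapin_ram_chunk() covers RAM with a
 * mix of 512K and 8M pages.
 */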

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;

void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	/* 512K pages up to the first 8M boundary ... */
	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	/* ... 8M pages for the 8M-aligned middle ... */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	/* ... and 512K pages for the remainder up to top. */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/*
	 * v has been advanced past the chunk by the loops above, so
	 * flush from the start of the chunk, not from PAGE_OFFSET + v.
	 */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}
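
/*
 * Illustrative walk (hypothetical values): with offset = 0x680000
 * (6.5M) and top = 0x1a00000 (26M), the first loop maps 6.5M..8M with
 * three 512K pages, the second maps 8M..24M with two 8M pages, and the
 * third maps 24M..26M with four 512K pages.
 */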

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	if (__map_without_ltlbs)
		return 0;

	/* Kernel text, then init text, then the rest of RAM */
	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled_or_kfence()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}
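
/*
 * Resulting early mapping in the non-debug case:
 *   [0, boundary)          PAGE_KERNEL_TEXT (kernel text)
 *   [boundary, einittext8) PAGE_KERNEL_TEXT (init text)
 *   [einittext8, top)      PAGE_KERNEL      (data)
 * mmu_mark_initmem_nx() and mmu_mark_rodata_ro() below tighten those
 * permissions once the corresponding code no longer needs to run.
 */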

void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	/* Remap init text with PAGE_KERNEL so it is no longer executable */
	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
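
/*
 * For example, a 64M first memblock still yields a 32M limit here;
 * mmu_mapin_ram() above raises the limit once more RAM gets block
 * mapped.
 */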

/*
 * Set up to use a given MMU context.
 * id is the context number, pgd is the PGD pointer.
 *
 * We load the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register to the new
 * "context".
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/*
	 * Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/*
	 * Register M_TWB will contain the base address of the level 1
	 * table minus the lower part of the kernel PGDIR base address,
	 * so that all accesses to the level 1 table are done relative
	 * to the lower part of the kernel PGDIR base address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}
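
/*
 * Illustrative arithmetic (hypothetical address): if
 * __pa(swapper_pg_dir) is 0x00308000, offset is (s16)0x8000 = -0x8000
 * and M_TWB is written with __pa(pgd) + 0x8000. The TLB miss handlers
 * are then expected to apply the matching sign-extended lower 16 bits
 * of the kernel PGDIR base as a displacement, yielding __pa(pgd) again.
 */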

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif
269