xref: /openbmc/linux/arch/powerpc/mm/nohash/8xx.c (revision 80d0624d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

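/* Amount of RAM block-mapped at PAGE_OFFSET, set by mmu_mapin_ram() */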
static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap
 * Return 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

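/*
 * Early allocation of the page table backing an 8M huge mapping of @va.
 * An 8M page spans two consecutive page directory entries, so both are
 * made to point at the same hugepd.
 */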
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

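/*
 * Early mapping of @va to @pa with a huge page of size @psize (512k or 8M).
 * When @new is true the page tables are allocated (slab must not be up yet);
 * otherwise an existing mapping is updated with the new protection @prot.
 * Marked __ref because the __init allocators are only used in the @new case.
 */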
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never be already present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep,
			pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)),
			1UL << mmu_psize_to_shift(psize));

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;

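/*
 * Map the IMMR area (internal memory-mapped registers) with a single
 * non-cached guarded 512k page. Done only once.
 */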
void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

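/*
 * Map the physical range @offset..@top at PAGE_OFFSET + @offset, using
 * 512k pages up to the first 8M boundary, 8M pages in the middle, and
 * 512k pages for the remainder. When changing an existing mapping
 * (@new is false), the TLB is flushed afterwards.
 */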
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
}

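/*
 * Block-map the beginning of RAM: kernel text and init text are mapped
 * with PAGE_KERNEL_TEXT, the rest with PAGE_KERNEL. With DEBUG_PAGEALLOC
 * or KFENCE, only the kernel text is block-mapped so the remaining
 * memory keeps standard page mappings. Returns the top of the
 * block-mapped area.
 */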
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled_or_kfence()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}

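/*
 * Called when init memory is freed: remap the init text area with
 * PAGE_KERNEL so it is no longer executable, then redo the pinned
 * TLB entries covering the block-mapped RAM.
 */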
void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	if (!debug_pagealloc_enabled_or_kfence())
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
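/*
 * Make kernel text and rodata read-only: remap everything below
 * _sinittext as ROX, and re-pin the data TLB entries read-only when
 * CONFIG_PIN_TLB_DATA is set.
 */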
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

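/*
 * Helpers for the generic page table code: 8xx has no huge mappings to
 * tear down at the PUD/PMD level, so these always return 0.
 */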
int pud_clear_huge(pud_t *pud)
{
	return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}