xref: /openbmc/linux/arch/s390/mm/init.c (revision d9565bf4)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

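/*
 * swapper_pg_dir is the kernel's top-level page table. invalid_pg_dir
 * provides an always-invalid table from which paging_init() builds
 * s390_invalid_asce, which is installed as the initial user ASCE.
 */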
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

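/*
 * Allocate and reserve a block of contiguous zeroed pages. zero_page_mask
 * lets ZERO_PAGE(vaddr) select one of the pages based on the virtual
 * address, so that read accesses to the zero page are spread over the
 * whole block.
 */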
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and enables dynamic
 * address translation (DAT) with the kernel running in home space.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* primary space ASCE */
	__ctl_load(S390_lowcore.user_asce, 7, 7);	/* secondary space ASCE */
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);	/* home space ASCE */
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

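/*
 * Data marked __ro_after_init has received its final values by the time
 * mark_rodata_ro() is called, so the section can now be write-protected.
 */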
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

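/*
 * In a protected virtualization guest the hypervisor cannot access guest
 * memory unless a page has been explicitly shared with it. The Ultravisor
 * calls below toggle that sharing; they back the set_memory_decrypted()/
 * set_memory_encrypted() interface used by swiotlb and the DMA API.
 */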
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/*
 * Protected virtualization guests can only do DMA through shared memory:
 * force swiotlb and share the bounce buffer pool with the hypervisor.
 */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
}

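/*
 * mem_init() hands all memblock-managed memory over to the page allocator
 * and performs the remaining early memory setup: protected virtualization,
 * KFENCE mapping, CMMA guest page hinting and the zero pages.
 */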
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

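/*
 * Make the init text writable and non-executable again, then release the
 * early SCCB buffer and the init sections back to the page allocator.
 */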
void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_reserved_area(sclp_early_sccb,
			   sclp_early_sccb + EXT_SCCB_READ_SCP,
			   POISON_FREE_INITMEM, "unused early sccb");
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

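/*
 * cma_for_each_area() callback: return -EBUSY if this CMA area overlaps
 * the range that is about to go offline.
 */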
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

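/*
 * First create the identity mapping for the new range, then add its pages.
 * The mapping is removed again if __add_pages() fails. An altmap (vmemmap
 * backed by the hotplugged memory itself) is not supported.
 */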
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

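/* Tear down in reverse order: remove the pages, then the identity mapping. */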
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
319