// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
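
/*
 * How the values set up above are consumed (illustrative sketch, not a
 * definition from this file): ZERO_PAGE() in asm/pgtable.h colors the
 * empty zero page by the faulting virtual address, roughly
 *
 *	virt_to_page((void *)(empty_zero_page +
 *			      ((unsigned long)(vaddr) & zero_page_mask)));
 *
 * Since zero_page_mask has the low PAGE_SHIFT bits cleared, the offset
 * always selects one of the 1 << order pages reserved above, spreading
 * read faults on untouched anonymous mappings across cache colors.
 */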

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* CR1: primary ASCE */
	__ctl_load(S390_lowcore.user_asce, 7, 7);	/* CR7: secondary ASCE */
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);	/* CR13: home space ASCE */
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

/* s390 exposes no NUMA topology: all CPUs are equidistant, on node 0 */
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
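
/*
 * Consumption sketch (illustrative; the generic definitions live in
 * linux/percpu-defs.h and asm-generic/percpu.h): per_cpu() and friends
 * add the offset computed above to the link-time address of a static
 * per-cpu variable, roughly
 *
 *	*SHIFT_PERCPU_PTR(&var, __per_cpu_offset[cpu]);
 *
 * Folding delta into every entry makes this work for CPU 0 as well,
 * even though the first chunk was allocated away from __per_cpu_start.
 */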

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
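
/*
 * Note on the ordering in the hotplug paths above: arch_add_memory()
 * creates the identity mapping with vmem_add_mapping() before
 * __add_pages() instantiates struct pages for the range, and drops the
 * mapping again if __add_pages() fails; arch_remove_memory() tears both
 * down in the reverse order. The mapping must be in place for as long
 * as struct pages describing the range exist.
 */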