xref: /openbmc/linux/arch/arm64/mm/mmu.c (revision 93431e06)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/mm/mmu.c
4  *
5  * Copyright (C) 1995-2005 Russell King
6  * Copyright (C) 2012 ARM Ltd.
7  */
8 
9 #include <linux/cache.h>
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/kexec.h>
16 #include <linux/libfdt.h>
17 #include <linux/mman.h>
18 #include <linux/nodemask.h>
19 #include <linux/memblock.h>
20 #include <linux/memory.h>
21 #include <linux/fs.h>
22 #include <linux/io.h>
23 #include <linux/mm.h>
24 #include <linux/vmalloc.h>
25 
26 #include <asm/barrier.h>
27 #include <asm/cputype.h>
28 #include <asm/fixmap.h>
29 #include <asm/kasan.h>
30 #include <asm/kernel-pgtable.h>
31 #include <asm/sections.h>
32 #include <asm/setup.h>
33 #include <linux/sizes.h>
34 #include <asm/tlb.h>
35 #include <asm/mmu_context.h>
36 #include <asm/ptdump.h>
37 #include <asm/tlbflush.h>
38 
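/*
 * Flags passed down the __create_pgd_mapping() call chain to forbid the use
 * of block (section) mappings and of the contiguous hint, respectively.
 */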
39 #define NO_BLOCK_MAPPINGS	BIT(0)
40 #define NO_CONT_MAPPINGS	BIT(1)
41 
42 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
43 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
44 
45 u64 __section(".mmuoff.data.write") vabits_actual;
46 EXPORT_SYMBOL(vabits_actual);
47 
48 u64 kimage_voffset __ro_after_init;
49 EXPORT_SYMBOL(kimage_voffset);
50 
51 /*
52  * Empty_zero_page is a special page that is used for zero-initialized data
53  * and COW.
54  */
55 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
56 EXPORT_SYMBOL(empty_zero_page);
57 
58 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
59 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
60 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
61 
62 static DEFINE_SPINLOCK(swapper_pgdir_lock);
63 
64 void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
65 {
66 	pgd_t *fixmap_pgdp;
67 
68 	spin_lock(&swapper_pgdir_lock);
69 	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
70 	WRITE_ONCE(*fixmap_pgdp, pgd);
71 	/*
72 	 * We need dsb(ishst) here to ensure the page-table-walker sees
73 	 * our new entry before set_pgd() returns. The fixmap's
74 	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
75 	 */
76 	pgd_clear_fixmap();
77 	spin_unlock(&swapper_pgdir_lock);
78 }
79 
80 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
81 			      unsigned long size, pgprot_t vma_prot)
82 {
83 	if (!pfn_valid(pfn))
84 		return pgprot_noncached(vma_prot);
85 	else if (file->f_flags & O_SYNC)
86 		return pgprot_writecombine(vma_prot);
87 	return vma_prot;
88 }
89 EXPORT_SYMBOL(phys_mem_access_prot);
90 
91 static phys_addr_t __init early_pgtable_alloc(int shift)
92 {
93 	phys_addr_t phys;
94 	void *ptr;
95 
96 	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
97 	if (!phys)
98 		panic("Failed to allocate page table page\n");
99 
100 	/*
101 	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
102 	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
103 	 * any level of table.
104 	 */
105 	ptr = pte_set_fixmap(phys);
106 
107 	memset(ptr, 0, PAGE_SIZE);
108 
109 	/*
110 	 * Implicit barriers also ensure the zeroed page is visible to the page
111 	 * table walker.
112 	 */
113 	pte_clear_fixmap();
114 
115 	return phys;
116 }
117 
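/*
 * Decide whether a live kernel page table entry may be rewritten from 'old'
 * to 'new' without going through break-before-make. Only changes confined to
 * the permission/software bits in 'mask' below are considered safe; e.g.
 * toggling PTE_RDONLY or PTE_WRITE on a PAGE_KERNEL mapping is allowed,
 * whereas changing the memory type (attribute index) is not.
 */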
118 static bool pgattr_change_is_safe(u64 old, u64 new)
119 {
120 	/*
121 	 * The following mapping attributes may be updated in live
122 	 * kernel mappings without the need for break-before-make.
123 	 */
124 	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
125 
126 	/* creating or taking down mappings is always safe */
127 	if (old == 0 || new == 0)
128 		return true;
129 
130 	/* live contiguous mappings may not be manipulated at all */
131 	if ((old | new) & PTE_CONT)
132 		return false;
133 
134 	/* Transitioning from Non-Global to Global is unsafe */
135 	if (old & ~new & PTE_NG)
136 		return false;
137 
138 	return ((old ^ new) & ~mask) == 0;
139 }
140 
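/*
 * Populate the pte level for [addr, end) under *pmdp, mapping it to 'phys'
 * with the given protection. The pte table is accessed through the FIX_PTE
 * fixmap slot, since the table page may not (yet) be covered by the linear
 * mapping.
 */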
141 static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
142 		     phys_addr_t phys, pgprot_t prot)
143 {
144 	pte_t *ptep;
145 
146 	ptep = pte_set_fixmap_offset(pmdp, addr);
147 	do {
148 		pte_t old_pte = READ_ONCE(*ptep);
149 
150 		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
151 
152 		/*
153 		 * After the PTE entry has been populated once, we
154 		 * only allow updates to the permission attributes.
155 		 */
156 		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
157 					      READ_ONCE(pte_val(*ptep))));
158 
159 		phys += PAGE_SIZE;
160 	} while (ptep++, addr += PAGE_SIZE, addr != end);
161 
162 	pte_clear_fixmap();
163 }
164 
165 static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
166 				unsigned long end, phys_addr_t phys,
167 				pgprot_t prot,
168 				phys_addr_t (*pgtable_alloc)(int),
169 				int flags)
170 {
171 	unsigned long next;
172 	pmd_t pmd = READ_ONCE(*pmdp);
173 
174 	BUG_ON(pmd_sect(pmd));
175 	if (pmd_none(pmd)) {
176 		phys_addr_t pte_phys;
177 		BUG_ON(!pgtable_alloc);
178 		pte_phys = pgtable_alloc(PAGE_SHIFT);
179 		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
180 		pmd = READ_ONCE(*pmdp);
181 	}
182 	BUG_ON(pmd_bad(pmd));
183 
184 	do {
185 		pgprot_t __prot = prot;
186 
187 		next = pte_cont_addr_end(addr, end);
188 
189 		/* use a contiguous mapping if the range is suitably aligned */
190 		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
191 		    (flags & NO_CONT_MAPPINGS) == 0)
192 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
193 
194 		init_pte(pmdp, addr, next, phys, __prot);
195 
196 		phys += next - addr;
197 	} while (addr = next, addr != end);
198 }
199 
200 static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
201 		     phys_addr_t phys, pgprot_t prot,
202 		     phys_addr_t (*pgtable_alloc)(int), int flags)
203 {
204 	unsigned long next;
205 	pmd_t *pmdp;
206 
207 	pmdp = pmd_set_fixmap_offset(pudp, addr);
208 	do {
209 		pmd_t old_pmd = READ_ONCE(*pmdp);
210 
211 		next = pmd_addr_end(addr, end);
212 
213 		/* try section mapping first */
214 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
215 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
216 			pmd_set_huge(pmdp, phys, prot);
217 
218 			/*
219 			 * After the PMD entry has been populated once, we
220 			 * only allow updates to the permission attributes.
221 			 */
222 			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
223 						      READ_ONCE(pmd_val(*pmdp))));
224 		} else {
225 			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
226 					    pgtable_alloc, flags);
227 
228 			BUG_ON(pmd_val(old_pmd) != 0 &&
229 			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
230 		}
231 		phys += next - addr;
232 	} while (pmdp++, addr = next, addr != end);
233 
234 	pmd_clear_fixmap();
235 }
236 
237 static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
238 				unsigned long end, phys_addr_t phys,
239 				pgprot_t prot,
240 				phys_addr_t (*pgtable_alloc)(int), int flags)
241 {
242 	unsigned long next;
243 	pud_t pud = READ_ONCE(*pudp);
244 
245 	/*
246 	 * Check for initial section mappings in the pgd/pud.
247 	 */
248 	BUG_ON(pud_sect(pud));
249 	if (pud_none(pud)) {
250 		phys_addr_t pmd_phys;
251 		BUG_ON(!pgtable_alloc);
252 		pmd_phys = pgtable_alloc(PMD_SHIFT);
253 		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
254 		pud = READ_ONCE(*pudp);
255 	}
256 	BUG_ON(pud_bad(pud));
257 
258 	do {
259 		pgprot_t __prot = prot;
260 
261 		next = pmd_cont_addr_end(addr, end);
262 
263 		/* use a contiguous mapping if the range is suitably aligned */
264 		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
265 		    (flags & NO_CONT_MAPPINGS) == 0)
266 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
267 
268 		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
269 
270 		phys += next - addr;
271 	} while (addr = next, addr != end);
272 }
273 
274 static inline bool use_1G_block(unsigned long addr, unsigned long next,
275 			unsigned long phys)
276 {
277 	if (PAGE_SHIFT != 12)
278 		return false;
279 
280 	if (((addr | next | phys) & ~PUD_MASK) != 0)
281 		return false;
282 
283 	return true;
284 }
285 
286 static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
287 			   phys_addr_t phys, pgprot_t prot,
288 			   phys_addr_t (*pgtable_alloc)(int),
289 			   int flags)
290 {
291 	unsigned long next;
292 	pud_t *pudp;
293 	pgd_t pgd = READ_ONCE(*pgdp);
294 
295 	if (pgd_none(pgd)) {
296 		phys_addr_t pud_phys;
297 		BUG_ON(!pgtable_alloc);
298 		pud_phys = pgtable_alloc(PUD_SHIFT);
299 		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
300 		pgd = READ_ONCE(*pgdp);
301 	}
302 	BUG_ON(pgd_bad(pgd));
303 
304 	pudp = pud_set_fixmap_offset(pgdp, addr);
305 	do {
306 		pud_t old_pud = READ_ONCE(*pudp);
307 
308 		next = pud_addr_end(addr, end);
309 
310 		/*
311 		 * For 4K granule only, attempt to put down a 1GB block
312 		 */
313 		if (use_1G_block(addr, next, phys) &&
314 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
315 			pud_set_huge(pudp, phys, prot);
316 
317 			/*
318 			 * After the PUD entry has been populated once, we
319 			 * only allow updates to the permission attributes.
320 			 */
321 			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
322 						      READ_ONCE(pud_val(*pudp))));
323 		} else {
324 			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
325 					    pgtable_alloc, flags);
326 
327 			BUG_ON(pud_val(old_pud) != 0 &&
328 			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
329 		}
330 		phys += next - addr;
331 	} while (pudp++, addr = next, addr != end);
332 
333 	pud_clear_fixmap();
334 }
335 
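/*
 * Create a mapping of 'size' bytes from physical address 'phys' at virtual
 * address 'virt' in the given page table, using 'prot' for the leaf entries
 * and 'pgtable_alloc' to allocate any intermediate table pages that are
 * needed (NULL means no new tables may be allocated).
 */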
336 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
337 				 unsigned long virt, phys_addr_t size,
338 				 pgprot_t prot,
339 				 phys_addr_t (*pgtable_alloc)(int),
340 				 int flags)
341 {
342 	unsigned long addr, end, next;
343 	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
344 
345 	/*
346 	 * If the virtual and physical address don't have the same offset
347 	 * within a page, we cannot map the region as the caller expects.
348 	 */
349 	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
350 		return;
351 
352 	phys &= PAGE_MASK;
353 	addr = virt & PAGE_MASK;
354 	end = PAGE_ALIGN(virt + size);
355 
356 	do {
357 		next = pgd_addr_end(addr, end);
358 		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
359 			       flags);
360 		phys += next - addr;
361 	} while (pgdp++, addr = next, addr != end);
362 }
363 
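/*
 * Late page table allocators: __pgd_pgtable_alloc() returns a zeroed page
 * straight from the buddy allocator (GFP_PGTABLE_KERNEL includes __GFP_ZERO),
 * while pgd_pgtable_alloc() additionally runs the relevant page table
 * constructor so the page can later be used with core mm helpers.
 */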
364 static phys_addr_t __pgd_pgtable_alloc(int shift)
365 {
366 	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
367 	BUG_ON(!ptr);
368 
369 	/* Ensure the zeroed page is visible to the page table walker */
370 	dsb(ishst);
371 	return __pa(ptr);
372 }
373 
374 static phys_addr_t pgd_pgtable_alloc(int shift)
375 {
376 	phys_addr_t pa = __pgd_pgtable_alloc(shift);
377 
378 	/*
379 	 * Call the proper page table ctor in case we later need to
380 	 * call core mm functions like apply_to_page_range() on
381 	 * this pre-allocated page table.
382 	 *
383 	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if the pmd is
384 	 * folded, in which case pgtable_pmd_page_ctor() becomes a no-op.
385 	 */
386 	if (shift == PAGE_SHIFT)
387 		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
388 	else if (shift == PMD_SHIFT)
389 		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
390 
391 	return pa;
392 }
393 
394 /*
395  * This function can only be used to modify existing table entries,
396  * without allocating new levels of table. Note that this permits the
397  * creation of new section or page entries.
398  */
399 static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
400 				  phys_addr_t size, pgprot_t prot)
401 {
402 	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
403 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
404 			&phys, virt);
405 		return;
406 	}
407 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
408 			     NO_CONT_MAPPINGS);
409 }
410 
411 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
412 			       unsigned long virt, phys_addr_t size,
413 			       pgprot_t prot, bool page_mappings_only)
414 {
415 	int flags = 0;
416 
417 	BUG_ON(mm == &init_mm);
418 
419 	if (page_mappings_only)
420 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
421 
422 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
423 			     pgd_pgtable_alloc, flags);
424 }
425 
426 static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
427 				phys_addr_t size, pgprot_t prot)
428 {
429 	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
430 		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
431 			&phys, virt);
432 		return;
433 	}
434 
435 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
436 			     NO_CONT_MAPPINGS);
437 
438 	/* flush the TLBs after updating live kernel mappings */
439 	flush_tlb_kernel_range(virt, virt + size);
440 }
441 
442 static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
443 				  phys_addr_t end, pgprot_t prot, int flags)
444 {
445 	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
446 			     prot, early_pgtable_alloc, flags);
447 }
448 
449 void __init mark_linear_text_alias_ro(void)
450 {
451 	/*
452 	 * Remove the write permissions from the linear alias of .text/.rodata
453 	 */
454 	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
455 			    (unsigned long)__init_begin - (unsigned long)_text,
456 			    PAGE_KERNEL_RO);
457 }
458 
459 static void __init map_mem(pgd_t *pgdp)
460 {
461 	phys_addr_t kernel_start = __pa_symbol(_text);
462 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
463 	struct memblock_region *reg;
464 	int flags = 0;
465 
466 	if (rodata_full || debug_pagealloc_enabled())
467 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
468 
469 	/*
470 	 * Take care not to create a writable alias for the
471 	 * read-only text and rodata sections of the kernel image,
472 	 * so temporarily mark them as NOMAP to skip them in the
473 	 * following for-loop.
474 	 */
475 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
476 #ifdef CONFIG_KEXEC_CORE
477 	if (crashk_res.end)
478 		memblock_mark_nomap(crashk_res.start,
479 				    resource_size(&crashk_res));
480 #endif
481 
482 	/* map all the memory banks */
483 	for_each_memblock(memory, reg) {
484 		phys_addr_t start = reg->base;
485 		phys_addr_t end = start + reg->size;
486 
487 		if (start >= end)
488 			break;
489 		if (memblock_is_nomap(reg))
490 			continue;
491 
492 		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
493 	}
494 
495 	/*
496 	 * Map the linear alias of the [_text, __init_begin) interval
497 	 * as non-executable now, and remove the write permission in
498 	 * mark_linear_text_alias_ro() below (which will be called after
499 	 * alternative patching has completed). This makes the contents
500 	 * of the region accessible to subsystems such as hibernate,
501 	 * but protects it from inadvertent modification or execution.
502 	 * Note that contiguous mappings cannot be remapped in this way,
503 	 * so we should avoid them here.
504 	 */
505 	__map_memblock(pgdp, kernel_start, kernel_end,
506 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
507 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
508 
509 #ifdef CONFIG_KEXEC_CORE
510 	/*
511 	 * Use page-level mappings here so that we can shrink the region
512 	 * at page granularity and return unused memory to the buddy
513 	 * system through the /sys/kernel/kexec_crash_size interface.
514 	 */
515 	if (crashk_res.end) {
516 		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
517 			       PAGE_KERNEL,
518 			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
519 		memblock_clear_nomap(crashk_res.start,
520 				     resource_size(&crashk_res));
521 	}
522 #endif
523 }
524 
525 void mark_rodata_ro(void)
526 {
527 	unsigned long section_size;
528 
529 	/*
530 	 * Mark .rodata as read-only. Use __init_begin rather than __end_rodata
531 	 * to cover NOTES and EXCEPTION_TABLE.
532 	 */
533 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
534 	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
535 			    section_size, PAGE_KERNEL_RO);
536 
537 	debug_checkwx();
538 }
539 
540 static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
541 				      pgprot_t prot, struct vm_struct *vma,
542 				      int flags, unsigned long vm_flags)
543 {
544 	phys_addr_t pa_start = __pa_symbol(va_start);
545 	unsigned long size = va_end - va_start;
546 
547 	BUG_ON(!PAGE_ALIGNED(pa_start));
548 	BUG_ON(!PAGE_ALIGNED(size));
549 
550 	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
551 			     early_pgtable_alloc, flags);
552 
553 	if (!(vm_flags & VM_NO_GUARD))
554 		size += PAGE_SIZE;
555 
556 	vma->addr	= va_start;
557 	vma->phys_addr	= pa_start;
558 	vma->size	= size;
559 	vma->flags	= VM_MAP | vm_flags;
560 	vma->caller	= __builtin_return_address(0);
561 
562 	vm_area_add_early(vma);
563 }
564 
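/*
 * The "rodata=" parameter accepts the usual boolean values as well as "full";
 * "full" additionally sets rodata_full, which forces page-granular linear-map
 * mappings (see map_mem()) so that the permissions of linear aliases can be
 * changed at page granularity later on.
 */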
565 static int __init parse_rodata(char *arg)
566 {
567 	int ret = strtobool(arg, &rodata_enabled);
568 	if (!ret) {
569 		rodata_full = false;
570 		return 0;
571 	}
572 
573 	/* permit 'full' in addition to boolean options */
574 	if (strcmp(arg, "full"))
575 		return -EINVAL;
576 
577 	rodata_enabled = true;
578 	rodata_full = true;
579 	return 0;
580 }
581 early_param("rodata", parse_rodata);
582 
583 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
584 static int __init map_entry_trampoline(void)
585 {
586 	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
587 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
588 
589 	/* The trampoline is always mapped and can therefore be global */
590 	pgprot_val(prot) &= ~PTE_NG;
591 
592 	/* Map only the text into the trampoline page table */
593 	memset(tramp_pg_dir, 0, PGD_SIZE);
594 	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
595 			     prot, __pgd_pgtable_alloc, 0);
596 
597 	/* Map both the text and data into the kernel page table */
598 	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
599 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
600 		extern char __entry_tramp_data_start[];
601 
602 		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
603 			     __pa_symbol(__entry_tramp_data_start),
604 			     PAGE_KERNEL_RO);
605 	}
606 
607 	return 0;
608 }
609 core_initcall(map_entry_trampoline);
610 #endif
611 
612 /*
613  * Create fine-grained mappings for the kernel.
614  */
615 static void __init map_kernel(pgd_t *pgdp)
616 {
617 	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
618 				vmlinux_initdata, vmlinux_data;
619 
620 	/*
621 	 * External debuggers may need to write directly to the text
622 	 * mapping to install SW breakpoints. Allow this (only) when
623 	 * explicitly requested with rodata=off.
624 	 */
625 	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
626 
627 	/*
628 	 * Only rodata will be remapped with different permissions later on;
629 	 * all other segments are allowed to use contiguous mappings.
630 	 */
631 	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
632 			   VM_NO_GUARD);
633 	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
634 			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
635 	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
636 			   &vmlinux_inittext, 0, VM_NO_GUARD);
637 	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
638 			   &vmlinux_initdata, 0, VM_NO_GUARD);
639 	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
640 
641 	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
642 		/*
643 		 * The fixmap falls in a separate pgd from the kernel, and doesn't
644 		 * live in the carveout for the swapper_pg_dir. We can simply
645 		 * reuse the existing dir for the fixmap.
646 		 */
647 		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
648 			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
649 	} else if (CONFIG_PGTABLE_LEVELS > 3) {
650 		pgd_t *bm_pgdp;
651 		pud_t *bm_pudp;
652 		/*
653 		 * The fixmap shares its top level pgd entry with the kernel
654 		 * mapping. This can really only occur when we are running
655 		 * with 16k/4 levels, so we can simply reuse the pud level
656 		 * entry instead.
657 		 */
658 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
659 		bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
660 		bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
661 		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
662 		pud_clear_fixmap();
663 	} else {
664 		BUG();
665 	}
666 
667 	kasan_copy_shadow(pgdp);
668 }
669 
670 void __init paging_init(void)
671 {
672 	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
673 
674 	map_kernel(pgdp);
675 	map_mem(pgdp);
676 
677 	pgd_clear_fixmap();
678 
679 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
680 	init_mm.pgd = swapper_pg_dir;
681 
682 	memblock_free(__pa_symbol(init_pg_dir),
683 		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
684 
685 	memblock_allow_resize();
686 }
687 
688 /*
689  * Check whether a kernel address is valid (derived from arch/x86/).
690  */
691 int kern_addr_valid(unsigned long addr)
692 {
693 	pgd_t *pgdp;
694 	pud_t *pudp, pud;
695 	pmd_t *pmdp, pmd;
696 	pte_t *ptep, pte;
697 
698 	if ((((long)addr) >> VA_BITS) != -1UL)
699 		return 0;
700 
701 	pgdp = pgd_offset_k(addr);
702 	if (pgd_none(READ_ONCE(*pgdp)))
703 		return 0;
704 
705 	pudp = pud_offset(pgdp, addr);
706 	pud = READ_ONCE(*pudp);
707 	if (pud_none(pud))
708 		return 0;
709 
710 	if (pud_sect(pud))
711 		return pfn_valid(pud_pfn(pud));
712 
713 	pmdp = pmd_offset(pudp, addr);
714 	pmd = READ_ONCE(*pmdp);
715 	if (pmd_none(pmd))
716 		return 0;
717 
718 	if (pmd_sect(pmd))
719 		return pfn_valid(pmd_pfn(pmd));
720 
721 	ptep = pte_offset_kernel(pmdp, addr);
722 	pte = READ_ONCE(*ptep);
723 	if (pte_none(pte))
724 		return 0;
725 
726 	return pfn_valid(pte_pfn(pte));
727 }
728 
729 #ifdef CONFIG_MEMORY_HOTPLUG
730 static void free_hotplug_page_range(struct page *page, size_t size)
731 {
732 	WARN_ON(PageReserved(page));
733 	free_pages((unsigned long)page_address(page), get_order(size));
734 }
735 
736 static void free_hotplug_pgtable_page(struct page *page)
737 {
738 	free_hotplug_page_range(page, PAGE_SIZE);
739 }
740 
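/*
 * Return true if the naturally aligned region (per 'mask') containing
 * [start, end) lies entirely within the [floor, ceiling) limits, meaning the
 * page table page covering it cannot be shared with mappings outside that
 * window and may therefore be freed once all of its entries are empty.
 */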
741 static bool pgtable_range_aligned(unsigned long start, unsigned long end,
742 				  unsigned long floor, unsigned long ceiling,
743 				  unsigned long mask)
744 {
745 	start &= mask;
746 	if (start < floor)
747 		return false;
748 
749 	if (ceiling) {
750 		ceiling &= mask;
751 		if (!ceiling)
752 			return false;
753 	}
754 
755 	if (end - 1 > ceiling - 1)
756 		return false;
757 	return true;
758 }
759 
760 static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
761 				    unsigned long end, bool free_mapped)
762 {
763 	pte_t *ptep, pte;
764 
765 	do {
766 		ptep = pte_offset_kernel(pmdp, addr);
767 		pte = READ_ONCE(*ptep);
768 		if (pte_none(pte))
769 			continue;
770 
771 		WARN_ON(!pte_present(pte));
772 		pte_clear(&init_mm, addr, ptep);
773 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
774 		if (free_mapped)
775 			free_hotplug_page_range(pte_page(pte), PAGE_SIZE);
776 	} while (addr += PAGE_SIZE, addr < end);
777 }
778 
779 static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
780 				    unsigned long end, bool free_mapped)
781 {
782 	unsigned long next;
783 	pmd_t *pmdp, pmd;
784 
785 	do {
786 		next = pmd_addr_end(addr, end);
787 		pmdp = pmd_offset(pudp, addr);
788 		pmd = READ_ONCE(*pmdp);
789 		if (pmd_none(pmd))
790 			continue;
791 
792 		WARN_ON(!pmd_present(pmd));
793 		if (pmd_sect(pmd)) {
794 			pmd_clear(pmdp);
795 
796 			/*
797 			 * One TLBI should be sufficient here as the PMD_SIZE
798 			 * range is mapped with a single block entry.
799 			 */
800 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
801 			if (free_mapped)
802 				free_hotplug_page_range(pmd_page(pmd),
803 							PMD_SIZE);
804 			continue;
805 		}
806 		WARN_ON(!pmd_table(pmd));
807 		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped);
808 	} while (addr = next, addr < end);
809 }
810 
811 static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
812 				    unsigned long end, bool free_mapped)
813 {
814 	unsigned long next;
815 	pud_t *pudp, pud;
816 
817 	do {
818 		next = pud_addr_end(addr, end);
819 		pudp = pud_offset(p4dp, addr);
820 		pud = READ_ONCE(*pudp);
821 		if (pud_none(pud))
822 			continue;
823 
824 		WARN_ON(!pud_present(pud));
825 		if (pud_sect(pud)) {
826 			pud_clear(pudp);
827 
828 			/*
829 			 * One TLBI should be sufficient here as the PUD_SIZE
830 			 * range is mapped with a single block entry.
831 			 */
832 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
833 			if (free_mapped)
834 				free_hotplug_page_range(pud_page(pud),
835 							PUD_SIZE);
836 			continue;
837 		}
838 		WARN_ON(!pud_table(pud));
839 		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped);
840 	} while (addr = next, addr < end);
841 }
842 
843 static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
844 				    unsigned long end, bool free_mapped)
845 {
846 	unsigned long next;
847 	p4d_t *p4dp, p4d;
848 
849 	do {
850 		next = p4d_addr_end(addr, end);
851 		p4dp = p4d_offset(pgdp, addr);
852 		p4d = READ_ONCE(*p4dp);
853 		if (p4d_none(p4d))
854 			continue;
855 
856 		WARN_ON(!p4d_present(p4d));
857 		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped);
858 	} while (addr = next, addr < end);
859 }
860 
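/*
 * Unmap the kernel mapping for [addr, end). When 'free_mapped' is true the
 * backing pages are released as well (as for vmemmap_free()); the now-empty
 * page table pages themselves are freed separately by free_empty_tables().
 */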
861 static void unmap_hotplug_range(unsigned long addr, unsigned long end,
862 				bool free_mapped)
863 {
864 	unsigned long next;
865 	pgd_t *pgdp, pgd;
866 
867 	do {
868 		next = pgd_addr_end(addr, end);
869 		pgdp = pgd_offset_k(addr);
870 		pgd = READ_ONCE(*pgdp);
871 		if (pgd_none(pgd))
872 			continue;
873 
874 		WARN_ON(!pgd_present(pgd));
875 		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped);
876 	} while (addr = next, addr < end);
877 }
878 
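/*
 * The free_empty_*_table() helpers walk a just-unmapped range again and free
 * any page table pages that have become completely empty, using the
 * floor/ceiling limits to avoid freeing tables that are still shared with
 * adjacent regions.
 */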
879 static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
880 				 unsigned long end, unsigned long floor,
881 				 unsigned long ceiling)
882 {
883 	pte_t *ptep, pte;
884 	unsigned long i, start = addr;
885 
886 	do {
887 		ptep = pte_offset_kernel(pmdp, addr);
888 		pte = READ_ONCE(*ptep);
889 
890 		/*
891 		 * This is just a sanity check to verify that the ptes have
892 		 * already been cleared by the earlier unmap loops.
893 		 */
894 		WARN_ON(!pte_none(pte));
895 	} while (addr += PAGE_SIZE, addr < end);
896 
897 	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
898 		return;
899 
900 	/*
901 	 * Check whether we can free the pte page if the rest of the
902 	 * entries are empty. Overlaps with other regions have been
903 	 * handled by the floor/ceiling check.
904 	 */
905 	ptep = pte_offset_kernel(pmdp, 0UL);
906 	for (i = 0; i < PTRS_PER_PTE; i++) {
907 		if (!pte_none(READ_ONCE(ptep[i])))
908 			return;
909 	}
910 
911 	pmd_clear(pmdp);
912 	__flush_tlb_kernel_pgtable(start);
913 	free_hotplug_pgtable_page(virt_to_page(ptep));
914 }
915 
916 static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
917 				 unsigned long end, unsigned long floor,
918 				 unsigned long ceiling)
919 {
920 	pmd_t *pmdp, pmd;
921 	unsigned long i, next, start = addr;
922 
923 	do {
924 		next = pmd_addr_end(addr, end);
925 		pmdp = pmd_offset(pudp, addr);
926 		pmd = READ_ONCE(*pmdp);
927 		if (pmd_none(pmd))
928 			continue;
929 
930 		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
931 		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
932 	} while (addr = next, addr < end);
933 
934 	if (CONFIG_PGTABLE_LEVELS <= 2)
935 		return;
936 
937 	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
938 		return;
939 
940 	/*
941 	 * Check whether we can free the pmd page if the rest of the
942 	 * entries are empty. Overlaps with other regions have been
943 	 * handled by the floor/ceiling check.
944 	 */
945 	pmdp = pmd_offset(pudp, 0UL);
946 	for (i = 0; i < PTRS_PER_PMD; i++) {
947 		if (!pmd_none(READ_ONCE(pmdp[i])))
948 			return;
949 	}
950 
951 	pud_clear(pudp);
952 	__flush_tlb_kernel_pgtable(start);
953 	free_hotplug_pgtable_page(virt_to_page(pmdp));
954 }
955 
956 static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
957 				 unsigned long end, unsigned long floor,
958 				 unsigned long ceiling)
959 {
960 	pud_t *pudp, pud;
961 	unsigned long i, next, start = addr;
962 
963 	do {
964 		next = pud_addr_end(addr, end);
965 		pudp = pud_offset(p4dp, addr);
966 		pud = READ_ONCE(*pudp);
967 		if (pud_none(pud))
968 			continue;
969 
970 		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
971 		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
972 	} while (addr = next, addr < end);
973 
974 	if (CONFIG_PGTABLE_LEVELS <= 3)
975 		return;
976 
977 	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
978 		return;
979 
980 	/*
981 	 * Check whether we can free the pud page if the rest of the
982 	 * entries are empty. Overlaps with other regions have been
983 	 * handled by the floor/ceiling check.
984 	 */
985 	pudp = pud_offset(p4dp, 0UL);
986 	for (i = 0; i < PTRS_PER_PUD; i++) {
987 		if (!pud_none(READ_ONCE(pudp[i])))
988 			return;
989 	}
990 
991 	p4d_clear(p4dp);
992 	__flush_tlb_kernel_pgtable(start);
993 	free_hotplug_pgtable_page(virt_to_page(pudp));
994 }
995 
996 static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
997 				 unsigned long end, unsigned long floor,
998 				 unsigned long ceiling)
999 {
1000 	unsigned long next;
1001 	p4d_t *p4dp, p4d;
1002 
1003 	do {
1004 		next = p4d_addr_end(addr, end);
1005 		p4dp = p4d_offset(pgdp, addr);
1006 		p4d = READ_ONCE(*p4dp);
1007 		if (p4d_none(p4d))
1008 			continue;
1009 
1010 		WARN_ON(!p4d_present(p4d));
1011 		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
1012 	} while (addr = next, addr < end);
1013 }
1014 
1015 static void free_empty_tables(unsigned long addr, unsigned long end,
1016 			      unsigned long floor, unsigned long ceiling)
1017 {
1018 	unsigned long next;
1019 	pgd_t *pgdp, pgd;
1020 
1021 	do {
1022 		next = pgd_addr_end(addr, end);
1023 		pgdp = pgd_offset_k(addr);
1024 		pgd = READ_ONCE(*pgdp);
1025 		if (pgd_none(pgd))
1026 			continue;
1027 
1028 		WARN_ON(!pgd_present(pgd));
1029 		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
1030 	} while (addr = next, addr < end);
1031 }
1032 #endif
1033 
1034 #ifdef CONFIG_SPARSEMEM_VMEMMAP
1035 #if !ARM64_SWAPPER_USES_SECTION_MAPS
1036 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1037 		struct vmem_altmap *altmap)
1038 {
1039 	return vmemmap_populate_basepages(start, end, node);
1040 }
1041 #else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
1042 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1043 		struct vmem_altmap *altmap)
1044 {
1045 	unsigned long addr = start;
1046 	unsigned long next;
1047 	pgd_t *pgdp;
1048 	pud_t *pudp;
1049 	pmd_t *pmdp;
1050 
1051 	do {
1052 		next = pmd_addr_end(addr, end);
1053 
1054 		pgdp = vmemmap_pgd_populate(addr, node);
1055 		if (!pgdp)
1056 			return -ENOMEM;
1057 
1058 		pudp = vmemmap_pud_populate(pgdp, addr, node);
1059 		if (!pudp)
1060 			return -ENOMEM;
1061 
1062 		pmdp = pmd_offset(pudp, addr);
1063 		if (pmd_none(READ_ONCE(*pmdp))) {
1064 			void *p = NULL;
1065 
1066 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1067 			if (!p)
1068 				return -ENOMEM;
1069 
1070 			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
1071 		} else
1072 			vmemmap_verify((pte_t *)pmdp, node, addr, next);
1073 	} while (addr = next, addr != end);
1074 
1075 	return 0;
1076 }
1077 #endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
1078 void vmemmap_free(unsigned long start, unsigned long end,
1079 		struct vmem_altmap *altmap)
1080 {
1081 #ifdef CONFIG_MEMORY_HOTPLUG
1082 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1083 
1084 	unmap_hotplug_range(start, end, true);
1085 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
1086 #endif
1087 }
1088 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
1089 
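/*
 * Helpers to locate the page table entries that back the fixmap at each
 * level (bm_pud/bm_pmd/bm_pte, set up in early_fixmap_init()).
 */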
1090 static inline pud_t * fixmap_pud(unsigned long addr)
1091 {
1092 	pgd_t *pgdp = pgd_offset_k(addr);
1093 	pgd_t pgd = READ_ONCE(*pgdp);
1094 
1095 	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
1096 
1097 	return pud_offset_kimg(pgdp, addr);
1098 }
1099 
1100 static inline pmd_t * fixmap_pmd(unsigned long addr)
1101 {
1102 	pud_t *pudp = fixmap_pud(addr);
1103 	pud_t pud = READ_ONCE(*pudp);
1104 
1105 	BUG_ON(pud_none(pud) || pud_bad(pud));
1106 
1107 	return pmd_offset_kimg(pudp, addr);
1108 }
1109 
1110 static inline pte_t * fixmap_pte(unsigned long addr)
1111 {
1112 	return &bm_pte[pte_index(addr)];
1113 }
1114 
1115 /*
1116  * The p*d_populate functions call virt_to_phys implicitly, so they can't be
1117  * used directly on kernel symbols (bm_p*d). This function is called too early
1118  * to use lm_alias, so the __p*d_populate functions must be used to populate
1119  * with the physical address from __pa_symbol.
1120  */
1121 void __init early_fixmap_init(void)
1122 {
1123 	pgd_t *pgdp, pgd;
1124 	pud_t *pudp;
1125 	pmd_t *pmdp;
1126 	unsigned long addr = FIXADDR_START;
1127 
1128 	pgdp = pgd_offset_k(addr);
1129 	pgd = READ_ONCE(*pgdp);
1130 	if (CONFIG_PGTABLE_LEVELS > 3 &&
1131 	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
1132 		/*
1133 		 * We only end up here if the kernel mapping and the fixmap
1134 		 * share the top level pgd entry, which should only happen on
1135 		 * 16k/4 levels configurations.
1136 		 */
1137 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
1138 		pudp = pud_offset_kimg(pgdp, addr);
1139 	} else {
1140 		if (pgd_none(pgd))
1141 			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
1142 		pudp = fixmap_pud(addr);
1143 	}
1144 	if (pud_none(READ_ONCE(*pudp)))
1145 		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
1146 	pmdp = fixmap_pmd(addr);
1147 	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
1148 
1149 	/*
1150 	 * The boot-ioremap range spans multiple pmds, for which
1151 	 * we are not prepared:
1152 	 */
1153 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
1154 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
1155 
1156 	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
1157 	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
1158 		WARN_ON(1);
1159 		pr_warn("pmdp %p != %p, %p\n",
1160 			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
1161 			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
1162 		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
1163 			fix_to_virt(FIX_BTMAP_BEGIN));
1164 		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
1165 			fix_to_virt(FIX_BTMAP_END));
1166 
1167 		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
1168 		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
1169 	}
1170 }
1171 
1172 /*
1173  * Unusually, this is also called in IRQ context (ghes_iounmap_irq), so if we
1174  * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
1175  */
1176 void __set_fixmap(enum fixed_addresses idx,
1177 			       phys_addr_t phys, pgprot_t flags)
1178 {
1179 	unsigned long addr = __fix_to_virt(idx);
1180 	pte_t *ptep;
1181 
1182 	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
1183 
1184 	ptep = fixmap_pte(addr);
1185 
1186 	if (pgprot_val(flags)) {
1187 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
1188 	} else {
1189 		pte_clear(&init_mm, addr, ptep);
1190 		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
1191 	}
1192 }
1193 
1194 void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
1195 {
1196 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
1197 	int offset;
1198 	void *dt_virt;
1199 
1200 	/*
1201 	 * Check whether the physical FDT address is set and meets the minimum
1202 	 * alignment requirement. Since we rely on MIN_FDT_ALIGN being at
1203 	 * least 8 bytes so that we can always access the magic and size
1204 	 * fields of the FDT header after mapping the first chunk, double check
1205 	 * here that this is indeed the case.
1206 	 */
1207 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
1208 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
1209 		return NULL;
1210 
1211 	/*
1212 	 * Make sure that the FDT region can be mapped without the need to
1213 	 * allocate additional translation table pages, so that it is safe
1214 	 * to call create_mapping_noalloc() this early.
1215 	 *
1216 	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
1217 	 * be in the same PMD as the rest of the fixmap.
1218 	 * On 4k pages, we'll use section mappings for the FDT so we only
1219 	 * have to be in the same PUD.
1220 	 */
1221 	BUILD_BUG_ON(dt_virt_base % SZ_2M);
1222 
1223 	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
1224 		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
1225 
1226 	offset = dt_phys % SWAPPER_BLOCK_SIZE;
1227 	dt_virt = (void *)dt_virt_base + offset;
1228 
1229 	/* map the first chunk so we can read the size from the header */
1230 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
1231 			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
1232 
1233 	if (fdt_magic(dt_virt) != FDT_MAGIC)
1234 		return NULL;
1235 
1236 	*size = fdt_totalsize(dt_virt);
1237 	if (*size > MAX_FDT_SIZE)
1238 		return NULL;
1239 
1240 	if (offset + *size > SWAPPER_BLOCK_SIZE)
1241 		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
1242 			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
1243 
1244 	return dt_virt;
1245 }
1246 
1247 int __init arch_ioremap_p4d_supported(void)
1248 {
1249 	return 0;
1250 }
1251 
1252 int __init arch_ioremap_pud_supported(void)
1253 {
1254 	/*
1255 	 * Only 4k granule supports level 1 block mappings.
1256 	 * SW table walks can't handle removal of intermediate entries.
1257 	 */
1258 	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
1259 	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
1260 }
1261 
1262 int __init arch_ioremap_pmd_supported(void)
1263 {
1264 	/* See arch_ioremap_pud_supported() */
1265 	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
1266 }
1267 
1268 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
1269 {
1270 	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
1271 
1272 	/* Only allow permission changes for now */
1273 	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
1274 				   pud_val(new_pud)))
1275 		return 0;
1276 
1277 	VM_BUG_ON(phys & ~PUD_MASK);
1278 	set_pud(pudp, new_pud);
1279 	return 1;
1280 }
1281 
1282 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
1283 {
1284 	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
1285 
1286 	/* Only allow permission changes for now */
1287 	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
1288 				   pmd_val(new_pmd)))
1289 		return 0;
1290 
1291 	VM_BUG_ON(phys & ~PMD_MASK);
1292 	set_pmd(pmdp, new_pmd);
1293 	return 1;
1294 }
1295 
1296 int pud_clear_huge(pud_t *pudp)
1297 {
1298 	if (!pud_sect(READ_ONCE(*pudp)))
1299 		return 0;
1300 	pud_clear(pudp);
1301 	return 1;
1302 }
1303 
1304 int pmd_clear_huge(pmd_t *pmdp)
1305 {
1306 	if (!pmd_sect(READ_ONCE(*pmdp)))
1307 		return 0;
1308 	pmd_clear(pmdp);
1309 	return 1;
1310 }
1311 
1312 int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
1313 {
1314 	pte_t *table;
1315 	pmd_t pmd;
1316 
1317 	pmd = READ_ONCE(*pmdp);
1318 
1319 	if (!pmd_table(pmd)) {
1320 		VM_WARN_ON(1);
1321 		return 1;
1322 	}
1323 
1324 	table = pte_offset_kernel(pmdp, addr);
1325 	pmd_clear(pmdp);
1326 	__flush_tlb_kernel_pgtable(addr);
1327 	pte_free_kernel(NULL, table);
1328 	return 1;
1329 }
1330 
1331 int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
1332 {
1333 	pmd_t *table;
1334 	pmd_t *pmdp;
1335 	pud_t pud;
1336 	unsigned long next, end;
1337 
1338 	pud = READ_ONCE(*pudp);
1339 
1340 	if (!pud_table(pud)) {
1341 		VM_WARN_ON(1);
1342 		return 1;
1343 	}
1344 
1345 	table = pmd_offset(pudp, addr);
1346 	pmdp = table;
1347 	next = addr;
1348 	end = addr + PUD_SIZE;
1349 	do {
1350 		pmd_free_pte_page(pmdp, next);
1351 	} while (pmdp++, next += PMD_SIZE, next != end);
1352 
1353 	pud_clear(pudp);
1354 	__flush_tlb_kernel_pgtable(addr);
1355 	pmd_free(NULL, table);
1356 	return 1;
1357 }
1358 
1359 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1360 {
1361 	return 0;	/* Don't attempt a block mapping */
1362 }
1363 
1364 #ifdef CONFIG_MEMORY_HOTPLUG
1365 static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
1366 {
1367 	unsigned long end = start + size;
1368 
1369 	WARN_ON(pgdir != init_mm.pgd);
1370 	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
1371 
1372 	unmap_hotplug_range(start, end, false);
1373 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
1374 }
1375 
1376 int arch_add_memory(int nid, u64 start, u64 size,
1377 		    struct mhp_params *params)
1378 {
1379 	int ret, flags = 0;
1380 
1381 	if (rodata_full || debug_pagealloc_enabled())
1382 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
1383 
1384 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
1385 			     size, params->pgprot, __pgd_pgtable_alloc,
1386 			     flags);
1387 
1388 	memblock_clear_nomap(start, size);
1389 
1390 	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
1391 			   params);
1392 	if (ret)
1393 		__remove_pgd_mapping(swapper_pg_dir,
1394 				     __phys_to_virt(start), size);
1395 	return ret;
1396 }
1397 
1398 void arch_remove_memory(int nid, u64 start, u64 size,
1399 			struct vmem_altmap *altmap)
1400 {
1401 	unsigned long start_pfn = start >> PAGE_SHIFT;
1402 	unsigned long nr_pages = size >> PAGE_SHIFT;
1403 
1404 	__remove_pages(start_pfn, nr_pages, altmap);
1405 	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
1406 }
1407 
1408 /*
1409  * This memory hotplug notifier helps prevent boot memory from being
1410  * inadvertently removed, as it blocks the pfn range offlining process in
1411  * __offline_pages(). Hence it prevents both the offlining and the removal
1412  * of boot memory, which is initially always online.
1413  * In the future, if and when boot memory can be removed, this notifier
1414  * should be dropped and free_hotplug_page_range() should handle any
1415  * reserved pages allocated during boot.
1416  */
1417 static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
1418 					   unsigned long action, void *data)
1419 {
1420 	struct mem_section *ms;
1421 	struct memory_notify *arg = data;
1422 	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
1423 	unsigned long pfn = arg->start_pfn;
1424 
1425 	if (action != MEM_GOING_OFFLINE)
1426 		return NOTIFY_OK;
1427 
1428 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1429 		ms = __pfn_to_section(pfn);
1430 		if (early_section(ms))
1431 			return NOTIFY_BAD;
1432 	}
1433 	return NOTIFY_OK;
1434 }
1435 
1436 static struct notifier_block prevent_bootmem_remove_nb = {
1437 	.notifier_call = prevent_bootmem_remove_notifier,
1438 };
1439 
1440 static int __init prevent_bootmem_remove_init(void)
1441 {
1442 	return register_memory_notifier(&prevent_bootmem_remove_nb);
1443 }
1444 device_initcall(prevent_bootmem_remove_init);
1445 #endif
1446