xref: /openbmc/linux/arch/arm64/mm/mmu.c (revision 2fa5ebe3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/mm/mmu.c
4  *
5  * Copyright (C) 1995-2005 Russell King
6  * Copyright (C) 2012 ARM Ltd.
7  */
8 
9 #include <linux/cache.h>
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/kexec.h>
16 #include <linux/libfdt.h>
17 #include <linux/mman.h>
18 #include <linux/nodemask.h>
19 #include <linux/memblock.h>
20 #include <linux/memremap.h>
21 #include <linux/memory.h>
22 #include <linux/fs.h>
23 #include <linux/io.h>
24 #include <linux/mm.h>
25 #include <linux/vmalloc.h>
26 #include <linux/set_memory.h>
27 
28 #include <asm/barrier.h>
29 #include <asm/cputype.h>
30 #include <asm/fixmap.h>
31 #include <asm/kasan.h>
32 #include <asm/kernel-pgtable.h>
33 #include <asm/sections.h>
34 #include <asm/setup.h>
35 #include <linux/sizes.h>
36 #include <asm/tlb.h>
37 #include <asm/mmu_context.h>
38 #include <asm/ptdump.h>
39 #include <asm/tlbflush.h>
40 #include <asm/pgalloc.h>
41 
42 #define NO_BLOCK_MAPPINGS	BIT(0)
43 #define NO_CONT_MAPPINGS	BIT(1)
44 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
45 
46 int idmap_t0sz __ro_after_init;
47 
48 #if VA_BITS > 48
49 u64 vabits_actual __ro_after_init = VA_BITS_MIN;
50 EXPORT_SYMBOL(vabits_actual);
51 #endif
52 
53 u64 kimage_vaddr __ro_after_init = (u64)&_text;
54 EXPORT_SYMBOL(kimage_vaddr);
55 
56 u64 kimage_voffset __ro_after_init;
57 EXPORT_SYMBOL(kimage_voffset);
58 
59 u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };
60 
61 /*
62  * The booting CPU updates the failed status @__early_cpu_boot_status,
63  * with MMU turned off.
64  */
65 long __section(".mmuoff.data.write") __early_cpu_boot_status;
66 
67 /*
68  * Empty_zero_page is a special page that is used for zero-initialized data
69  * and COW.
70  */
71 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
72 EXPORT_SYMBOL(empty_zero_page);
73 
74 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
75 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
76 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
77 
78 static DEFINE_SPINLOCK(swapper_pgdir_lock);
79 static DEFINE_MUTEX(fixmap_lock);
80 
81 void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
82 {
83 	pgd_t *fixmap_pgdp;
84 
85 	spin_lock(&swapper_pgdir_lock);
86 	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
87 	WRITE_ONCE(*fixmap_pgdp, pgd);
88 	/*
89 	 * We need dsb(ishst) here to ensure the page-table-walker sees
90 	 * our new entry before set_p?d() returns. The fixmap's
91 	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
92 	 */
93 	pgd_clear_fixmap();
94 	spin_unlock(&swapper_pgdir_lock);
95 }
96 
97 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
98 			      unsigned long size, pgprot_t vma_prot)
99 {
100 	if (!pfn_is_map_memory(pfn))
101 		return pgprot_noncached(vma_prot);
102 	else if (file->f_flags & O_SYNC)
103 		return pgprot_writecombine(vma_prot);
104 	return vma_prot;
105 }
106 EXPORT_SYMBOL(phys_mem_access_prot);
107 
108 static phys_addr_t __init early_pgtable_alloc(int shift)
109 {
110 	phys_addr_t phys;
111 	void *ptr;
112 
113 	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
114 					 MEMBLOCK_ALLOC_NOLEAKTRACE);
115 	if (!phys)
116 		panic("Failed to allocate page table page\n");
117 
118 	/*
119 	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
120 	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
121 	 * any level of table.
122 	 */
123 	ptr = pte_set_fixmap(phys);
124 
125 	memset(ptr, 0, PAGE_SIZE);
126 
127 	/*
128 	 * Implicit barriers also ensure the zeroed page is visible to the page
129 	 * table walker
130 	 */
131 	pte_clear_fixmap();
132 
133 	return phys;
134 }
135 
136 bool pgattr_change_is_safe(u64 old, u64 new)
137 {
138 	/*
139 	 * The following mapping attributes may be updated in live
140 	 * kernel mappings without the need for break-before-make.
141 	 */
142 	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
143 
144 	/* creating or taking down mappings is always safe */
145 	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
146 		return true;
147 
148 	/* A live entry's pfn should not change */
149 	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
150 		return false;
151 
152 	/* live contiguous mappings may not be manipulated at all */
153 	if ((old | new) & PTE_CONT)
154 		return false;
155 
156 	/* Transitioning from Non-Global to Global is unsafe */
157 	if (old & ~new & PTE_NG)
158 		return false;
159 
160 	/*
161 	 * Changing the memory type between Normal and Normal-Tagged is safe
162 	 * since Tagged is considered a permission attribute from the
163 	 * mismatched attribute aliases perspective.
164 	 */
165 	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
166 	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
167 	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
168 	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
169 		mask |= PTE_ATTRINDX_MASK;
170 
171 	return ((old ^ new) & ~mask) == 0;
172 }
173 
174 static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
175 		     phys_addr_t phys, pgprot_t prot)
176 {
177 	pte_t *ptep;
178 
179 	ptep = pte_set_fixmap_offset(pmdp, addr);
180 	do {
181 		pte_t old_pte = READ_ONCE(*ptep);
182 
183 		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
184 
185 		/*
186 		 * After the PTE entry has been populated once, we
187 		 * only allow updates to the permission attributes.
188 		 */
189 		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
190 					      READ_ONCE(pte_val(*ptep))));
191 
192 		phys += PAGE_SIZE;
193 	} while (ptep++, addr += PAGE_SIZE, addr != end);
194 
195 	pte_clear_fixmap();
196 }
197 
198 static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
199 				unsigned long end, phys_addr_t phys,
200 				pgprot_t prot,
201 				phys_addr_t (*pgtable_alloc)(int),
202 				int flags)
203 {
204 	unsigned long next;
205 	pmd_t pmd = READ_ONCE(*pmdp);
206 
207 	BUG_ON(pmd_sect(pmd));
208 	if (pmd_none(pmd)) {
209 		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
210 		phys_addr_t pte_phys;
211 
212 		if (flags & NO_EXEC_MAPPINGS)
213 			pmdval |= PMD_TABLE_PXN;
214 		BUG_ON(!pgtable_alloc);
215 		pte_phys = pgtable_alloc(PAGE_SHIFT);
216 		__pmd_populate(pmdp, pte_phys, pmdval);
217 		pmd = READ_ONCE(*pmdp);
218 	}
219 	BUG_ON(pmd_bad(pmd));
220 
221 	do {
222 		pgprot_t __prot = prot;
223 
224 		next = pte_cont_addr_end(addr, end);
225 
226 		/* use a contiguous mapping if the range is suitably aligned */
227 		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
228 		    (flags & NO_CONT_MAPPINGS) == 0)
229 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
230 
231 		init_pte(pmdp, addr, next, phys, __prot);
232 
233 		phys += next - addr;
234 	} while (addr = next, addr != end);
235 }
236 
237 static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
238 		     phys_addr_t phys, pgprot_t prot,
239 		     phys_addr_t (*pgtable_alloc)(int), int flags)
240 {
241 	unsigned long next;
242 	pmd_t *pmdp;
243 
244 	pmdp = pmd_set_fixmap_offset(pudp, addr);
245 	do {
246 		pmd_t old_pmd = READ_ONCE(*pmdp);
247 
248 		next = pmd_addr_end(addr, end);
249 
250 		/* try section mapping first */
251 		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
252 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
253 			pmd_set_huge(pmdp, phys, prot);
254 
255 			/*
256 			 * After the PMD entry has been populated once, we
257 			 * only allow updates to the permission attributes.
258 			 */
259 			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
260 						      READ_ONCE(pmd_val(*pmdp))));
261 		} else {
262 			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
263 					    pgtable_alloc, flags);
264 
265 			BUG_ON(pmd_val(old_pmd) != 0 &&
266 			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
267 		}
268 		phys += next - addr;
269 	} while (pmdp++, addr = next, addr != end);
270 
271 	pmd_clear_fixmap();
272 }
273 
274 static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
275 				unsigned long end, phys_addr_t phys,
276 				pgprot_t prot,
277 				phys_addr_t (*pgtable_alloc)(int), int flags)
278 {
279 	unsigned long next;
280 	pud_t pud = READ_ONCE(*pudp);
281 
282 	/*
283 	 * Check for initial section mappings in the pgd/pud.
284 	 */
285 	BUG_ON(pud_sect(pud));
286 	if (pud_none(pud)) {
287 		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
288 		phys_addr_t pmd_phys;
289 
290 		if (flags & NO_EXEC_MAPPINGS)
291 			pudval |= PUD_TABLE_PXN;
292 		BUG_ON(!pgtable_alloc);
293 		pmd_phys = pgtable_alloc(PMD_SHIFT);
294 		__pud_populate(pudp, pmd_phys, pudval);
295 		pud = READ_ONCE(*pudp);
296 	}
297 	BUG_ON(pud_bad(pud));
298 
299 	do {
300 		pgprot_t __prot = prot;
301 
302 		next = pmd_cont_addr_end(addr, end);
303 
304 		/* use a contiguous mapping if the range is suitably aligned */
305 		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
306 		    (flags & NO_CONT_MAPPINGS) == 0)
307 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
308 
309 		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
310 
311 		phys += next - addr;
312 	} while (addr = next, addr != end);
313 }
314 
315 static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
316 			   phys_addr_t phys, pgprot_t prot,
317 			   phys_addr_t (*pgtable_alloc)(int),
318 			   int flags)
319 {
320 	unsigned long next;
321 	pud_t *pudp;
322 	p4d_t *p4dp = p4d_offset(pgdp, addr);
323 	p4d_t p4d = READ_ONCE(*p4dp);
324 
325 	if (p4d_none(p4d)) {
326 		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
327 		phys_addr_t pud_phys;
328 
329 		if (flags & NO_EXEC_MAPPINGS)
330 			p4dval |= P4D_TABLE_PXN;
331 		BUG_ON(!pgtable_alloc);
332 		pud_phys = pgtable_alloc(PUD_SHIFT);
333 		__p4d_populate(p4dp, pud_phys, p4dval);
334 		p4d = READ_ONCE(*p4dp);
335 	}
336 	BUG_ON(p4d_bad(p4d));
337 
338 	pudp = pud_set_fixmap_offset(p4dp, addr);
339 	do {
340 		pud_t old_pud = READ_ONCE(*pudp);
341 
342 		next = pud_addr_end(addr, end);
343 
344 		/*
345 		 * For 4K granule only, attempt to put down a 1GB block
346 		 */
347 		if (pud_sect_supported() &&
348 		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
349 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
350 			pud_set_huge(pudp, phys, prot);
351 
352 			/*
353 			 * After the PUD entry has been populated once, we
354 			 * only allow updates to the permission attributes.
355 			 */
356 			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
357 						      READ_ONCE(pud_val(*pudp))));
358 		} else {
359 			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
360 					    pgtable_alloc, flags);
361 
362 			BUG_ON(pud_val(old_pud) != 0 &&
363 			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
364 		}
365 		phys += next - addr;
366 	} while (pudp++, addr = next, addr != end);
367 
368 	pud_clear_fixmap();
369 }
370 
371 static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
372 					unsigned long virt, phys_addr_t size,
373 					pgprot_t prot,
374 					phys_addr_t (*pgtable_alloc)(int),
375 					int flags)
376 {
377 	unsigned long addr, end, next;
378 	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
379 
380 	/*
381 	 * If the virtual and physical address don't have the same offset
382 	 * within a page, we cannot map the region as the caller expects.
383 	 */
384 	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
385 		return;
386 
387 	phys &= PAGE_MASK;
388 	addr = virt & PAGE_MASK;
389 	end = PAGE_ALIGN(virt + size);
390 
391 	do {
392 		next = pgd_addr_end(addr, end);
393 		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
394 			       flags);
395 		phys += next - addr;
396 	} while (pgdp++, addr = next, addr != end);
397 }
398 
399 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
400 				 unsigned long virt, phys_addr_t size,
401 				 pgprot_t prot,
402 				 phys_addr_t (*pgtable_alloc)(int),
403 				 int flags)
404 {
405 	mutex_lock(&fixmap_lock);
406 	__create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
407 				    pgtable_alloc, flags);
408 	mutex_unlock(&fixmap_lock);
409 }
410 
411 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
412 extern __alias(__create_pgd_mapping_locked)
413 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
414 			     phys_addr_t size, pgprot_t prot,
415 			     phys_addr_t (*pgtable_alloc)(int), int flags);
416 #endif
417 
418 static phys_addr_t __pgd_pgtable_alloc(int shift)
419 {
420 	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
421 	BUG_ON(!ptr);
422 
423 	/* Ensure the zeroed page is visible to the page table walker */
424 	dsb(ishst);
425 	return __pa(ptr);
426 }
427 
428 static phys_addr_t pgd_pgtable_alloc(int shift)
429 {
430 	phys_addr_t pa = __pgd_pgtable_alloc(shift);
431 
432 	/*
433 	 * Call proper page table ctor in case later we need to
434 	 * call core mm functions like apply_to_page_range() on
435 	 * this pre-allocated page table.
436 	 *
437 	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
438 	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
439 	 */
440 	if (shift == PAGE_SHIFT)
441 		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
442 	else if (shift == PMD_SHIFT)
443 		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
444 
445 	return pa;
446 }
447 
448 /*
449  * This function can only be used to modify existing table entries,
450  * without allocating new levels of table. Note that this permits the
451  * creation of new section or page entries.
452  */
453 static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
454 				  phys_addr_t size, pgprot_t prot)
455 {
456 	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
457 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
458 			&phys, virt);
459 		return;
460 	}
461 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
462 			     NO_CONT_MAPPINGS);
463 }
464 
465 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
466 			       unsigned long virt, phys_addr_t size,
467 			       pgprot_t prot, bool page_mappings_only)
468 {
469 	int flags = 0;
470 
471 	BUG_ON(mm == &init_mm);
472 
473 	if (page_mappings_only)
474 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
475 
476 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
477 			     pgd_pgtable_alloc, flags);
478 }
479 
480 static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
481 				phys_addr_t size, pgprot_t prot)
482 {
483 	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
484 		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
485 			&phys, virt);
486 		return;
487 	}
488 
489 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
490 			     NO_CONT_MAPPINGS);
491 
492 	/* flush the TLBs after updating live kernel mappings */
493 	flush_tlb_kernel_range(virt, virt + size);
494 }
495 
496 static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
497 				  phys_addr_t end, pgprot_t prot, int flags)
498 {
499 	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
500 			     prot, early_pgtable_alloc, flags);
501 }
502 
503 void __init mark_linear_text_alias_ro(void)
504 {
505 	/*
506 	 * Remove the write permissions from the linear alias of .text/.rodata
507 	 */
508 	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
509 			    (unsigned long)__init_begin - (unsigned long)_stext,
510 			    PAGE_KERNEL_RO);
511 }
512 
513 static bool crash_mem_map __initdata;
514 
515 static int __init enable_crash_mem_map(char *arg)
516 {
517 	/*
518 	 * Proper parameter parsing is done by reserve_crashkernel(). We only
519 	 * need to know if the linear map has to avoid block mappings so that
520 	 * the crashkernel reservations can be unmapped later.
521 	 */
522 	crash_mem_map = true;
523 
524 	return 0;
525 }
526 early_param("crashkernel", enable_crash_mem_map);
527 
528 static void __init map_mem(pgd_t *pgdp)
529 {
530 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
531 	phys_addr_t kernel_start = __pa_symbol(_stext);
532 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
533 	phys_addr_t start, end;
534 	int flags = NO_EXEC_MAPPINGS;
535 	u64 i;
536 
537 	/*
538 	 * Setting hierarchical PXNTable attributes on table entries covering
539 	 * the linear region is only possible if it is guaranteed that no table
540 	 * entries at any level are being shared between the linear region and
541 	 * the vmalloc region. Check whether this is true for the PGD level, in
542 	 * which case it is guaranteed to be true for all other levels as well.
543 	 */
544 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
545 
546 	if (can_set_direct_map())
547 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
548 
549 	/*
550 	 * Take care not to create a writable alias for the
551 	 * read-only text and rodata sections of the kernel image.
552 	 * So temporarily mark them as NOMAP to skip mappings in
553 	 * the following for-loop
554 	 */
555 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
556 
557 #ifdef CONFIG_KEXEC_CORE
558 	if (crash_mem_map) {
559 		if (defer_reserve_crashkernel())
560 			flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
561 		else if (crashk_res.end)
562 			memblock_mark_nomap(crashk_res.start,
563 			    resource_size(&crashk_res));
564 	}
565 #endif
566 
567 	/* map all the memory banks */
568 	for_each_mem_range(i, &start, &end) {
569 		if (start >= end)
570 			break;
571 		/*
572 		 * The linear map must allow allocation tags reading/writing
573 		 * if MTE is present. Otherwise, it has the same attributes as
574 		 * PAGE_KERNEL.
575 		 */
576 		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
577 			       flags);
578 	}
579 
580 	/*
581 	 * Map the linear alias of the [_stext, __init_begin) interval
582 	 * as non-executable now, and remove the write permission in
583 	 * mark_linear_text_alias_ro() below (which will be called after
584 	 * alternative patching has completed). This makes the contents
585 	 * of the region accessible to subsystems such as hibernate,
586 	 * but protects it from inadvertent modification or execution.
587 	 * Note that contiguous mappings cannot be remapped in this way,
588 	 * so we should avoid them here.
589 	 */
590 	__map_memblock(pgdp, kernel_start, kernel_end,
591 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
592 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
593 
594 	/*
595 	 * Use page-level mappings here so that we can shrink the region
596 	 * in page granularity and put back unused memory to buddy system
597 	 * through /sys/kernel/kexec_crash_size interface.
598 	 */
599 #ifdef CONFIG_KEXEC_CORE
600 	if (crash_mem_map && !defer_reserve_crashkernel()) {
601 		if (crashk_res.end) {
602 			__map_memblock(pgdp, crashk_res.start,
603 				       crashk_res.end + 1,
604 				       PAGE_KERNEL,
605 				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
606 			memblock_clear_nomap(crashk_res.start,
607 					     resource_size(&crashk_res));
608 		}
609 	}
610 #endif
611 }
612 
613 void mark_rodata_ro(void)
614 {
615 	unsigned long section_size;
616 
617 	/*
618 	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
619 	 * to cover NOTES and EXCEPTION_TABLE.
620 	 */
621 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
622 	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
623 			    section_size, PAGE_KERNEL_RO);
624 
625 	debug_checkwx();
626 }
627 
628 static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
629 				      pgprot_t prot, struct vm_struct *vma,
630 				      int flags, unsigned long vm_flags)
631 {
632 	phys_addr_t pa_start = __pa_symbol(va_start);
633 	unsigned long size = va_end - va_start;
634 
635 	BUG_ON(!PAGE_ALIGNED(pa_start));
636 	BUG_ON(!PAGE_ALIGNED(size));
637 
638 	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
639 			     early_pgtable_alloc, flags);
640 
641 	if (!(vm_flags & VM_NO_GUARD))
642 		size += PAGE_SIZE;
643 
644 	vma->addr	= va_start;
645 	vma->phys_addr	= pa_start;
646 	vma->size	= size;
647 	vma->flags	= VM_MAP | vm_flags;
648 	vma->caller	= __builtin_return_address(0);
649 
650 	vm_area_add_early(vma);
651 }
652 
653 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
654 static int __init map_entry_trampoline(void)
655 {
656 	int i;
657 
658 	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
659 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
660 
661 	/* The trampoline is always mapped and can therefore be global */
662 	pgprot_val(prot) &= ~PTE_NG;
663 
664 	/* Map only the text into the trampoline page table */
665 	memset(tramp_pg_dir, 0, PGD_SIZE);
666 	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
667 			     entry_tramp_text_size(), prot,
668 			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
669 
670 	/* Map both the text and data into the kernel page table */
671 	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
672 		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
673 			     pa_start + i * PAGE_SIZE, prot);
674 
675 	if (IS_ENABLED(CONFIG_RELOCATABLE))
676 		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
677 			     pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
678 
679 	return 0;
680 }
681 core_initcall(map_entry_trampoline);
682 #endif
683 
684 /*
685  * Open coded check for BTI, only for use to determine configuration
686  * for early mappings for before the cpufeature code has run.
687  */
688 static bool arm64_early_this_cpu_has_bti(void)
689 {
690 	u64 pfr1;
691 
692 	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
693 		return false;
694 
695 	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
696 	return cpuid_feature_extract_unsigned_field(pfr1,
697 						    ID_AA64PFR1_EL1_BT_SHIFT);
698 }
699 
700 /*
701  * Create fine-grained mappings for the kernel.
702  */
703 static void __init map_kernel(pgd_t *pgdp)
704 {
705 	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
706 				vmlinux_initdata, vmlinux_data;
707 
708 	/*
709 	 * External debuggers may need to write directly to the text
710 	 * mapping to install SW breakpoints. Allow this (only) when
711 	 * explicitly requested with rodata=off.
712 	 */
713 	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
714 
715 	/*
716 	 * If we have a CPU that supports BTI and a kernel built for
717 	 * BTI then mark the kernel executable text as guarded pages
718 	 * now so we don't have to rewrite the page tables later.
719 	 */
720 	if (arm64_early_this_cpu_has_bti())
721 		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
722 
723 	/*
724 	 * Only rodata will be remapped with different permissions later on,
725 	 * all other segments are allowed to use contiguous mappings.
726 	 */
727 	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
728 			   VM_NO_GUARD);
729 	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
730 			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
731 	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
732 			   &vmlinux_inittext, 0, VM_NO_GUARD);
733 	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
734 			   &vmlinux_initdata, 0, VM_NO_GUARD);
735 	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
736 
737 	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
738 		/*
739 		 * The fixmap falls in a separate pgd to the kernel, and doesn't
740 		 * live in the carveout for the swapper_pg_dir. We can simply
741 		 * re-use the existing dir for the fixmap.
742 		 */
743 		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
744 			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
745 	} else if (CONFIG_PGTABLE_LEVELS > 3) {
746 		pgd_t *bm_pgdp;
747 		p4d_t *bm_p4dp;
748 		pud_t *bm_pudp;
749 		/*
750 		 * The fixmap shares its top level pgd entry with the kernel
751 		 * mapping. This can really only occur when we are running
752 		 * with 16k/4 levels, so we can simply reuse the pud level
753 		 * entry instead.
754 		 */
755 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
756 		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
757 		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
758 		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
759 		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
760 		pud_clear_fixmap();
761 	} else {
762 		BUG();
763 	}
764 
765 	kasan_copy_shadow(pgdp);
766 }
767 
768 static void __init create_idmap(void)
769 {
770 	u64 start = __pa_symbol(__idmap_text_start);
771 	u64 size = __pa_symbol(__idmap_text_end) - start;
772 	pgd_t *pgd = idmap_pg_dir;
773 	u64 pgd_phys;
774 
775 	/* check if we need an additional level of translation */
776 	if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
777 		pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
778 		set_pgd(&idmap_pg_dir[start >> VA_BITS],
779 			__pgd(pgd_phys | P4D_TYPE_TABLE));
780 		pgd = __va(pgd_phys);
781 	}
782 	__create_pgd_mapping(pgd, start, start, size, PAGE_KERNEL_ROX,
783 			     early_pgtable_alloc, 0);
784 
785 	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
786 		extern u32 __idmap_kpti_flag;
787 		u64 pa = __pa_symbol(&__idmap_kpti_flag);
788 
789 		/*
790 		 * The KPTI G-to-nG conversion code needs a read-write mapping
791 		 * of its synchronization flag in the ID map.
792 		 */
793 		__create_pgd_mapping(pgd, pa, pa, sizeof(u32), PAGE_KERNEL,
794 				     early_pgtable_alloc, 0);
795 	}
796 }
797 
798 void __init paging_init(void)
799 {
800 	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
801 	extern pgd_t init_idmap_pg_dir[];
802 
803 	idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
804 
805 	map_kernel(pgdp);
806 	map_mem(pgdp);
807 
808 	pgd_clear_fixmap();
809 
810 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
811 	init_mm.pgd = swapper_pg_dir;
812 
813 	memblock_phys_free(__pa_symbol(init_pg_dir),
814 			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
815 
816 	memblock_allow_resize();
817 
818 	create_idmap();
819 }
820 
821 #ifdef CONFIG_MEMORY_HOTPLUG
822 static void free_hotplug_page_range(struct page *page, size_t size,
823 				    struct vmem_altmap *altmap)
824 {
825 	if (altmap) {
826 		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
827 	} else {
828 		WARN_ON(PageReserved(page));
829 		free_pages((unsigned long)page_address(page), get_order(size));
830 	}
831 }
832 
833 static void free_hotplug_pgtable_page(struct page *page)
834 {
835 	free_hotplug_page_range(page, PAGE_SIZE, NULL);
836 }
837 
838 static bool pgtable_range_aligned(unsigned long start, unsigned long end,
839 				  unsigned long floor, unsigned long ceiling,
840 				  unsigned long mask)
841 {
842 	start &= mask;
843 	if (start < floor)
844 		return false;
845 
846 	if (ceiling) {
847 		ceiling &= mask;
848 		if (!ceiling)
849 			return false;
850 	}
851 
852 	if (end - 1 > ceiling - 1)
853 		return false;
854 	return true;
855 }
856 
857 static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
858 				    unsigned long end, bool free_mapped,
859 				    struct vmem_altmap *altmap)
860 {
861 	pte_t *ptep, pte;
862 
863 	do {
864 		ptep = pte_offset_kernel(pmdp, addr);
865 		pte = READ_ONCE(*ptep);
866 		if (pte_none(pte))
867 			continue;
868 
869 		WARN_ON(!pte_present(pte));
870 		pte_clear(&init_mm, addr, ptep);
871 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
872 		if (free_mapped)
873 			free_hotplug_page_range(pte_page(pte),
874 						PAGE_SIZE, altmap);
875 	} while (addr += PAGE_SIZE, addr < end);
876 }
877 
878 static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
879 				    unsigned long end, bool free_mapped,
880 				    struct vmem_altmap *altmap)
881 {
882 	unsigned long next;
883 	pmd_t *pmdp, pmd;
884 
885 	do {
886 		next = pmd_addr_end(addr, end);
887 		pmdp = pmd_offset(pudp, addr);
888 		pmd = READ_ONCE(*pmdp);
889 		if (pmd_none(pmd))
890 			continue;
891 
892 		WARN_ON(!pmd_present(pmd));
893 		if (pmd_sect(pmd)) {
894 			pmd_clear(pmdp);
895 
896 			/*
897 			 * One TLBI should be sufficient here as the PMD_SIZE
898 			 * range is mapped with a single block entry.
899 			 */
900 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
901 			if (free_mapped)
902 				free_hotplug_page_range(pmd_page(pmd),
903 							PMD_SIZE, altmap);
904 			continue;
905 		}
906 		WARN_ON(!pmd_table(pmd));
907 		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
908 	} while (addr = next, addr < end);
909 }
910 
911 static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
912 				    unsigned long end, bool free_mapped,
913 				    struct vmem_altmap *altmap)
914 {
915 	unsigned long next;
916 	pud_t *pudp, pud;
917 
918 	do {
919 		next = pud_addr_end(addr, end);
920 		pudp = pud_offset(p4dp, addr);
921 		pud = READ_ONCE(*pudp);
922 		if (pud_none(pud))
923 			continue;
924 
925 		WARN_ON(!pud_present(pud));
926 		if (pud_sect(pud)) {
927 			pud_clear(pudp);
928 
929 			/*
930 			 * One TLBI should be sufficient here as the PUD_SIZE
931 			 * range is mapped with a single block entry.
932 			 */
933 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
934 			if (free_mapped)
935 				free_hotplug_page_range(pud_page(pud),
936 							PUD_SIZE, altmap);
937 			continue;
938 		}
939 		WARN_ON(!pud_table(pud));
940 		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
941 	} while (addr = next, addr < end);
942 }
943 
944 static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
945 				    unsigned long end, bool free_mapped,
946 				    struct vmem_altmap *altmap)
947 {
948 	unsigned long next;
949 	p4d_t *p4dp, p4d;
950 
951 	do {
952 		next = p4d_addr_end(addr, end);
953 		p4dp = p4d_offset(pgdp, addr);
954 		p4d = READ_ONCE(*p4dp);
955 		if (p4d_none(p4d))
956 			continue;
957 
958 		WARN_ON(!p4d_present(p4d));
959 		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
960 	} while (addr = next, addr < end);
961 }
962 
963 static void unmap_hotplug_range(unsigned long addr, unsigned long end,
964 				bool free_mapped, struct vmem_altmap *altmap)
965 {
966 	unsigned long next;
967 	pgd_t *pgdp, pgd;
968 
969 	/*
970 	 * altmap can only be used as vmemmap mapping backing memory.
971 	 * In case the backing memory itself is not being freed, then
972 	 * altmap is irrelevant. Warn about this inconsistency when
973 	 * encountered.
974 	 */
975 	WARN_ON(!free_mapped && altmap);
976 
977 	do {
978 		next = pgd_addr_end(addr, end);
979 		pgdp = pgd_offset_k(addr);
980 		pgd = READ_ONCE(*pgdp);
981 		if (pgd_none(pgd))
982 			continue;
983 
984 		WARN_ON(!pgd_present(pgd));
985 		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
986 	} while (addr = next, addr < end);
987 }
988 
989 static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
990 				 unsigned long end, unsigned long floor,
991 				 unsigned long ceiling)
992 {
993 	pte_t *ptep, pte;
994 	unsigned long i, start = addr;
995 
996 	do {
997 		ptep = pte_offset_kernel(pmdp, addr);
998 		pte = READ_ONCE(*ptep);
999 
1000 		/*
1001 		 * This is just a sanity check here which verifies that
1002 		 * pte clearing has been done by earlier unmap loops.
1003 		 */
1004 		WARN_ON(!pte_none(pte));
1005 	} while (addr += PAGE_SIZE, addr < end);
1006 
1007 	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
1008 		return;
1009 
1010 	/*
1011 	 * Check whether we can free the pte page if the rest of the
1012 	 * entries are empty. Overlap with other regions have been
1013 	 * handled by the floor/ceiling check.
1014 	 */
1015 	ptep = pte_offset_kernel(pmdp, 0UL);
1016 	for (i = 0; i < PTRS_PER_PTE; i++) {
1017 		if (!pte_none(READ_ONCE(ptep[i])))
1018 			return;
1019 	}
1020 
1021 	pmd_clear(pmdp);
1022 	__flush_tlb_kernel_pgtable(start);
1023 	free_hotplug_pgtable_page(virt_to_page(ptep));
1024 }
1025 
1026 static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
1027 				 unsigned long end, unsigned long floor,
1028 				 unsigned long ceiling)
1029 {
1030 	pmd_t *pmdp, pmd;
1031 	unsigned long i, next, start = addr;
1032 
1033 	do {
1034 		next = pmd_addr_end(addr, end);
1035 		pmdp = pmd_offset(pudp, addr);
1036 		pmd = READ_ONCE(*pmdp);
1037 		if (pmd_none(pmd))
1038 			continue;
1039 
1040 		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
1041 		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
1042 	} while (addr = next, addr < end);
1043 
1044 	if (CONFIG_PGTABLE_LEVELS <= 2)
1045 		return;
1046 
1047 	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
1048 		return;
1049 
1050 	/*
1051 	 * Check whether we can free the pmd page if the rest of the
1052 	 * entries are empty. Overlap with other regions have been
1053 	 * handled by the floor/ceiling check.
1054 	 */
1055 	pmdp = pmd_offset(pudp, 0UL);
1056 	for (i = 0; i < PTRS_PER_PMD; i++) {
1057 		if (!pmd_none(READ_ONCE(pmdp[i])))
1058 			return;
1059 	}
1060 
1061 	pud_clear(pudp);
1062 	__flush_tlb_kernel_pgtable(start);
1063 	free_hotplug_pgtable_page(virt_to_page(pmdp));
1064 }
1065 
1066 static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
1067 				 unsigned long end, unsigned long floor,
1068 				 unsigned long ceiling)
1069 {
1070 	pud_t *pudp, pud;
1071 	unsigned long i, next, start = addr;
1072 
1073 	do {
1074 		next = pud_addr_end(addr, end);
1075 		pudp = pud_offset(p4dp, addr);
1076 		pud = READ_ONCE(*pudp);
1077 		if (pud_none(pud))
1078 			continue;
1079 
1080 		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
1081 		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
1082 	} while (addr = next, addr < end);
1083 
1084 	if (CONFIG_PGTABLE_LEVELS <= 3)
1085 		return;
1086 
1087 	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
1088 		return;
1089 
1090 	/*
1091 	 * Check whether we can free the pud page if the rest of the
1092 	 * entries are empty. Overlap with other regions have been
1093 	 * handled by the floor/ceiling check.
1094 	 */
1095 	pudp = pud_offset(p4dp, 0UL);
1096 	for (i = 0; i < PTRS_PER_PUD; i++) {
1097 		if (!pud_none(READ_ONCE(pudp[i])))
1098 			return;
1099 	}
1100 
1101 	p4d_clear(p4dp);
1102 	__flush_tlb_kernel_pgtable(start);
1103 	free_hotplug_pgtable_page(virt_to_page(pudp));
1104 }
1105 
1106 static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
1107 				 unsigned long end, unsigned long floor,
1108 				 unsigned long ceiling)
1109 {
1110 	unsigned long next;
1111 	p4d_t *p4dp, p4d;
1112 
1113 	do {
1114 		next = p4d_addr_end(addr, end);
1115 		p4dp = p4d_offset(pgdp, addr);
1116 		p4d = READ_ONCE(*p4dp);
1117 		if (p4d_none(p4d))
1118 			continue;
1119 
1120 		WARN_ON(!p4d_present(p4d));
1121 		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
1122 	} while (addr = next, addr < end);
1123 }
1124 
1125 static void free_empty_tables(unsigned long addr, unsigned long end,
1126 			      unsigned long floor, unsigned long ceiling)
1127 {
1128 	unsigned long next;
1129 	pgd_t *pgdp, pgd;
1130 
1131 	do {
1132 		next = pgd_addr_end(addr, end);
1133 		pgdp = pgd_offset_k(addr);
1134 		pgd = READ_ONCE(*pgdp);
1135 		if (pgd_none(pgd))
1136 			continue;
1137 
1138 		WARN_ON(!pgd_present(pgd));
1139 		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
1140 	} while (addr = next, addr < end);
1141 }
1142 #endif
1143 
1144 void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
1145 			       unsigned long addr, unsigned long next)
1146 {
1147 	pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
1148 }
1149 
1150 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
1151 				unsigned long addr, unsigned long next)
1152 {
1153 	vmemmap_verify((pte_t *)pmdp, node, addr, next);
1154 	return 1;
1155 }
1156 
1157 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1158 		struct vmem_altmap *altmap)
1159 {
1160 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1161 
1162 	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
1163 		return vmemmap_populate_basepages(start, end, node, altmap);
1164 	else
1165 		return vmemmap_populate_hugepages(start, end, node, altmap);
1166 }
1167 
1168 #ifdef CONFIG_MEMORY_HOTPLUG
1169 void vmemmap_free(unsigned long start, unsigned long end,
1170 		struct vmem_altmap *altmap)
1171 {
1172 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1173 
1174 	unmap_hotplug_range(start, end, true, altmap);
1175 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
1176 }
1177 #endif /* CONFIG_MEMORY_HOTPLUG */
1178 
1179 static inline pud_t *fixmap_pud(unsigned long addr)
1180 {
1181 	pgd_t *pgdp = pgd_offset_k(addr);
1182 	p4d_t *p4dp = p4d_offset(pgdp, addr);
1183 	p4d_t p4d = READ_ONCE(*p4dp);
1184 
1185 	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
1186 
1187 	return pud_offset_kimg(p4dp, addr);
1188 }
1189 
1190 static inline pmd_t *fixmap_pmd(unsigned long addr)
1191 {
1192 	pud_t *pudp = fixmap_pud(addr);
1193 	pud_t pud = READ_ONCE(*pudp);
1194 
1195 	BUG_ON(pud_none(pud) || pud_bad(pud));
1196 
1197 	return pmd_offset_kimg(pudp, addr);
1198 }
1199 
1200 static inline pte_t *fixmap_pte(unsigned long addr)
1201 {
1202 	return &bm_pte[pte_index(addr)];
1203 }
1204 
1205 /*
1206  * The p*d_populate functions call virt_to_phys implicitly so they can't be used
1207  * directly on kernel symbols (bm_p*d). This function is called too early to use
1208  * lm_alias so __p*d_populate functions must be used to populate with the
1209  * physical address from __pa_symbol.
1210  */
1211 void __init early_fixmap_init(void)
1212 {
1213 	pgd_t *pgdp;
1214 	p4d_t *p4dp, p4d;
1215 	pud_t *pudp;
1216 	pmd_t *pmdp;
1217 	unsigned long addr = FIXADDR_START;
1218 
1219 	pgdp = pgd_offset_k(addr);
1220 	p4dp = p4d_offset(pgdp, addr);
1221 	p4d = READ_ONCE(*p4dp);
1222 	if (CONFIG_PGTABLE_LEVELS > 3 &&
1223 	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
1224 		/*
1225 		 * We only end up here if the kernel mapping and the fixmap
1226 		 * share the top level pgd entry, which should only happen on
1227 		 * 16k/4 levels configurations.
1228 		 */
1229 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
1230 		pudp = pud_offset_kimg(p4dp, addr);
1231 	} else {
1232 		if (p4d_none(p4d))
1233 			__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
1234 		pudp = fixmap_pud(addr);
1235 	}
1236 	if (pud_none(READ_ONCE(*pudp)))
1237 		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
1238 	pmdp = fixmap_pmd(addr);
1239 	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
1240 
1241 	/*
1242 	 * The boot-ioremap range spans multiple pmds, for which
1243 	 * we are not prepared:
1244 	 */
1245 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
1246 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
1247 
1248 	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
1249 	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
1250 		WARN_ON(1);
1251 		pr_warn("pmdp %p != %p, %p\n",
1252 			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
1253 			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
1254 		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
1255 			fix_to_virt(FIX_BTMAP_BEGIN));
1256 		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
1257 			fix_to_virt(FIX_BTMAP_END));
1258 
1259 		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
1260 		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
1261 	}
1262 }
1263 
1264 /*
1265  * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
1266  * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
1267  */
1268 void __set_fixmap(enum fixed_addresses idx,
1269 			       phys_addr_t phys, pgprot_t flags)
1270 {
1271 	unsigned long addr = __fix_to_virt(idx);
1272 	pte_t *ptep;
1273 
1274 	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
1275 
1276 	ptep = fixmap_pte(addr);
1277 
1278 	if (pgprot_val(flags)) {
1279 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
1280 	} else {
1281 		pte_clear(&init_mm, addr, ptep);
1282 		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
1283 	}
1284 }
1285 
1286 void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
1287 {
1288 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
1289 	int offset;
1290 	void *dt_virt;
1291 
1292 	/*
1293 	 * Check whether the physical FDT address is set and meets the minimum
1294 	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
1295 	 * at least 8 bytes so that we can always access the magic and size
1296 	 * fields of the FDT header after mapping the first chunk, double check
1297 	 * here if that is indeed the case.
1298 	 */
1299 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
1300 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
1301 		return NULL;
1302 
1303 	/*
1304 	 * Make sure that the FDT region can be mapped without the need to
1305 	 * allocate additional translation table pages, so that it is safe
1306 	 * to call create_mapping_noalloc() this early.
1307 	 *
1308 	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
1309 	 * be in the same PMD as the rest of the fixmap.
1310 	 * On 4k pages, we'll use section mappings for the FDT so we only
1311 	 * have to be in the same PUD.
1312 	 */
1313 	BUILD_BUG_ON(dt_virt_base % SZ_2M);
1314 
1315 	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
1316 		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
1317 
1318 	offset = dt_phys % SWAPPER_BLOCK_SIZE;
1319 	dt_virt = (void *)dt_virt_base + offset;
1320 
1321 	/* map the first chunk so we can read the size from the header */
1322 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
1323 			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
1324 
1325 	if (fdt_magic(dt_virt) != FDT_MAGIC)
1326 		return NULL;
1327 
1328 	*size = fdt_totalsize(dt_virt);
1329 	if (*size > MAX_FDT_SIZE)
1330 		return NULL;
1331 
1332 	if (offset + *size > SWAPPER_BLOCK_SIZE)
1333 		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
1334 			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
1335 
1336 	return dt_virt;
1337 }
1338 
1339 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
1340 {
1341 	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
1342 
1343 	/* Only allow permission changes for now */
1344 	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
1345 				   pud_val(new_pud)))
1346 		return 0;
1347 
1348 	VM_BUG_ON(phys & ~PUD_MASK);
1349 	set_pud(pudp, new_pud);
1350 	return 1;
1351 }
1352 
1353 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
1354 {
1355 	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
1356 
1357 	/* Only allow permission changes for now */
1358 	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
1359 				   pmd_val(new_pmd)))
1360 		return 0;
1361 
1362 	VM_BUG_ON(phys & ~PMD_MASK);
1363 	set_pmd(pmdp, new_pmd);
1364 	return 1;
1365 }
1366 
1367 int pud_clear_huge(pud_t *pudp)
1368 {
1369 	if (!pud_sect(READ_ONCE(*pudp)))
1370 		return 0;
1371 	pud_clear(pudp);
1372 	return 1;
1373 }
1374 
1375 int pmd_clear_huge(pmd_t *pmdp)
1376 {
1377 	if (!pmd_sect(READ_ONCE(*pmdp)))
1378 		return 0;
1379 	pmd_clear(pmdp);
1380 	return 1;
1381 }
1382 
1383 int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
1384 {
1385 	pte_t *table;
1386 	pmd_t pmd;
1387 
1388 	pmd = READ_ONCE(*pmdp);
1389 
1390 	if (!pmd_table(pmd)) {
1391 		VM_WARN_ON(1);
1392 		return 1;
1393 	}
1394 
1395 	table = pte_offset_kernel(pmdp, addr);
1396 	pmd_clear(pmdp);
1397 	__flush_tlb_kernel_pgtable(addr);
1398 	pte_free_kernel(NULL, table);
1399 	return 1;
1400 }
1401 
1402 int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
1403 {
1404 	pmd_t *table;
1405 	pmd_t *pmdp;
1406 	pud_t pud;
1407 	unsigned long next, end;
1408 
1409 	pud = READ_ONCE(*pudp);
1410 
1411 	if (!pud_table(pud)) {
1412 		VM_WARN_ON(1);
1413 		return 1;
1414 	}
1415 
1416 	table = pmd_offset(pudp, addr);
1417 	pmdp = table;
1418 	next = addr;
1419 	end = addr + PUD_SIZE;
1420 	do {
1421 		pmd_free_pte_page(pmdp, next);
1422 	} while (pmdp++, next += PMD_SIZE, next != end);
1423 
1424 	pud_clear(pudp);
1425 	__flush_tlb_kernel_pgtable(addr);
1426 	pmd_free(NULL, table);
1427 	return 1;
1428 }
1429 
1430 #ifdef CONFIG_MEMORY_HOTPLUG
1431 static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
1432 {
1433 	unsigned long end = start + size;
1434 
1435 	WARN_ON(pgdir != init_mm.pgd);
1436 	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
1437 
1438 	unmap_hotplug_range(start, end, false, NULL);
1439 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
1440 }
1441 
1442 struct range arch_get_mappable_range(void)
1443 {
1444 	struct range mhp_range;
1445 	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
1446 	u64 end_linear_pa = __pa(PAGE_END - 1);
1447 
1448 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
1449 		/*
1450 		 * Check for a wrap, it is possible because of randomized linear
1451 		 * mapping the start physical address is actually bigger than
1452 		 * the end physical address. In this case set start to zero
1453 		 * because [0, end_linear_pa] range must still be able to cover
1454 		 * all addressable physical addresses.
1455 		 */
1456 		if (start_linear_pa > end_linear_pa)
1457 			start_linear_pa = 0;
1458 	}
1459 
1460 	WARN_ON(start_linear_pa > end_linear_pa);
1461 
1462 	/*
1463 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
1464 	 * accommodating both its ends but excluding PAGE_END. Max physical
1465 	 * range which can be mapped inside this linear mapping range, must
1466 	 * also be derived from its end points.
1467 	 */
1468 	mhp_range.start = start_linear_pa;
1469 	mhp_range.end =  end_linear_pa;
1470 
1471 	return mhp_range;
1472 }
1473 
1474 int arch_add_memory(int nid, u64 start, u64 size,
1475 		    struct mhp_params *params)
1476 {
1477 	int ret, flags = NO_EXEC_MAPPINGS;
1478 
1479 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
1480 
1481 	if (can_set_direct_map())
1482 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
1483 
1484 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
1485 			     size, params->pgprot, __pgd_pgtable_alloc,
1486 			     flags);
1487 
1488 	memblock_clear_nomap(start, size);
1489 
1490 	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
1491 			   params);
1492 	if (ret)
1493 		__remove_pgd_mapping(swapper_pg_dir,
1494 				     __phys_to_virt(start), size);
1495 	else {
1496 		max_pfn = PFN_UP(start + size);
1497 		max_low_pfn = max_pfn;
1498 	}
1499 
1500 	return ret;
1501 }
1502 
1503 void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
1504 {
1505 	unsigned long start_pfn = start >> PAGE_SHIFT;
1506 	unsigned long nr_pages = size >> PAGE_SHIFT;
1507 
1508 	__remove_pages(start_pfn, nr_pages, altmap);
1509 	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
1510 }
1511 
1512 /*
1513  * This memory hotplug notifier helps prevent boot memory from being
1514  * inadvertently removed as it blocks pfn range offlining process in
1515  * __offline_pages(). Hence this prevents both offlining as well as
1516  * removal process for boot memory which is initially always online.
1517  * In future if and when boot memory could be removed, this notifier
1518  * should be dropped and free_hotplug_page_range() should handle any
1519  * reserved pages allocated during boot.
1520  */
1521 static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
1522 					   unsigned long action, void *data)
1523 {
1524 	struct mem_section *ms;
1525 	struct memory_notify *arg = data;
1526 	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
1527 	unsigned long pfn = arg->start_pfn;
1528 
1529 	if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
1530 		return NOTIFY_OK;
1531 
1532 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1533 		unsigned long start = PFN_PHYS(pfn);
1534 		unsigned long end = start + (1UL << PA_SECTION_SHIFT);
1535 
1536 		ms = __pfn_to_section(pfn);
1537 		if (!early_section(ms))
1538 			continue;
1539 
1540 		if (action == MEM_GOING_OFFLINE) {
1541 			/*
1542 			 * Boot memory removal is not supported. Prevent
1543 			 * it via blocking any attempted offline request
1544 			 * for the boot memory and just report it.
1545 			 */
1546 			pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
1547 			return NOTIFY_BAD;
1548 		} else if (action == MEM_OFFLINE) {
1549 			/*
1550 			 * This should have never happened. Boot memory
1551 			 * offlining should have been prevented by this
1552 			 * very notifier. Probably some memory removal
1553 			 * procedure might have changed which would then
1554 			 * require further debug.
1555 			 */
1556 			pr_err("Boot memory [%lx %lx] offlined\n", start, end);
1557 
1558 			/*
1559 			 * Core memory hotplug does not process a return
1560 			 * code from the notifier for MEM_OFFLINE events.
1561 			 * The error condition has been reported. Return
1562 			 * from here as if ignored.
1563 			 */
1564 			return NOTIFY_DONE;
1565 		}
1566 	}
1567 	return NOTIFY_OK;
1568 }
1569 
1570 static struct notifier_block prevent_bootmem_remove_nb = {
1571 	.notifier_call = prevent_bootmem_remove_notifier,
1572 };
1573 
1574 /*
1575  * This ensures that boot memory sections on the platform are online
1576  * from early boot. Memory sections could not be prevented from being
1577  * offlined, unless for some reason they are not online to begin with.
1578  * This helps validate the basic assumption on which the above memory
1579  * event notifier works to prevent boot memory section offlining and
1580  * its possible removal.
1581  */
1582 static void validate_bootmem_online(void)
1583 {
1584 	phys_addr_t start, end, addr;
1585 	struct mem_section *ms;
1586 	u64 i;
1587 
1588 	/*
1589 	 * Scanning across all memblock might be expensive
1590 	 * on some big memory systems. Hence enable this
1591 	 * validation only with DEBUG_VM.
1592 	 */
1593 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
1594 		return;
1595 
1596 	for_each_mem_range(i, &start, &end) {
1597 		for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
1598 			ms = __pfn_to_section(PHYS_PFN(addr));
1599 
1600 			/*
1601 			 * All memory ranges in the system at this point
1602 			 * should have been marked as early sections.
1603 			 */
1604 			WARN_ON(!early_section(ms));
1605 
1606 			/*
1607 			 * Memory notifier mechanism here to prevent boot
1608 			 * memory offlining depends on the fact that each
1609 			 * early section memory on the system is initially
1610 			 * online. Otherwise a given memory section which
1611 			 * is already offline will be overlooked and can
1612 			 * be removed completely. Call out such sections.
1613 			 */
1614 			if (!online_section(ms))
1615 				pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
1616 					addr, addr + (1UL << PA_SECTION_SHIFT));
1617 		}
1618 	}
1619 }
1620 
1621 static int __init prevent_bootmem_remove_init(void)
1622 {
1623 	int ret = 0;
1624 
1625 	if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
1626 		return ret;
1627 
1628 	validate_bootmem_online();
1629 	ret = register_memory_notifier(&prevent_bootmem_remove_nb);
1630 	if (ret)
1631 		pr_err("%s: Notifier registration failed %d\n", __func__, ret);
1632 
1633 	return ret;
1634 }
1635 early_initcall(prevent_bootmem_remove_init);
1636 #endif
1637 
1638 pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
1639 {
1640 	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
1641 	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
1642 		/*
1643 		 * Break-before-make (BBM) is required for all user space mappings
1644 		 * when the permission changes from executable to non-executable
1645 		 * in cases where cpu is affected with errata #2645198.
1646 		 */
1647 		if (pte_user_exec(READ_ONCE(*ptep)))
1648 			return ptep_clear_flush(vma, addr, ptep);
1649 	}
1650 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
1651 }
1652 
1653 void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
1654 			     pte_t old_pte, pte_t pte)
1655 {
1656 	set_pte_at(vma->vm_mm, addr, ptep, pte);
1657 }
1658