xref: /openbmc/linux/arch/powerpc/mm/book3s64/radix_pgtable.c (revision 4d75f5c664195b970e1cd2fd25b65b5eff257a0a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Page table handling routines for radix page table.
4  *
5  * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
6  */
7 
8 #define pr_fmt(fmt) "radix-mmu: " fmt
9 
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <linux/sched/mm.h>
13 #include <linux/memblock.h>
14 #include <linux/of.h>
15 #include <linux/of_fdt.h>
16 #include <linux/mm.h>
17 #include <linux/hugetlb.h>
18 #include <linux/string_helpers.h>
19 #include <linux/memory.h>
20 
21 #include <asm/pgalloc.h>
22 #include <asm/mmu_context.h>
23 #include <asm/dma.h>
24 #include <asm/machdep.h>
25 #include <asm/mmu.h>
26 #include <asm/firmware.h>
27 #include <asm/powernv.h>
28 #include <asm/sections.h>
29 #include <asm/smp.h>
30 #include <asm/trace.h>
31 #include <asm/uaccess.h>
32 #include <asm/ultravisor.h>
33 #include <asm/set_memory.h>
34 
35 #include <trace/events/thp.h>
36 
37 #include <mm/mmu_decl.h>
38 
39 unsigned int mmu_base_pid;
40 
41 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
42 			unsigned long region_start, unsigned long region_end)
43 {
44 	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
45 	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
46 	void *ptr;
47 
48 	if (region_start)
49 		min_addr = region_start;
50 	if (region_end)
51 		max_addr = region_end;
52 
53 	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
54 
55 	if (!ptr)
56 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
57 		      __func__, size, size, nid, &min_addr, &max_addr);
58 
59 	return ptr;
60 }
61 
62 /*
63  * When allocating pud or pmd pointers, we allocate a complete page
64  * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
65  * is to ensure that the page obtained from the memblock allocator
66  * can be completely used as page table page and can be freed
67  * correctly when the page table entries are removed.
68  */
69 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
70 			  pgprot_t flags,
71 			  unsigned int map_page_size,
72 			  int nid,
73 			  unsigned long region_start, unsigned long region_end)
74 {
75 	unsigned long pfn = pa >> PAGE_SHIFT;
76 	pgd_t *pgdp;
77 	p4d_t *p4dp;
78 	pud_t *pudp;
79 	pmd_t *pmdp;
80 	pte_t *ptep;
81 
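	/*
	 * Walk the kernel page table, allocating any missing intermediate
	 * levels from memblock, and short-circuit at the PUD or PMD level
	 * when a huge mapping of that size was requested.
	 */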
82 	pgdp = pgd_offset_k(ea);
83 	p4dp = p4d_offset(pgdp, ea);
84 	if (p4d_none(*p4dp)) {
85 		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
86 					   region_start, region_end);
87 		p4d_populate(&init_mm, p4dp, pudp);
88 	}
89 	pudp = pud_offset(p4dp, ea);
90 	if (map_page_size == PUD_SIZE) {
91 		ptep = (pte_t *)pudp;
92 		goto set_the_pte;
93 	}
94 	if (pud_none(*pudp)) {
95 		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
96 					   region_end);
97 		pud_populate(&init_mm, pudp, pmdp);
98 	}
99 	pmdp = pmd_offset(pudp, ea);
100 	if (map_page_size == PMD_SIZE) {
101 		ptep = pmdp_ptep(pmdp);
102 		goto set_the_pte;
103 	}
104 	if (!pmd_present(*pmdp)) {
105 		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
106 						region_start, region_end);
107 		pmd_populate_kernel(&init_mm, pmdp, ptep);
108 	}
109 	ptep = pte_offset_kernel(pmdp, ea);
110 
111 set_the_pte:
112 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
113 	asm volatile("ptesync": : :"memory");
114 	return 0;
115 }
116 
117 /*
118  * nid, region_start, and region_end are hints to try to place the page
119  * table memory in the same node or region.
120  */
121 static int __map_kernel_page(unsigned long ea, unsigned long pa,
122 			  pgprot_t flags,
123 			  unsigned int map_page_size,
124 			  int nid,
125 			  unsigned long region_start, unsigned long region_end)
126 {
127 	unsigned long pfn = pa >> PAGE_SHIFT;
128 	pgd_t *pgdp;
129 	p4d_t *p4dp;
130 	pud_t *pudp;
131 	pmd_t *pmdp;
132 	pte_t *ptep;
133 	/*
134 	 * Make sure task size is correct as per the max addr
135 	 */
136 	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
137 
138 #ifdef CONFIG_PPC_64K_PAGES
139 	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
140 #endif
141 
142 	if (unlikely(!slab_is_available()))
143 		return early_map_kernel_page(ea, pa, flags, map_page_size,
144 						nid, region_start, region_end);
145 
146 	/*
147 	 * Should make page table allocation functions be able to take a
148 	 * node, so we can place kernel page tables on the right nodes after
149 	 * boot.
150 	 */
151 	pgdp = pgd_offset_k(ea);
152 	p4dp = p4d_offset(pgdp, ea);
153 	pudp = pud_alloc(&init_mm, p4dp, ea);
154 	if (!pudp)
155 		return -ENOMEM;
156 	if (map_page_size == PUD_SIZE) {
157 		ptep = (pte_t *)pudp;
158 		goto set_the_pte;
159 	}
160 	pmdp = pmd_alloc(&init_mm, pudp, ea);
161 	if (!pmdp)
162 		return -ENOMEM;
163 	if (map_page_size == PMD_SIZE) {
164 		ptep = pmdp_ptep(pmdp);
165 		goto set_the_pte;
166 	}
167 	ptep = pte_alloc_kernel(pmdp, ea);
168 	if (!ptep)
169 		return -ENOMEM;
170 
171 set_the_pte:
172 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
173 	asm volatile("ptesync": : :"memory");
174 	return 0;
175 }
176 
177 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
178 			  pgprot_t flags,
179 			  unsigned int map_page_size)
180 {
181 	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
182 }
183 
184 #ifdef CONFIG_STRICT_KERNEL_RWX
185 static void radix__change_memory_range(unsigned long start, unsigned long end,
186 				       unsigned long clear)
187 {
188 	unsigned long idx;
189 	pgd_t *pgdp;
190 	p4d_t *p4dp;
191 	pud_t *pudp;
192 	pmd_t *pmdp;
193 	pte_t *ptep;
194 
195 	start = ALIGN_DOWN(start, PAGE_SIZE);
196 	end = PAGE_ALIGN(end); // aligns up
197 
198 	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
199 		 start, end, clear);
200 
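	/*
	 * Walk every page in the range and clear the given permission bits
	 * from whichever leaf entry (PUD, PMD or PTE) currently maps it.
	 */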
201 	for (idx = start; idx < end; idx += PAGE_SIZE) {
202 		pgdp = pgd_offset_k(idx);
203 		p4dp = p4d_offset(pgdp, idx);
204 		pudp = pud_alloc(&init_mm, p4dp, idx);
205 		if (!pudp)
206 			continue;
207 		if (pud_is_leaf(*pudp)) {
208 			ptep = (pte_t *)pudp;
209 			goto update_the_pte;
210 		}
211 		pmdp = pmd_alloc(&init_mm, pudp, idx);
212 		if (!pmdp)
213 			continue;
214 		if (pmd_is_leaf(*pmdp)) {
215 			ptep = pmdp_ptep(pmdp);
216 			goto update_the_pte;
217 		}
218 		ptep = pte_alloc_kernel(pmdp, idx);
219 		if (!ptep)
220 			continue;
221 update_the_pte:
222 		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
223 	}
224 
225 	radix__flush_tlb_kernel_range(start, end);
226 }
227 
228 void radix__mark_rodata_ro(void)
229 {
230 	unsigned long start, end;
231 
232 	start = (unsigned long)_stext;
233 	end = (unsigned long)__end_rodata;
234 
235 	radix__change_memory_range(start, end, _PAGE_WRITE);
236 
237 	for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
238 		end = start + PAGE_SIZE;
239 		if (overlaps_interrupt_vector_text(start, end))
240 			radix__change_memory_range(start, end, _PAGE_WRITE);
241 		else
242 			break;
243 	}
244 }
245 
246 void radix__mark_initmem_nx(void)
247 {
248 	unsigned long start = (unsigned long)__init_begin;
249 	unsigned long end = (unsigned long)__init_end;
250 
251 	radix__change_memory_range(start, end, _PAGE_EXEC);
252 }
253 #endif /* CONFIG_STRICT_KERNEL_RWX */
254 
255 static inline void __meminit
256 print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
257 {
258 	char buf[10];
259 
260 	if (end <= start)
261 		return;
262 
263 	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
264 
265 	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
266 		exec ? " (exec)" : "");
267 }
268 
269 static unsigned long next_boundary(unsigned long addr, unsigned long end)
270 {
271 #ifdef CONFIG_STRICT_KERNEL_RWX
272 	unsigned long stext_phys;
273 
274 	stext_phys = __pa_symbol(_stext);
275 
276 	// Relocatable kernel running at non-zero real address
277 	if (stext_phys != 0) {
278 		// The end of interrupts code at zero is a rodata boundary
279 		unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
280 		if (addr < end_intr)
281 			return end_intr;
282 
283 		// Start of relocated kernel text is a rodata boundary
284 		if (addr < stext_phys)
285 			return stext_phys;
286 	}
287 
288 	if (addr < __pa_symbol(__srwx_boundary))
289 		return __pa_symbol(__srwx_boundary);
290 #endif
291 	return end;
292 }
293 
294 static int __meminit create_physical_mapping(unsigned long start,
295 					     unsigned long end,
296 					     int nid, pgprot_t _prot)
297 {
298 	unsigned long vaddr, addr, mapping_size = 0;
299 	bool prev_exec, exec = false;
300 	pgprot_t prot;
301 	int psize;
302 	unsigned long max_mapping_size = memory_block_size;
303 
304 	if (debug_pagealloc_enabled_or_kfence())
305 		max_mapping_size = PAGE_SIZE;
306 
307 	start = ALIGN(start, PAGE_SIZE);
308 	end   = ALIGN_DOWN(end, PAGE_SIZE);
309 	for (addr = start; addr < end; addr += mapping_size) {
310 		unsigned long gap, previous_size;
311 		int rc;
312 
313 		gap = next_boundary(addr, end) - addr;
314 		if (gap > max_mapping_size)
315 			gap = max_mapping_size;
316 		previous_size = mapping_size;
317 		prev_exec = exec;
318 
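		/*
		 * Use the largest page size that both the alignment of addr
		 * and the gap to the next boundary allow: 1G, then 2M, then
		 * the base page size.
		 */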
319 		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
320 		    mmu_psize_defs[MMU_PAGE_1G].shift) {
321 			mapping_size = PUD_SIZE;
322 			psize = MMU_PAGE_1G;
323 		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
324 			   mmu_psize_defs[MMU_PAGE_2M].shift) {
325 			mapping_size = PMD_SIZE;
326 			psize = MMU_PAGE_2M;
327 		} else {
328 			mapping_size = PAGE_SIZE;
329 			psize = mmu_virtual_psize;
330 		}
331 
332 		vaddr = (unsigned long)__va(addr);
333 
334 		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
335 		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
336 			prot = PAGE_KERNEL_X;
337 			exec = true;
338 		} else {
339 			prot = _prot;
340 			exec = false;
341 		}
342 
343 		if (mapping_size != previous_size || exec != prev_exec) {
344 			print_mapping(start, addr, previous_size, prev_exec);
345 			start = addr;
346 		}
347 
348 		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
349 		if (rc)
350 			return rc;
351 
352 		update_page_count(psize, 1);
353 	}
354 
355 	print_mapping(start, addr, mapping_size, exec);
356 	return 0;
357 }
358 
359 static void __init radix_init_pgtable(void)
360 {
361 	unsigned long rts_field;
362 	phys_addr_t start, end;
363 	u64 i;
364 
365 	/* We don't support slb for radix */
366 	slb_set_size(0);
367 
368 	/*
369 	 * Create the linear mapping
370 	 */
371 	for_each_mem_range(i, &start, &end) {
372 		/*
373 		 * The memblock allocator is up at this point, so the
374 		 * page tables will be allocated within the range. No
375 		 * need for a node (which we don't have yet).
376 		 */
377 
378 		if (end >= RADIX_VMALLOC_START) {
379 			pr_warn("Outside the supported range\n");
380 			continue;
381 		}
382 
383 		WARN_ON(create_physical_mapping(start, end,
384 						-1, PAGE_KERNEL));
385 	}
386 
387 	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
388 			cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
389 		/*
390 		 * Older versions of KVM on these machines prefer if the
391 		 * guest only uses the low 19 PID bits.
392 		 */
393 		mmu_pid_bits = 19;
394 	}
395 	mmu_base_pid = 1;
396 
397 	/*
398 	 * Allocate Partition table and process table for the
399 	 * host.
400 	 */
401 	BUG_ON(PRTB_SIZE_SHIFT > 36);
402 	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
403 	/*
404 	 * Fill in the process table.
405 	 */
406 	rts_field = radix__get_tree_size();
407 	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
408 
409 	/*
410 	 * The init_mm context is given the first available (non-zero) PID,
411 	 * which is the "guard PID" and contains no page table. PIDR should
412 	 * never be set to zero because that duplicates the kernel address
413 	 * space at the 0x0... offset (quadrant 0)!
414 	 *
415 	 * An arbitrary PID that may later be allocated by the PID allocator
416 	 * for userspace processes must not be used either, because that
417 	 * would cause stale user mappings for that PID on CPUs outside of
418 	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
419 	 *
420 	 * So permanently carve out one PID for the purpose of a guard PID.
421 	 */
422 	init_mm.context.id = mmu_base_pid;
423 	mmu_base_pid++;
424 }
425 
426 static void __init radix_init_partition_table(void)
427 {
428 	unsigned long rts_field, dw0, dw1;
429 
430 	mmu_partition_table_init();
431 	rts_field = radix__get_tree_size();
432 	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
433 	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
434 	mmu_partition_table_set_entry(0, dw0, dw1, false);
435 
436 	pr_info("Initializing Radix MMU\n");
437 }
438 
439 static int __init get_idx_from_shift(unsigned int shift)
440 {
441 	int idx = -1;
442 
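	/* Shift encodings from the device tree: 0xc = 4K, 0x10 = 64K, 0x15 = 2M, 0x1e = 1G. */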
443 	switch (shift) {
444 	case 0xc:
445 		idx = MMU_PAGE_4K;
446 		break;
447 	case 0x10:
448 		idx = MMU_PAGE_64K;
449 		break;
450 	case 0x15:
451 		idx = MMU_PAGE_2M;
452 		break;
453 	case 0x1e:
454 		idx = MMU_PAGE_1G;
455 		break;
456 	}
457 	return idx;
458 }
459 
460 static int __init radix_dt_scan_page_sizes(unsigned long node,
461 					   const char *uname, int depth,
462 					   void *data)
463 {
464 	int size = 0;
465 	int shift, idx;
466 	unsigned int ap;
467 	const __be32 *prop;
468 	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
469 
470 	/* We are scanning "cpu" nodes only */
471 	if (type == NULL || strcmp(type, "cpu") != 0)
472 		return 0;
473 
474 	/* Grab page size encodings */
475 	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
476 	if (!prop)
477 		return 0;
478 
479 	pr_info("Page sizes from device-tree:\n");
480 	for (; size >= 4; size -= 4, ++prop) {
481 
482 		struct mmu_psize_def *def;
483 
484 		/* top 3 bits are the AP encoding */
485 		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
486 		ap = be32_to_cpu(prop[0]) >> 29;
487 		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
488 
489 		idx = get_idx_from_shift(shift);
490 		if (idx < 0)
491 			continue;
492 
493 		def = &mmu_psize_defs[idx];
494 		def->shift = shift;
495 		def->ap  = ap;
496 		def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
497 	}
498 
499 	/* needed ? */
500 	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
501 	return 1;
502 }
503 
504 void __init radix__early_init_devtree(void)
505 {
506 	int rc;
507 
508 	/*
509 	 * Try to find the available page sizes in the device-tree
510 	 */
511 	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
512 	if (!rc) {
513 		/*
514 		 * No page size details found in device tree.
515 		 * Let's assume we have 4k and 64k page support
516 		 */
517 		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
518 		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
519 		mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
520 			psize_to_rpti_pgsize(MMU_PAGE_4K);
521 
522 		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
523 		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
524 		mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
525 			psize_to_rpti_pgsize(MMU_PAGE_64K);
526 	}
527 	return;
528 }
529 
530 void __init radix__early_init_mmu(void)
531 {
532 	unsigned long lpcr;
533 
534 #ifdef CONFIG_PPC_64S_HASH_MMU
535 #ifdef CONFIG_PPC_64K_PAGES
536 	/* PAGE_SIZE mappings */
537 	mmu_virtual_psize = MMU_PAGE_64K;
538 #else
539 	mmu_virtual_psize = MMU_PAGE_4K;
540 #endif
541 #endif
542 	/*
543 	 * initialize page table size
544 	 */
545 	__pte_index_size = RADIX_PTE_INDEX_SIZE;
546 	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
547 	__pud_index_size = RADIX_PUD_INDEX_SIZE;
548 	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
549 	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
550 	__pte_table_size = RADIX_PTE_TABLE_SIZE;
551 	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
552 	__pud_table_size = RADIX_PUD_TABLE_SIZE;
553 	__pgd_table_size = RADIX_PGD_TABLE_SIZE;
554 
555 	__pmd_val_bits = RADIX_PMD_VAL_BITS;
556 	__pud_val_bits = RADIX_PUD_VAL_BITS;
557 	__pgd_val_bits = RADIX_PGD_VAL_BITS;
558 
559 	__kernel_virt_start = RADIX_KERN_VIRT_START;
560 	__vmalloc_start = RADIX_VMALLOC_START;
561 	__vmalloc_end = RADIX_VMALLOC_END;
562 	__kernel_io_start = RADIX_KERN_IO_START;
563 	__kernel_io_end = RADIX_KERN_IO_END;
564 	vmemmap = (struct page *)RADIX_VMEMMAP_START;
565 	ioremap_bot = IOREMAP_BASE;
566 
567 #ifdef CONFIG_PCI
568 	pci_io_base = ISA_IO_BASE;
569 #endif
570 	__pte_frag_nr = RADIX_PTE_FRAG_NR;
571 	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
572 	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
573 	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
574 
575 	radix_init_pgtable();
576 
577 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
578 		lpcr = mfspr(SPRN_LPCR);
579 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
580 		radix_init_partition_table();
581 	} else {
582 		radix_init_pseries();
583 	}
584 
585 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
586 
587 	/* Switch to the guard PID before turning on MMU */
588 	radix__switch_mmu_context(NULL, &init_mm);
589 	tlbiel_all();
590 }
591 
592 void radix__early_init_mmu_secondary(void)
593 {
594 	unsigned long lpcr;
595 	/*
596 	 * update partition table control register and UPRT
597 	 */
598 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
599 		lpcr = mfspr(SPRN_LPCR);
600 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
601 
602 		set_ptcr_when_no_uv(__pa(partition_tb) |
603 				    (PATB_SIZE_SHIFT - 12));
604 	}
605 
606 	radix__switch_mmu_context(NULL, &init_mm);
607 	tlbiel_all();
608 
609 	/* Make sure userspace can't change the AMR */
610 	mtspr(SPRN_UAMOR, 0);
611 }
612 
613 /* Called during kexec sequence with MMU off */
614 notrace void radix__mmu_cleanup_all(void)
615 {
616 	unsigned long lpcr;
617 
618 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
619 		lpcr = mfspr(SPRN_LPCR);
620 		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
621 		set_ptcr_when_no_uv(0);
622 		powernv_set_nmmu_ptcr(0);
623 		radix__flush_tlb_all();
624 	}
625 }
626 
627 #ifdef CONFIG_MEMORY_HOTPLUG
628 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
629 {
630 	pte_t *pte;
631 	int i;
632 
633 	for (i = 0; i < PTRS_PER_PTE; i++) {
634 		pte = pte_start + i;
635 		if (!pte_none(*pte))
636 			return;
637 	}
638 
639 	pte_free_kernel(&init_mm, pte_start);
640 	pmd_clear(pmd);
641 }
642 
643 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
644 {
645 	pmd_t *pmd;
646 	int i;
647 
648 	for (i = 0; i < PTRS_PER_PMD; i++) {
649 		pmd = pmd_start + i;
650 		if (!pmd_none(*pmd))
651 			return;
652 	}
653 
654 	pmd_free(&init_mm, pmd_start);
655 	pud_clear(pud);
656 }
657 
658 static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
659 {
660 	pud_t *pud;
661 	int i;
662 
663 	for (i = 0; i < PTRS_PER_PUD; i++) {
664 		pud = pud_start + i;
665 		if (!pud_none(*pud))
666 			return;
667 	}
668 
669 	pud_free(&init_mm, pud_start);
670 	p4d_clear(p4d);
671 }
672 
673 #ifdef CONFIG_SPARSEMEM_VMEMMAP
674 static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
675 {
676 	unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
677 
678 	return !vmemmap_populated(start, PMD_SIZE);
679 }
680 
681 static bool __meminit vmemmap_page_is_unused(unsigned long addr, unsigned long end)
682 {
683 	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
684 
685 	return !vmemmap_populated(start, PAGE_SIZE);
686 
687 }
688 #endif
689 
690 static void __meminit free_vmemmap_pages(struct page *page,
691 					 struct vmem_altmap *altmap,
692 					 int order)
693 {
694 	unsigned int nr_pages = 1 << order;
695 
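	/*
	 * The backing pages may have come from the altmap (device memory),
	 * from memblock (marked reserved) or from the page allocator; free
	 * them through the matching path.
	 */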
696 	if (altmap) {
697 		unsigned long alt_start, alt_end;
698 		unsigned long base_pfn = page_to_pfn(page);
699 
700 		/*
701 		 * With 2M vmemmap mapping we can have things set up
702 		 * such that even though an altmap is specified we never
703 		 * use the altmap.
704 		 */
705 		alt_start = altmap->base_pfn;
706 		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
707 
708 		if (base_pfn >= alt_start && base_pfn < alt_end) {
709 			vmem_altmap_free(altmap, nr_pages);
710 			return;
711 		}
712 	}
713 
714 	if (PageReserved(page)) {
715 		/* allocated from memblock */
716 		while (nr_pages--)
717 			free_reserved_page(page++);
718 	} else
719 		free_pages((unsigned long)page_address(page), order);
720 }
721 
722 static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr,
723 				       unsigned long end, bool direct,
724 				       struct vmem_altmap *altmap)
725 {
726 	unsigned long next, pages = 0;
727 	pte_t *pte;
728 
729 	pte = pte_start + pte_index(addr);
730 	for (; addr < end; addr = next, pte++) {
731 		next = (addr + PAGE_SIZE) & PAGE_MASK;
732 		if (next > end)
733 			next = end;
734 
735 		if (!pte_present(*pte))
736 			continue;
737 
738 		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
739 			if (!direct)
740 				free_vmemmap_pages(pte_page(*pte), altmap, 0);
741 			pte_clear(&init_mm, addr, pte);
742 			pages++;
743 		}
744 #ifdef CONFIG_SPARSEMEM_VMEMMAP
745 		else if (!direct && vmemmap_page_is_unused(addr, next)) {
746 			free_vmemmap_pages(pte_page(*pte), altmap, 0);
747 			pte_clear(&init_mm, addr, pte);
748 		}
749 #endif
750 	}
751 	if (direct)
752 		update_page_count(mmu_virtual_psize, -pages);
753 }
754 
755 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
756 				       unsigned long end, bool direct,
757 				       struct vmem_altmap *altmap)
758 {
759 	unsigned long next, pages = 0;
760 	pte_t *pte_base;
761 	pmd_t *pmd;
762 
763 	pmd = pmd_start + pmd_index(addr);
764 	for (; addr < end; addr = next, pmd++) {
765 		next = pmd_addr_end(addr, end);
766 
767 		if (!pmd_present(*pmd))
768 			continue;
769 
770 		if (pmd_is_leaf(*pmd)) {
771 			if (IS_ALIGNED(addr, PMD_SIZE) &&
772 			    IS_ALIGNED(next, PMD_SIZE)) {
773 				if (!direct)
774 					free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
775 				pte_clear(&init_mm, addr, (pte_t *)pmd);
776 				pages++;
777 			}
778 #ifdef CONFIG_SPARSEMEM_VMEMMAP
779 			else if (!direct && vmemmap_pmd_is_unused(addr, next)) {
780 				free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
781 				pte_clear(&init_mm, addr, (pte_t *)pmd);
782 			}
783 #endif
784 			continue;
785 		}
786 
787 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
788 		remove_pte_table(pte_base, addr, next, direct, altmap);
789 		free_pte_table(pte_base, pmd);
790 	}
791 	if (direct)
792 		update_page_count(MMU_PAGE_2M, -pages);
793 }
794 
795 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
796 				       unsigned long end, bool direct,
797 				       struct vmem_altmap *altmap)
798 {
799 	unsigned long next, pages = 0;
800 	pmd_t *pmd_base;
801 	pud_t *pud;
802 
803 	pud = pud_start + pud_index(addr);
804 	for (; addr < end; addr = next, pud++) {
805 		next = pud_addr_end(addr, end);
806 
807 		if (!pud_present(*pud))
808 			continue;
809 
810 		if (pud_is_leaf(*pud)) {
811 			if (!IS_ALIGNED(addr, PUD_SIZE) ||
812 			    !IS_ALIGNED(next, PUD_SIZE)) {
813 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
814 				continue;
815 			}
816 			pte_clear(&init_mm, addr, (pte_t *)pud);
817 			pages++;
818 			continue;
819 		}
820 
821 		pmd_base = pud_pgtable(*pud);
822 		remove_pmd_table(pmd_base, addr, next, direct, altmap);
823 		free_pmd_table(pmd_base, pud);
824 	}
825 	if (direct)
826 		update_page_count(MMU_PAGE_1G, -pages);
827 }
828 
829 static void __meminit
830 remove_pagetable(unsigned long start, unsigned long end, bool direct,
831 		 struct vmem_altmap *altmap)
832 {
833 	unsigned long addr, next;
834 	pud_t *pud_base;
835 	pgd_t *pgd;
836 	p4d_t *p4d;
837 
838 	spin_lock(&init_mm.page_table_lock);
839 
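	/*
	 * Walk the kernel page table top-down, clearing leaf entries within
	 * the range and freeing page table pages that become empty.
	 */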
840 	for (addr = start; addr < end; addr = next) {
841 		next = pgd_addr_end(addr, end);
842 
843 		pgd = pgd_offset_k(addr);
844 		p4d = p4d_offset(pgd, addr);
845 		if (!p4d_present(*p4d))
846 			continue;
847 
848 		if (p4d_is_leaf(*p4d)) {
849 			if (!IS_ALIGNED(addr, P4D_SIZE) ||
850 			    !IS_ALIGNED(next, P4D_SIZE)) {
851 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
852 				continue;
853 			}
854 
855 			pte_clear(&init_mm, addr, (pte_t *)pgd);
856 			continue;
857 		}
858 
859 		pud_base = p4d_pgtable(*p4d);
860 		remove_pud_table(pud_base, addr, next, direct, altmap);
861 		free_pud_table(pud_base, p4d);
862 	}
863 
864 	spin_unlock(&init_mm.page_table_lock);
865 	radix__flush_tlb_kernel_range(start, end);
866 }
867 
868 int __meminit radix__create_section_mapping(unsigned long start,
869 					    unsigned long end, int nid,
870 					    pgprot_t prot)
871 {
872 	if (end >= RADIX_VMALLOC_START) {
873 		pr_warn("Outside the supported range\n");
874 		return -1;
875 	}
876 
877 	return create_physical_mapping(__pa(start), __pa(end),
878 				       nid, prot);
879 }
880 
881 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
882 {
883 	remove_pagetable(start, end, true, NULL);
884 	return 0;
885 }
886 #endif /* CONFIG_MEMORY_HOTPLUG */
887 
888 #ifdef CONFIG_SPARSEMEM_VMEMMAP
889 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
890 				 pgprot_t flags, unsigned int map_page_size,
891 				 int nid)
892 {
893 	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
894 }
895 
896 int __meminit radix__vmemmap_create_mapping(unsigned long start,
897 				      unsigned long page_size,
898 				      unsigned long phys)
899 {
900 	/* Create a PTE encoding */
901 	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
902 	int ret;
903 
904 	if ((start + page_size) >= RADIX_VMEMMAP_END) {
905 		pr_warn("Outside the supported range\n");
906 		return -1;
907 	}
908 
909 	ret = __map_kernel_page_nid(start, phys, PAGE_KERNEL, page_size, nid);
910 	BUG_ON(ret);
911 
912 	return 0;
913 }
914 
915 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
916 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
917 {
918 	if (radix_enabled())
919 		return __vmemmap_can_optimize(altmap, pgmap);
920 
921 	return false;
922 }
923 #endif
924 
925 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
926 				unsigned long addr, unsigned long next)
927 {
928 	int large = pmd_large(*pmdp);
929 
930 	if (large)
931 		vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
932 
933 	return large;
934 }
935 
936 void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
937 			       unsigned long addr, unsigned long next)
938 {
939 	pte_t entry;
940 	pte_t *ptep = pmdp_ptep(pmdp);
941 
942 	VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE));
943 	entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
944 	set_pte_at(&init_mm, addr, ptep, entry);
945 	asm volatile("ptesync": : :"memory");
946 
947 	vmemmap_verify(ptep, node, addr, next);
948 }
949 
950 static pte_t * __meminit radix__vmemmap_pte_populate(pmd_t *pmdp, unsigned long addr,
951 						     int node,
952 						     struct vmem_altmap *altmap,
953 						     struct page *reuse)
954 {
955 	pte_t *pte = pte_offset_kernel(pmdp, addr);
956 
957 	if (pte_none(*pte)) {
958 		pte_t entry;
959 		void *p;
960 
961 		if (!reuse) {
962 			/*
963 			 * make sure we don't create altmap mappings
964 			 * covering things outside the device.
965 			 */
966 			if (altmap && altmap_cross_boundary(altmap, addr, PAGE_SIZE))
967 				altmap = NULL;
968 
969 			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
970 			if (!p && altmap)
971 				p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);
972 			if (!p)
973 				return NULL;
974 			pr_debug("PAGE_SIZE vmemmap mapping\n");
975 		} else {
976 			/*
977 			 * When a PTE/PMD entry is freed from the init_mm
978 			 * there's a free_pages() call to this page allocated
979 			 * above. Thus this get_page() is paired with the
980 			 * put_page_testzero() on the freeing path.
981 			 * This can only be called by certain ZONE_DEVICE paths,
982 			 * and through vmemmap_populate_compound_pages() when
983 			 * slab is available.
984 			 */
985 			get_page(reuse);
986 			p = page_to_virt(reuse);
987 			pr_debug("Tail page reuse vmemmap mapping\n");
988 		}
989 
990 		VM_BUG_ON(!PAGE_ALIGNED(addr));
991 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
992 		set_pte_at(&init_mm, addr, pte, entry);
993 		asm volatile("ptesync": : :"memory");
994 	}
995 	return pte;
996 }
997 
998 static inline pud_t *vmemmap_pud_alloc(p4d_t *p4dp, int node,
999 				       unsigned long address)
1000 {
1001 	pud_t *pud;
1002 
1003 	/* All early vmemmap mappings are done at PAGE_SIZE to keep things simple */
1004 	if (unlikely(p4d_none(*p4dp))) {
1005 		if (unlikely(!slab_is_available())) {
1006 			pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1007 			p4d_populate(&init_mm, p4dp, pud);
1008 			/* go to the pud_offset */
1009 		} else
1010 			return pud_alloc(&init_mm, p4dp, address);
1011 	}
1012 	return pud_offset(p4dp, address);
1013 }
1014 
1015 static inline pmd_t *vmemmap_pmd_alloc(pud_t *pudp, int node,
1016 				       unsigned long address)
1017 {
1018 	pmd_t *pmd;
1019 
1020 	/* All early vmemmap mappings are done at PAGE_SIZE to keep things simple */
1021 	if (unlikely(pud_none(*pudp))) {
1022 		if (unlikely(!slab_is_available())) {
1023 			pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1024 			pud_populate(&init_mm, pudp, pmd);
1025 		} else
1026 			return pmd_alloc(&init_mm, pudp, address);
1027 	}
1028 	return pmd_offset(pudp, address);
1029 }
1030 
1031 static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node,
1032 				       unsigned long address)
1033 {
1034 	pte_t *pte;
1035 
1036 	/* All early vmemmap mappings are done at PAGE_SIZE to keep things simple */
1037 	if (unlikely(pmd_none(*pmdp))) {
1038 		if (unlikely(!slab_is_available())) {
1039 			pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1040 			pmd_populate(&init_mm, pmdp, pte);
1041 		} else
1042 			return pte_alloc_kernel(pmdp, address);
1043 	}
1044 	return pte_offset_kernel(pmdp, address);
1045 }
1046 
1047 
1048 
1049 int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
1050 				      struct vmem_altmap *altmap)
1051 {
1052 	unsigned long addr;
1053 	unsigned long next;
1054 	pgd_t *pgd;
1055 	p4d_t *p4d;
1056 	pud_t *pud;
1057 	pmd_t *pmd;
1058 	pte_t *pte;
1059 
1060 	/*
1061 	 * Make sure we align the start vmemmap addr so that we calculate
1062 	 * the correct start_pfn in the altmap boundary check to decide whether
1063 	 * we should use altmap or RAM based backing memory allocation. Also
1064 	 * the address needs to be aligned for the set_pte operation.
1065 	 *
1066 	 * If the start addr is already PMD_SIZE aligned we will try to use
1067 	 * a pmd mapping. We don't want to be too aggressive here because
1068 	 * that will cause more allocations in RAM. So only if the namespace
1069 	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
1070 	 */
1071 
1072 	start = ALIGN_DOWN(start, PAGE_SIZE);
1073 	for (addr = start; addr < end; addr = next) {
1074 		next = pmd_addr_end(addr, end);
1075 
1076 		pgd = pgd_offset_k(addr);
1077 		p4d = p4d_offset(pgd, addr);
1078 		pud = vmemmap_pud_alloc(p4d, node, addr);
1079 		if (!pud)
1080 			return -ENOMEM;
1081 		pmd = vmemmap_pmd_alloc(pud, node, addr);
1082 		if (!pmd)
1083 			return -ENOMEM;
1084 
1085 		if (pmd_none(READ_ONCE(*pmd))) {
1086 			void *p;
1087 
1088 			/*
1089 			 * keep it simple by checking addr PMD_SIZE alignment
1090 			 * and verifying the device boundary condition.
1091 			 * For us to use a pmd mapping, both addr and pfn should
1092 			 * be aligned. We skip if addr is not aligned and for
1093 			 * pfn we hope we have extra area in the altmap that
1094 			 * can help to find an aligned block. This can result
1095 			 * in altmap block allocation failures, in which case
1096 			 * we fall back to RAM for vmemmap allocation.
1097 			 */
1098 			if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
1099 			    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
1100 				/*
1101 				 * make sure we don't create altmap mappings
1102 				 * covering things outside the device.
1103 				 */
1104 				goto base_mapping;
1105 			}
1106 
1107 			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1108 			if (p) {
1109 				vmemmap_set_pmd(pmd, p, node, addr, next);
1110 				pr_debug("PMD_SIZE vmemmap mapping\n");
1111 				continue;
1112 			} else if (altmap) {
1113 				/*
1114 				 * A vmemmap block allocation can fail due to
1115 				 * alignment requirements, and our trying to align
1116 				 * things aggressively can thereby run out of
1117 				 * space. Try base mapping on failure.
1118 				 */
1119 				goto base_mapping;
1120 			}
1121 		} else if (vmemmap_check_pmd(pmd, node, addr, next)) {
1122 			/*
1123 			 * If a huge mapping exists due to an early call to
1124 			 * vmemmap_populate, let's try to use that.
1125 			 */
1126 			continue;
1127 		}
1128 base_mapping:
1129 		/*
1130 		 * Not able to allocate higher order memory to back the memmap,
1131 		 * or we found a pointer to a pte page. Allocate base page
1132 		 * size vmemmap.
1133 		 */
1134 		pte = vmemmap_pte_alloc(pmd, node, addr);
1135 		if (!pte)
1136 			return -ENOMEM;
1137 
1138 		pte = radix__vmemmap_pte_populate(pmd, addr, node, altmap, NULL);
1139 		if (!pte)
1140 			return -ENOMEM;
1141 
1142 		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1143 		next = addr + PAGE_SIZE;
1144 	}
1145 	return 0;
1146 }
1147 
1148 static pte_t * __meminit radix__vmemmap_populate_address(unsigned long addr, int node,
1149 							 struct vmem_altmap *altmap,
1150 							 struct page *reuse)
1151 {
1152 	pgd_t *pgd;
1153 	p4d_t *p4d;
1154 	pud_t *pud;
1155 	pmd_t *pmd;
1156 	pte_t *pte;
1157 
1158 	pgd = pgd_offset_k(addr);
1159 	p4d = p4d_offset(pgd, addr);
1160 	pud = vmemmap_pud_alloc(p4d, node, addr);
1161 	if (!pud)
1162 		return NULL;
1163 	pmd = vmemmap_pmd_alloc(pud, node, addr);
1164 	if (!pmd)
1165 		return NULL;
1166 	if (pmd_leaf(*pmd))
1167 		/*
1168 		 * The second page is mapped as a hugepage due to a nearby request.
1169 		 * Force our mapping to page size without deduplication
1170 		 */
1171 		return NULL;
1172 	pte = vmemmap_pte_alloc(pmd, node, addr);
1173 	if (!pte)
1174 		return NULL;
1175 	radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1176 	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1177 
1178 	return pte;
1179 }
1180 
1181 static pte_t * __meminit vmemmap_compound_tail_page(unsigned long addr,
1182 						    unsigned long pfn_offset, int node)
1183 {
1184 	pgd_t *pgd;
1185 	p4d_t *p4d;
1186 	pud_t *pud;
1187 	pmd_t *pmd;
1188 	pte_t *pte;
1189 	unsigned long map_addr;
1190 
1191 	/* the second vmemmap page which we use for duplication */
1192 	map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE;
1193 	pgd = pgd_offset_k(map_addr);
1194 	p4d = p4d_offset(pgd, map_addr);
1195 	pud = vmemmap_pud_alloc(p4d, node, map_addr);
1196 	if (!pud)
1197 		return NULL;
1198 	pmd = vmemmap_pmd_alloc(pud, node, map_addr);
1199 	if (!pmd)
1200 		return NULL;
1201 	if (pmd_leaf(*pmd))
1202 		/*
1203 		 * The second page is mapped as a hugepage due to a nearby request.
1204 		 * Force our mapping to page size without deduplication
1205 		 */
1206 		return NULL;
1207 	pte = vmemmap_pte_alloc(pmd, node, map_addr);
1208 	if (!pte)
1209 		return NULL;
1210 	/*
1211 	 * Check if there exists a mapping to the left
1212 	 */
1213 	if (pte_none(*pte)) {
1214 		/*
1215 		 * Populate the head page vmemmap page.
1216 		 * It can fall in different pmd, hence
1217 		 * vmemmap_populate_address()
1218 		 */
1219 		pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL);
1220 		if (!pte)
1221 			return NULL;
1222 		/*
1223 		 * Populate the tail pages vmemmap page
1224 		 */
1225 		pte = radix__vmemmap_pte_populate(pmd, map_addr, node, NULL, NULL);
1226 		if (!pte)
1227 			return NULL;
1228 		vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE);
1229 		return pte;
1230 	}
1231 	return pte;
1232 }
1233 
1234 int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
1235 					      unsigned long start,
1236 					      unsigned long end, int node,
1237 					      struct dev_pagemap *pgmap)
1238 {
1239 	/*
1240 	 * We want to map things with base page size mappings so that
1241 	 * we can save space in vmemmap. We could have huge mappings
1242 	 * covering both edges.
1243 	 */
1244 	unsigned long addr;
1245 	unsigned long addr_pfn = start_pfn;
1246 	unsigned long next;
1247 	pgd_t *pgd;
1248 	p4d_t *p4d;
1249 	pud_t *pud;
1250 	pmd_t *pmd;
1251 	pte_t *pte;
1252 
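	/*
	 * The first two vmemmap pages of each compound page get their own
	 * backing pages; all later vmemmap pages within that compound page
	 * reuse the second page's backing page.
	 */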
1253 	for (addr = start; addr < end; addr = next) {
1254 
1255 		pgd = pgd_offset_k(addr);
1256 		p4d = p4d_offset(pgd, addr);
1257 		pud = vmemmap_pud_alloc(p4d, node, addr);
1258 		if (!pud)
1259 			return -ENOMEM;
1260 		pmd = vmemmap_pmd_alloc(pud, node, addr);
1261 		if (!pmd)
1262 			return -ENOMEM;
1263 
1264 		if (pmd_leaf(READ_ONCE(*pmd))) {
1265 			/* existing huge mapping. Skip the range */
1266 			addr_pfn += (PMD_SIZE >> PAGE_SHIFT);
1267 			next = pmd_addr_end(addr, end);
1268 			continue;
1269 		}
1270 		pte = vmemmap_pte_alloc(pmd, node, addr);
1271 		if (!pte)
1272 			return -ENOMEM;
1273 		if (!pte_none(*pte)) {
1274 			/*
1275 			 * This could be because we already have a compound
1276 			 * page whose VMEMMAP_RESERVE_NR pages were mapped and
1277 			 * this request falls within those pages.
1278 			 */
1279 			addr_pfn += 1;
1280 			next = addr + PAGE_SIZE;
1281 			continue;
1282 		} else {
1283 			unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
1284 			unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages);
1285 			pte_t *tail_page_pte;
1286 
1287 			/*
1288 			 * if the address is aligned to huge page size it is the
1289 			 * head mapping.
1290 			 */
1291 			if (pfn_offset == 0) {
1292 				/* Populate the head page vmemmap page */
1293 				pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1294 				if (!pte)
1295 					return -ENOMEM;
1296 				vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1297 
1298 				/*
1299 				 * Populate the tail pages vmemmap page
1300 				 * It can fall in different pmd, hence
1301 				 * vmemmap_populate_address()
1302 				 */
1303 				pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);
1304 				if (!pte)
1305 					return -ENOMEM;
1306 
1307 				addr_pfn += 2;
1308 				next = addr + 2 * PAGE_SIZE;
1309 				continue;
1310 			}
1311 			/*
1312 			 * Get the 2nd mapping's details.
1313 			 * Also create it if it doesn't exist.
1314 			 */
1315 			tail_page_pte = vmemmap_compound_tail_page(addr, pfn_offset, node);
1316 			if (!tail_page_pte) {
1317 
1318 				pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1319 				if (!pte)
1320 					return -ENOMEM;
1321 				vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1322 
1323 				addr_pfn += 1;
1324 				next = addr + PAGE_SIZE;
1325 				continue;
1326 			}
1327 
1328 			pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, pte_page(*tail_page_pte));
1329 			if (!pte)
1330 				return -ENOMEM;
1331 			vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1332 
1333 			addr_pfn += 1;
1334 			next = addr + PAGE_SIZE;
1335 			continue;
1336 		}
1337 	}
1338 	return 0;
1339 }
1340 
1341 
1342 #ifdef CONFIG_MEMORY_HOTPLUG
1343 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
1344 {
1345 	remove_pagetable(start, start + page_size, true, NULL);
1346 }
1347 
1348 void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
1349 			       struct vmem_altmap *altmap)
1350 {
1351 	remove_pagetable(start, end, false, altmap);
1352 }
1353 #endif
1354 #endif
1355 
1356 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
1357 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
1358 {
1359 	unsigned long addr;
1360 
1361 	addr = (unsigned long)page_address(page);
1362 
1363 	if (enable)
1364 		set_memory_p(addr, numpages);
1365 	else
1366 		set_memory_np(addr, numpages);
1367 }
1368 #endif
1369 
1370 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1371 
1372 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
1373 				  pmd_t *pmdp, unsigned long clr,
1374 				  unsigned long set)
1375 {
1376 	unsigned long old;
1377 
1378 #ifdef CONFIG_DEBUG_VM
1379 	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
1380 	assert_spin_locked(pmd_lockptr(mm, pmdp));
1381 #endif
1382 
1383 	old = radix__pte_update(mm, addr, pmdp_ptep(pmdp), clr, set, 1);
1384 	trace_hugepage_update_pmd(addr, old, clr, set);
1385 
1386 	return old;
1387 }
1388 
1389 unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr,
1390 					 pud_t *pudp, unsigned long clr,
1391 					 unsigned long set)
1392 {
1393 	unsigned long old;
1394 
1395 #ifdef CONFIG_DEBUG_VM
1396 	WARN_ON(!pud_devmap(*pudp));
1397 	assert_spin_locked(pud_lockptr(mm, pudp));
1398 #endif
1399 
1400 	old = radix__pte_update(mm, addr, pudp_ptep(pudp), clr, set, 1);
1401 	trace_hugepage_update_pud(addr, old, clr, set);
1402 
1403 	return old;
1404 }
1405 
1406 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
1407 			pmd_t *pmdp)
1408 
1409 {
1410 	pmd_t pmd;
1411 
1412 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1413 	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
1414 	VM_BUG_ON(pmd_devmap(*pmdp));
1415 	/*
1416 	 * khugepaged calls this for normal pmd
1417 	 */
1418 	pmd = *pmdp;
1419 	pmd_clear(pmdp);
1420 
1421 	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
1422 
1423 	return pmd;
1424 }
1425 
1426 /*
1427  * For us pgtable_t is pte_t *. In order to save the deposited
1428  * page table, we consider the allocated page table as a list
1429  * head. On withdraw we need to make sure we zero out the used
1430  * list_head memory area.
1431  */
1432 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1433 				 pgtable_t pgtable)
1434 {
1435 	struct list_head *lh = (struct list_head *) pgtable;
1436 
1437 	assert_spin_locked(pmd_lockptr(mm, pmdp));
1438 
1439 	/* FIFO */
1440 	if (!pmd_huge_pte(mm, pmdp))
1441 		INIT_LIST_HEAD(lh);
1442 	else
1443 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1444 	pmd_huge_pte(mm, pmdp) = pgtable;
1445 }
1446 
1447 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1448 {
1449 	pte_t *ptep;
1450 	pgtable_t pgtable;
1451 	struct list_head *lh;
1452 
1453 	assert_spin_locked(pmd_lockptr(mm, pmdp));
1454 
1455 	/* FIFO */
1456 	pgtable = pmd_huge_pte(mm, pmdp);
1457 	lh = (struct list_head *) pgtable;
1458 	if (list_empty(lh))
1459 		pmd_huge_pte(mm, pmdp) = NULL;
1460 	else {
1461 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1462 		list_del(lh);
1463 	}
1464 	ptep = (pte_t *) pgtable;
1465 	*ptep = __pte(0);
1466 	ptep++;
1467 	*ptep = __pte(0);
1468 	return pgtable;
1469 }
1470 
1471 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1472 				     unsigned long addr, pmd_t *pmdp)
1473 {
1474 	pmd_t old_pmd;
1475 	unsigned long old;
1476 
1477 	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1478 	old_pmd = __pmd(old);
1479 	return old_pmd;
1480 }
1481 
1482 pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm,
1483 				     unsigned long addr, pud_t *pudp)
1484 {
1485 	pud_t old_pud;
1486 	unsigned long old;
1487 
1488 	old = radix__pud_hugepage_update(mm, addr, pudp, ~0UL, 0);
1489 	old_pud = __pud(old);
1490 	return old_pud;
1491 }
1492 
1493 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1494 
1495 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1496 				  pte_t entry, unsigned long address, int psize)
1497 {
1498 	struct mm_struct *mm = vma->vm_mm;
1499 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
1500 					      _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
1501 
1502 	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1503 	/*
1504 	 * On POWER9, the NMMU is not able to relax PTE access permissions
1505 	 * for a translation with a TLB. The PTE must be invalidated, TLB
1506 	 * flushed before the new PTE is installed.
1507 	 *
1508 	 * This only needs to be done for radix, because hash translation does
1509 	 * flush when updating the linux pte (and we don't support NMMU
1510 	 * accelerators on HPT on POWER9 anyway XXX: do we?).
1511 	 *
1512 	 * POWER10 (and P9P) NMMU does behave as per ISA.
1513 	 */
1514 	if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) &&
1515 	    atomic_read(&mm->context.copros) > 0) {
1516 		unsigned long old_pte, new_pte;
1517 
1518 		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1519 		new_pte = old_pte | set;
1520 		radix__flush_tlb_page_psize(mm, address, psize);
1521 		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1522 	} else {
1523 		__radix_pte_update(ptep, 0, set);
1524 		/*
1525 		 * Book3S does not require a TLB flush when relaxing access
1526 		 * restrictions in the address space (modulo the POWER9 nest
1527 		 * MMU issue above) because the MMU will reload the PTE after
1528 		 * taking an access fault, as defined by the architecture. See
1529 		 * "Setting a Reference or Change Bit or Upgrading Access
1530 		 *  Authority (PTE Subject to Atomic Hardware Updates)" in
1531 		 *  Power ISA Version 3.1B.
1532 		 */
1533 	}
1534 	/* See ptesync comment in radix__set_pte_at */
1535 }
1536 
1537 void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1538 				    unsigned long addr, pte_t *ptep,
1539 				    pte_t old_pte, pte_t pte)
1540 {
1541 	struct mm_struct *mm = vma->vm_mm;
1542 
1543 	/*
1544 	 * POWER9 NMMU must flush the TLB after clearing the PTE before
1545 	 * installing a PTE with more relaxed access permissions, see
1546 	 * radix__ptep_set_access_flags.
1547 	 */
1548 	if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
1549 	    is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1550 	    (atomic_read(&mm->context.copros) > 0))
1551 		radix__flush_tlb_page(vma, addr);
1552 
1553 	set_pte_at(mm, addr, ptep, pte);
1554 }
1555 
1556 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1557 {
1558 	pte_t *ptep = (pte_t *)pud;
1559 	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1560 
1561 	if (!radix_enabled())
1562 		return 0;
1563 
1564 	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1565 
1566 	return 1;
1567 }
1568 
1569 int pud_clear_huge(pud_t *pud)
1570 {
1571 	if (pud_is_leaf(*pud)) {
1572 		pud_clear(pud);
1573 		return 1;
1574 	}
1575 
1576 	return 0;
1577 }
1578 
1579 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1580 {
1581 	pmd_t *pmd;
1582 	int i;
1583 
1584 	pmd = pud_pgtable(*pud);
1585 	pud_clear(pud);
1586 
1587 	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1588 
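	/* Free any PTE pages still hanging off this PMD table before freeing it. */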
1589 	for (i = 0; i < PTRS_PER_PMD; i++) {
1590 		if (!pmd_none(pmd[i])) {
1591 			pte_t *pte;
1592 			pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1593 
1594 			pte_free_kernel(&init_mm, pte);
1595 		}
1596 	}
1597 
1598 	pmd_free(&init_mm, pmd);
1599 
1600 	return 1;
1601 }
1602 
1603 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1604 {
1605 	pte_t *ptep = (pte_t *)pmd;
1606 	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1607 
1608 	if (!radix_enabled())
1609 		return 0;
1610 
1611 	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1612 
1613 	return 1;
1614 }
1615 
1616 int pmd_clear_huge(pmd_t *pmd)
1617 {
1618 	if (pmd_is_leaf(*pmd)) {
1619 		pmd_clear(pmd);
1620 		return 1;
1621 	}
1622 
1623 	return 0;
1624 }
1625 
1626 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1627 {
1628 	pte_t *pte;
1629 
1630 	pte = (pte_t *)pmd_page_vaddr(*pmd);
1631 	pmd_clear(pmd);
1632 
1633 	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1634 
1635 	pte_free_kernel(&init_mm, pte);
1636 
1637 	return 1;
1638 }
1639