// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

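/*
 * Boot-time page table allocator: grabs naturally aligned memory from
 * memblock before the slab allocator is available, optionally constrained
 * to a node and physical region, and panics if the allocation fails.
 */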
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

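/*
 * Boot-time variant of the kernel mapping helper below: walks the kernel
 * page table for 'ea', allocating any missing levels from memblock via
 * early_alloc_pgtable(), and installs a PTE (or a leaf PUD/PMD entry for
 * PUD_SIZE/PMD_SIZE mappings) pointing at 'pa' with the given protection.
 */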
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
						region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
						region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure the task size is correct as per the maximum address.
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * TODO: make the page table allocation functions able to take a
	 * node, so we can place kernel page tables on the right node
	 * after boot.
	 */
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
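/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every page
 * of a kernel virtual range, then flush the TLB for that range. Used to
 * enforce the kernel text/rodata protections below.
 */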
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

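/*
 * Return the physical address at which the current mapping must stop so
 * that the kernel text/rodata region ends up on its own mappings when
 * STRICT_KERNEL_RWX is enabled; otherwise simply return 'end'.
 */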
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

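/*
 * Map a physical range into the kernel linear mapping, using the largest
 * page size (1G, 2M or the base page size) that the alignment, remaining
 * gap and supported MMU page sizes allow. Ranges overlapping kernel or
 * interrupt vector text are mapped executable; everything else is not.
 */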
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = PAGE_KERNEL;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support SLB for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
	}

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest
		 * PIDs, which can cause problems due to prefetch when
		 * exiting the guest with AIL=3.
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

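/*
 * Set up partition table entry 0 for the host: radix tree size and PGD
 * in dword 0, process table location and size in dword 1.
 */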
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}

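/*
 * Translate a page size shift reported by firmware into an MMU_PAGE_*
 * index, or -1 if the size is not one we support.
 */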
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

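/*
 * Flat device-tree scan callback: for "cpu" nodes, pick up the PID width
 * and the radix AP page-size encodings and record them in mmu_psize_defs[].
 */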
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* The top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap  = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;
	/*
	 * Nothing found in the device-tree: assume 4K and 64K page support.
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guest can set up IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

#ifdef CONFIG_PPC_KUEP
void setup_kuep(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid)
		pr_info("Activating Kernel Userspace Execution Prevention\n");

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}
#endif

#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid) {
		pr_info("Activating Kernel Userspace Access Prevention\n");
		cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
	}

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
	mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
	isync();
}
#endif

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
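/*
 * Memory hot-unplug helpers: free a PTE (or PMD) page and clear the entry
 * above it, but only once every slot in the table is empty.
 */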
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

struct change_mapping_params {
	pte_t *pte;
	unsigned long start;
	unsigned long end;
	unsigned long aligned_start;
	unsigned long aligned_end;
};

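/*
 * Runs under stop_machine(): clear the large-page PTE and re-create
 * mappings for the parts of the aligned region that are not being
 * removed, so the split happens with all other CPUs quiesced.
 */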
static int __meminit stop_machine_change_mapping(void *data)
{
	struct change_mapping_params *params =
			(struct change_mapping_params *)data;

	if (!data)
		return -1;

	spin_unlock(&init_mm.page_table_lock);
	pte_clear(&init_mm, params->aligned_start, params->pte);
	create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1);
	create_physical_mapping(__pa(params->end), __pa(params->aligned_end), -1);
	spin_lock(&init_mm.page_table_lock);
	return 0;
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

/*
 * Helper to clear the pte and potentially split the mapping.
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
				unsigned long size, pte_t *pte)
{
	unsigned long mask = ~(size - 1);
	unsigned long aligned_start = addr & mask;
	unsigned long aligned_end = addr + size;
	struct change_mapping_params params;
	bool split_region = false;

	if ((end - addr) < size) {
		/*
		 * We're going to clear the PTE without having flushed
		 * the mapping yet, so we need to remap and flush. If the
		 * stale mapping is visible outside the processor, or if
		 * we are running code close to the mapping we cleared,
		 * we are in trouble.
		 */
		if (overlaps_kernel_text(aligned_start, addr) ||
			overlaps_kernel_text(end, aligned_end)) {
			/*
			 * Hack, just return, don't pte_clear
			 */
			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
				  "text, not splitting\n", addr, end);
			return;
		}
		split_region = true;
	}

	if (split_region) {
		params.pte = pte;
		params.start = addr;
		params.end = end;
		params.aligned_start = addr & ~(size - 1);
		params.aligned_end = min_t(unsigned long, aligned_end,
				(unsigned long)__va(memblock_end_of_DRAM()));
		stop_machine(stop_machine_change_mapping, &params, NULL);
		return;
	}

	pte_clear(&init_mm, addr, pte);
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

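/*
 * Tear down the kernel mapping for [start, end): walk the page tables
 * under init_mm.page_table_lock, clearing (and if necessary splitting)
 * leaf entries and freeing empty lower-level tables, then flush the TLB.
 */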
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_is_leaf(*pgd)) {
			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end), nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte, which does a lock-less
	 * lookup in the page tables with local interrupts disabled. For huge
	 * pages it casts pmd_t to pte_t. Since the format of pte_t differs
	 * from pmd_t, we want to prevent a transition from a pmd pointing to
	 * a page table to a pmd pointing to a huge page (and back) while
	 * interrupts are disabled. We clear the pmd so that it may later be
	 * replaced with a page table pointer in different code paths, so make
	 * sure we wait for the parallel find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid an NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}

int __init arch_ioremap_pud_supported(void)
{
	/* HPT does not cope with large pages in the vmalloc area */
	return radix_enabled();
}

int __init arch_ioremap_pmd_supported(void)
{
	return radix_enabled();
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}

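/*
 * Helpers used by the generic ioremap code to install and tear down huge
 * (PUD and PMD) leaf mappings; installing them is only allowed when radix
 * is enabled.
 */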
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pud;
	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

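/*
 * Clear a huge-mapping-capable PUD slot for ioremap: detach the PMD table,
 * flush the kernel TLB for the range, free any PTE tables hanging off it
 * and finally free the PMD page itself.
 */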
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;
	int i;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte;
			pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(&init_mm, pte);
		}
	}

	pmd_free(&init_mm, pmd);

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pmd;
	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_huge(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	pte_free_kernel(&init_mm, pte);

	return 1;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}