// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned int radix_mem_block_size __ro_after_init;

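/*
 * Boot-time page table allocator: grab a naturally aligned block from
 * memblock, optionally constrained to [region_start, region_end) and a
 * NUMA node hint, and panic on failure since there is no way to recover
 * this early in boot.
 */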
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be completely used as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (p4d_none(*p4dp)) {
		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
					   region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the maximum address
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

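/*
 * Map a single kernel page with no NUMA or region placement hints
 * (nid = -1, no region bounds).
 */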
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
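/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every page
 * in [start, end), handling huge leaf entries at the PUD and PMD levels,
 * then flush the kernel TLB for the range.
 */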
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		p4dp = p4d_offset(pgdp, idx);
		pudp = pud_alloc(&init_mm, p4dp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

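/*
 * With STRICT_KERNEL_RWX, the linear mapping must not cross __init_begin,
 * so that the text/rodata below it can later have its permissions changed
 * without having to split a large page.
 */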
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

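/*
 * Map the physical range [start, end) into the linear mapping, using the
 * largest page size (1G, 2M or the base page size) that alignment, the
 * distance to the next boundary and max_mapping_size allow. Ranges that
 * overlap kernel text or the interrupt vectors are mapped executable.
 */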
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     unsigned long max_mapping_size,
					     int nid, pgprot_t _prot)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = ALIGN(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		if (gap > max_mapping_size)
			gap = max_mapping_size;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = _prot;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

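/*
 * Boot-time setup of the host radix tree: create the linear mapping for
 * all of memory, work out the usable PID range, and allocate and fill
 * the process table.
 */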
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;

	/*
	 * Create the linear mapping
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						radix_mem_block_size,
						-1, PAGE_KERNEL));
	}

	/* Find out how many PID bits are supported */
	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
		mmu_base_pid = 1;
	} else if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

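/*
 * Bare metal (HV mode) only: allocate the partition table and install
 * entry 0, pointing at the host radix tree and process table.
 */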
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}

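/*
 * Translate a radix page size shift from the device tree into an
 * MMU_PAGE_* index, or -1 if the size is not supported.
 */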
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap  = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __init probe_memory_block_size(unsigned long node, const char *uname,
					  int depth, void *data)
{
	unsigned long *mem_block_size = (unsigned long *)data;
	const __be64 *prop;
	int len;

	if (depth != 1)
		return 0;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(__be64))
		/*
		 * Nothing in the device tree
		 */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
	else
		*mem_block_size = be64_to_cpup(prop);
	return 1;
}

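/*
 * Memory block (LMB) size used to cap linear-mapping page sizes so that
 * blocks can later be hot-removed: 1GB on bare metal (OPAL), otherwise
 * the ibm,lmb-size value from the device tree.
 */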
static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * The OPAL firmware feature is set by now, so it is safe
	 * to test for it here.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		mem_block_size = 1UL * 1024 * 1024 * 1024;
	else
		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}

#else   /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
	return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */


void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (!rc) {
		/*
		 * No page size details found in device tree.
		 * Let's assume we have page 4k and 64k support
		 */
		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
	}

	/*
	 * Max mapping size used when mapping pages. We don't use
	 * ppc_md.memory_block_size() here because this gets called
	 * early, before the machine has been probed. Also the
	 * pseries implementation only checks for ibm,lmb-size.
	 * All hypervisors supporting radix expose that device
	 * tree node.
	 */
	radix_mem_block_size = radix_memory_block_size();
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

#ifdef CONFIG_PPC_KUEP
void setup_kuep(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid) {
		pr_info("Activating Kernel Userspace Execution Prevention\n");
		cur_cpu_spec->mmu_features |= MMU_FTR_KUEP;
	}

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}
#endif

#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid) {
		pr_info("Activating Kernel Userspace Access Prevention\n");
		cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
	}

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);

	/*
	 * Set the default kernel AMR values on all cpus.
	 */
	mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
	isync();
}
#endif

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
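/*
 * Hot-unplug teardown helpers: each free_*_table() frees a lower-level
 * table only once every entry in it is none, then clears the entry that
 * pointed to it.
 */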
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

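/*
 * Unmap the kernel virtual range [start, end), clearing leaf entries at
 * whatever level they are found, freeing page table pages that become
 * empty, and flushing the kernel TLB for the range. Runs under init_mm's
 * page_table_lock.
 */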
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;
	p4d_t *p4d;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d))
			continue;

		if (p4d_is_leaf(*p4d)) {
			if (!IS_ALIGNED(addr, P4D_SIZE) ||
			    !IS_ALIGNED(next, P4D_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)p4d_page_vaddr(*p4d);
		remove_pud_table(pud_base, addr, next);
		free_pud_table(pud_base, p4d);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
					    unsigned long end, int nid,
					    pgprot_t prot)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end),
				       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

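/*
 * Atomically clear and set bits in a huge-page PMD and emit the hugepage
 * update tracepoint. The caller must hold the PMD lock; the old PMD value
 * is returned.
 */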
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*
	 * pmdp_collapse_flush() needs to ensure that there are no parallel
	 * GUP walks after this call. This is needed so that we have a stable
	 * page ref count when collapsing a page. We don't allow collapsing a
	 * page that GUP has taken a reference on. We can ensure that by
	 * sending an IPI, because GUP walks happen with IRQs disabled.
	 */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid an NMMU hang while relaxing access, we need to flush the
	 * TLB before we set the new value. We only need to do this for radix,
	 * because hash translation already flushes when updating the Linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}

int __init arch_ioremap_pud_supported(void)
{
	/* HPT does not cope with large pages in the vmalloc area */
	return radix_enabled();
}

int __init arch_ioremap_pmd_supported(void)
{
	return radix_enabled();
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pud;
	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;
	int i;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte;
			pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(&init_mm, pte);
		}
	}

	pmd_free(&init_mm, pmd);

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pmd;
	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_huge(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	pte_free_kernel(&init_mm, pte);

	return 1;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}