1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Page table handling routines for radix page table.
4  *
5  * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
6  */
7 
8 #define pr_fmt(fmt) "radix-mmu: " fmt
9 
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <linux/sched/mm.h>
13 #include <linux/memblock.h>
14 #include <linux/of.h>
15 #include <linux/of_fdt.h>
16 #include <linux/mm.h>
17 #include <linux/hugetlb.h>
18 #include <linux/string_helpers.h>
19 #include <linux/memory.h>
20 
21 #include <asm/pgalloc.h>
22 #include <asm/mmu_context.h>
23 #include <asm/dma.h>
24 #include <asm/machdep.h>
25 #include <asm/mmu.h>
26 #include <asm/firmware.h>
27 #include <asm/powernv.h>
28 #include <asm/sections.h>
29 #include <asm/smp.h>
30 #include <asm/trace.h>
31 #include <asm/uaccess.h>
32 #include <asm/ultravisor.h>
33 
34 #include <trace/events/thp.h>
35 
36 unsigned int mmu_pid_bits;
37 unsigned int mmu_base_pid;
38 unsigned long radix_mem_block_size __ro_after_init;
39 
40 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
41 			unsigned long region_start, unsigned long region_end)
42 {
43 	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
44 	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
45 	void *ptr;
46 
47 	if (region_start)
48 		min_addr = region_start;
49 	if (region_end)
50 		max_addr = region_end;
51 
52 	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
53 
54 	if (!ptr)
55 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
56 		      __func__, size, size, nid, &min_addr, &max_addr);
57 
58 	return ptr;
59 }
60 
61 /*
62  * When allocating pud or pmd pointers, we allocate a complete page
63  * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
64  * is to ensure that the page obtained from the memblock allocator
65  * can be completely used as a page table page and can be freed
66  * correctly when the page table entries are removed.
67  */
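/*
 * Illustrative note (added, not from the original source): with a 64K base
 * page size a radix PMD or PUD table is only 2^RADIX_PMD_INDEX_SIZE * 8
 * bytes (4K), much smaller than PAGE_SIZE; allocating the full page keeps
 * the memblock allocation page-granular so it can later be freed as an
 * ordinary page table page.
 */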
68 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
69 			  pgprot_t flags,
70 			  unsigned int map_page_size,
71 			  int nid,
72 			  unsigned long region_start, unsigned long region_end)
73 {
74 	unsigned long pfn = pa >> PAGE_SHIFT;
75 	pgd_t *pgdp;
76 	p4d_t *p4dp;
77 	pud_t *pudp;
78 	pmd_t *pmdp;
79 	pte_t *ptep;
80 
81 	pgdp = pgd_offset_k(ea);
82 	p4dp = p4d_offset(pgdp, ea);
83 	if (p4d_none(*p4dp)) {
84 		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
85 					   region_start, region_end);
86 		p4d_populate(&init_mm, p4dp, pudp);
87 	}
88 	pudp = pud_offset(p4dp, ea);
89 	if (map_page_size == PUD_SIZE) {
90 		ptep = (pte_t *)pudp;
91 		goto set_the_pte;
92 	}
93 	if (pud_none(*pudp)) {
94 		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
95 					   region_end);
96 		pud_populate(&init_mm, pudp, pmdp);
97 	}
98 	pmdp = pmd_offset(pudp, ea);
99 	if (map_page_size == PMD_SIZE) {
100 		ptep = pmdp_ptep(pmdp);
101 		goto set_the_pte;
102 	}
103 	if (!pmd_present(*pmdp)) {
104 		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
105 						region_start, region_end);
106 		pmd_populate_kernel(&init_mm, pmdp, ptep);
107 	}
108 	ptep = pte_offset_kernel(pmdp, ea);
109 
110 set_the_pte:
111 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
112 	asm volatile("ptesync": : :"memory");
113 	return 0;
114 }
115 
116 /*
117  * nid, region_start, and region_end are hints to try to place the page
118  * table memory in the same node or region.
119  */
120 static int __map_kernel_page(unsigned long ea, unsigned long pa,
121 			  pgprot_t flags,
122 			  unsigned int map_page_size,
123 			  int nid,
124 			  unsigned long region_start, unsigned long region_end)
125 {
126 	unsigned long pfn = pa >> PAGE_SHIFT;
127 	pgd_t *pgdp;
128 	p4d_t *p4dp;
129 	pud_t *pudp;
130 	pmd_t *pmdp;
131 	pte_t *ptep;
132 	/*
133 	 * Make sure the task size is correct as per the max address
134 	 */
135 	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
136 
137 #ifdef CONFIG_PPC_64K_PAGES
138 	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
139 #endif
140 
141 	if (unlikely(!slab_is_available()))
142 		return early_map_kernel_page(ea, pa, flags, map_page_size,
143 						nid, region_start, region_end);
144 
145 	/*
146 	 * We should make the page table allocation functions able to take a
147 	 * node, so that kernel page tables can be placed on the right node
148 	 * after boot.
149 	 */
150 	pgdp = pgd_offset_k(ea);
151 	p4dp = p4d_offset(pgdp, ea);
152 	pudp = pud_alloc(&init_mm, p4dp, ea);
153 	if (!pudp)
154 		return -ENOMEM;
155 	if (map_page_size == PUD_SIZE) {
156 		ptep = (pte_t *)pudp;
157 		goto set_the_pte;
158 	}
159 	pmdp = pmd_alloc(&init_mm, pudp, ea);
160 	if (!pmdp)
161 		return -ENOMEM;
162 	if (map_page_size == PMD_SIZE) {
163 		ptep = pmdp_ptep(pmdp);
164 		goto set_the_pte;
165 	}
166 	ptep = pte_alloc_kernel(pmdp, ea);
167 	if (!ptep)
168 		return -ENOMEM;
169 
170 set_the_pte:
171 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
172 	asm volatile("ptesync": : :"memory");
173 	return 0;
174 }
175 
176 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
177 			  pgprot_t flags,
178 			  unsigned int map_page_size)
179 {
180 	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
181 }
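/*
 * Hypothetical usage sketch (not taken from this file): a caller mapping a
 * single kernel page at its linear-map address might do
 *
 *	unsigned long va = (unsigned long)__va(pa);
 *
 *	if (radix__map_kernel_page(va, pa, PAGE_KERNEL, PAGE_SIZE))
 *		pr_err("failed to map 0x%llx\n", (unsigned long long)pa);
 *
 * Larger mappings pass PMD_SIZE or PUD_SIZE as map_page_size, as
 * create_physical_mapping() below does via __map_kernel_page().
 */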
182 
183 #ifdef CONFIG_STRICT_KERNEL_RWX
184 static void radix__change_memory_range(unsigned long start, unsigned long end,
185 				       unsigned long clear)
186 {
187 	unsigned long idx;
188 	pgd_t *pgdp;
189 	p4d_t *p4dp;
190 	pud_t *pudp;
191 	pmd_t *pmdp;
192 	pte_t *ptep;
193 
194 	start = ALIGN_DOWN(start, PAGE_SIZE);
195 	end = PAGE_ALIGN(end); // aligns up
196 
197 	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
198 		 start, end, clear);
199 
200 	for (idx = start; idx < end; idx += PAGE_SIZE) {
201 		pgdp = pgd_offset_k(idx);
202 		p4dp = p4d_offset(pgdp, idx);
203 		pudp = pud_alloc(&init_mm, p4dp, idx);
204 		if (!pudp)
205 			continue;
206 		if (pud_is_leaf(*pudp)) {
207 			ptep = (pte_t *)pudp;
208 			goto update_the_pte;
209 		}
210 		pmdp = pmd_alloc(&init_mm, pudp, idx);
211 		if (!pmdp)
212 			continue;
213 		if (pmd_is_leaf(*pmdp)) {
214 			ptep = pmdp_ptep(pmdp);
215 			goto update_the_pte;
216 		}
217 		ptep = pte_alloc_kernel(pmdp, idx);
218 		if (!ptep)
219 			continue;
220 update_the_pte:
221 		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
222 	}
223 
224 	radix__flush_tlb_kernel_range(start, end);
225 }
226 
227 void radix__mark_rodata_ro(void)
228 {
229 	unsigned long start, end;
230 
231 	start = (unsigned long)_stext;
232 	end = (unsigned long)__init_begin;
233 
234 	radix__change_memory_range(start, end, _PAGE_WRITE);
235 }
236 
237 void radix__mark_initmem_nx(void)
238 {
239 	unsigned long start = (unsigned long)__init_begin;
240 	unsigned long end = (unsigned long)__init_end;
241 
242 	radix__change_memory_range(start, end, _PAGE_EXEC);
243 }
244 #endif /* CONFIG_STRICT_KERNEL_RWX */
245 
246 static inline void __meminit
247 print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
248 {
249 	char buf[10];
250 
251 	if (end <= start)
252 		return;
253 
254 	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
255 
256 	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
257 		exec ? " (exec)" : "");
258 }
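/*
 * Example of the resulting log line (illustrative; the exact formatting
 * comes from string_get_size() and the pr_fmt prefix above):
 *
 *	radix-mmu: Mapped 0x0000000000000000-0x0000000040000000 with 1.00 GiB pages (exec)
 */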
259 
260 static unsigned long next_boundary(unsigned long addr, unsigned long end)
261 {
262 #ifdef CONFIG_STRICT_KERNEL_RWX
263 	if (addr < __pa_symbol(__init_begin))
264 		return __pa_symbol(__init_begin);
265 #endif
266 	return end;
267 }
268 
269 static int __meminit create_physical_mapping(unsigned long start,
270 					     unsigned long end,
271 					     unsigned long max_mapping_size,
272 					     int nid, pgprot_t _prot)
273 {
274 	unsigned long vaddr, addr, mapping_size = 0;
275 	bool prev_exec, exec = false;
276 	pgprot_t prot;
277 	int psize;
278 
279 	start = ALIGN(start, PAGE_SIZE);
280 	end   = ALIGN_DOWN(end, PAGE_SIZE);
281 	for (addr = start; addr < end; addr += mapping_size) {
282 		unsigned long gap, previous_size;
283 		int rc;
284 
285 		gap = next_boundary(addr, end) - addr;
286 		if (gap > max_mapping_size)
287 			gap = max_mapping_size;
288 		previous_size = mapping_size;
289 		prev_exec = exec;
290 
291 		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
292 		    mmu_psize_defs[MMU_PAGE_1G].shift) {
293 			mapping_size = PUD_SIZE;
294 			psize = MMU_PAGE_1G;
295 		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
296 			   mmu_psize_defs[MMU_PAGE_2M].shift) {
297 			mapping_size = PMD_SIZE;
298 			psize = MMU_PAGE_2M;
299 		} else {
300 			mapping_size = PAGE_SIZE;
301 			psize = mmu_virtual_psize;
302 		}
303 
304 		vaddr = (unsigned long)__va(addr);
305 
306 		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
307 		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
308 			prot = PAGE_KERNEL_X;
309 			exec = true;
310 		} else {
311 			prot = _prot;
312 			exec = false;
313 		}
314 
315 		if (mapping_size != previous_size || exec != prev_exec) {
316 			print_mapping(start, addr, previous_size, prev_exec);
317 			start = addr;
318 		}
319 
320 		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
321 		if (rc)
322 			return rc;
323 
324 		update_page_count(psize, 1);
325 	}
326 
327 	print_mapping(start, addr, mapping_size, exec);
328 	return 0;
329 }
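/*
 * Worked example with made-up numbers: given start = 0x0, end = 0x40200000
 * (1 GiB + 2 MiB), 1G and 2M page sizes available, max_mapping_size >= 1G
 * and no STRICT_KERNEL_RWX boundary in the way, the loop above maps
 * [0x0, 0x40000000) with a single 1G page (addr is PUD_SIZE aligned and the
 * gap is >= PUD_SIZE) and the remaining 2 MiB with one 2M page. With
 * CONFIG_STRICT_KERNEL_RWX, next_boundary() stops mappings at __init_begin
 * so the kernel text region never shares a large page with the memory that
 * follows it.
 */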
330 
331 static void __init radix_init_pgtable(void)
332 {
333 	unsigned long rts_field;
334 	phys_addr_t start, end;
335 	u64 i;
336 
337 	/* We don't support slb for radix */
338 	mmu_slb_size = 0;
339 
340 	/*
341 	 * Create the linear mapping
342 	 */
343 	for_each_mem_range(i, &start, &end) {
344 		/*
345 		 * The memblock allocator is up at this point, so the
346 		 * page tables will be allocated within the range. No
347 		 * need for a node (which we don't have yet).
348 		 */
349 
350 		if (end >= RADIX_VMALLOC_START) {
351 			pr_warn("Outside the supported range\n");
352 			continue;
353 		}
354 
355 		WARN_ON(create_physical_mapping(start, end,
356 						radix_mem_block_size,
357 						-1, PAGE_KERNEL));
358 	}
359 
360 	/* Find out how many PID bits are supported */
361 	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
362 			cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
363 		/*
364 		 * Older versions of KVM on these machines prefer if the
365 		 * guest only uses the low 19 PID bits.
366 		 */
367 		if (!mmu_pid_bits)
368 			mmu_pid_bits = 19;
369 	} else {
370 		if (!mmu_pid_bits)
371 			mmu_pid_bits = 20;
372 	}
373 	mmu_base_pid = 1;
374 
375 	/*
376 	 * Allocate Partition table and process table for the
377 	 * host.
378 	 */
379 	BUG_ON(PRTB_SIZE_SHIFT > 36);
380 	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
381 	/*
382 	 * Fill in the process table.
383 	 */
384 	rts_field = radix__get_tree_size();
385 	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
386 
387 	/*
388 	 * The init_mm context is given the first available (non-zero) PID,
389 	 * which is the "guard PID" and contains no page table. PIDR should
390 	 * never be set to zero because that duplicates the kernel address
391 	 * space at the 0x0... offset (quadrant 0)!
392 	 *
393 	 * An arbitrary PID that may later be allocated by the PID allocator
394 	 * for userspace processes must not be used either, because that
395 	 * would cause stale user mappings for that PID on CPUs outside of
396 	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
397 	 *
398 	 * So permanently carve out one PID for the purpose of a guard PID.
399 	 */
400 	init_mm.context.id = mmu_base_pid;
401 	mmu_base_pid++;
402 }
403 
404 static void __init radix_init_partition_table(void)
405 {
406 	unsigned long rts_field, dw0, dw1;
407 
408 	mmu_partition_table_init();
409 	rts_field = radix__get_tree_size();
410 	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
411 	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
412 	mmu_partition_table_set_entry(0, dw0, dw1, false);
413 
414 	pr_info("Initializing Radix MMU\n");
415 }
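/*
 * Sketch of partition table entry 0 (the host) as built above:
 *
 *	dw0 = RTS (radix tree size) | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR
 *	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR
 *
 * i.e. dw0 points the hardware at the kernel radix tree and dw1 at the
 * process table allocated in radix_init_pgtable().
 */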
416 
417 static int __init get_idx_from_shift(unsigned int shift)
418 {
419 	int idx = -1;
420 
421 	switch (shift) {
422 	case 0xc:
423 		idx = MMU_PAGE_4K;
424 		break;
425 	case 0x10:
426 		idx = MMU_PAGE_64K;
427 		break;
428 	case 0x15:
429 		idx = MMU_PAGE_2M;
430 		break;
431 	case 0x1e:
432 		idx = MMU_PAGE_1G;
433 		break;
434 	}
435 	return idx;
436 }
437 
438 static int __init radix_dt_scan_page_sizes(unsigned long node,
439 					   const char *uname, int depth,
440 					   void *data)
441 {
442 	int size = 0;
443 	int shift, idx;
444 	unsigned int ap;
445 	const __be32 *prop;
446 	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
447 
448 	/* We are scanning "cpu" nodes only */
449 	if (type == NULL || strcmp(type, "cpu") != 0)
450 		return 0;
451 
452 	/* Find MMU PID size */
453 	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
454 	if (prop && size == 4)
455 		mmu_pid_bits = be32_to_cpup(prop);
456 
457 	/* Grab page size encodings */
458 	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
459 	if (!prop)
460 		return 0;
461 
462 	pr_info("Page sizes from device-tree:\n");
463 	for (; size >= 4; size -= 4, ++prop) {
464 
465 		struct mmu_psize_def *def;
466 
467 		/* The top 3 bits are the AP encoding */
468 		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
469 		ap = be32_to_cpu(prop[0]) >> 29;
470 		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
471 
472 		idx = get_idx_from_shift(shift);
473 		if (idx < 0)
474 			continue;
475 
476 		def = &mmu_psize_defs[idx];
477 		def->shift = shift;
478 		def->ap  = ap;
479 		def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
480 	}
481 
482 	/* needed ? */
483 	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
484 	return 1;
485 }
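/*
 * Worked example (illustrative property value): a cell of 0xa0000010 in
 * "ibm,processor-radix-AP-encodings" decodes as ap = 0xa0000010 >> 29 = 0x5
 * and shift = 0xa0000010 & ~(0xe << 28) = 0x10, i.e. a 64K page with AP
 * encoding 5, which matches the 64K defaults used by
 * radix__early_init_devtree() when no such property is found.
 */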
486 
487 #ifdef CONFIG_MEMORY_HOTPLUG
488 static int __init probe_memory_block_size(unsigned long node, const char *uname, int
489 					  depth, void *data)
490 {
491 	unsigned long *mem_block_size = (unsigned long *)data;
492 	const __be32 *prop;
493 	int len;
494 
495 	if (depth != 1)
496 		return 0;
497 
498 	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
499 		return 0;
500 
501 	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
502 
503 	if (!prop || len < dt_root_size_cells * sizeof(__be32))
504 		/*
505 		 * Nothing in the device tree
506 		 */
507 		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
508 	else
509 		*mem_block_size = of_read_number(prop, dt_root_size_cells);
510 	return 1;
511 }
512 
513 static unsigned long radix_memory_block_size(void)
514 {
515 	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
516 
517 	/*
518 	 * Firmware features have been probed by now, so it is safe
519 	 * to test for the OPAL feature.
520 	 */
521 	if (firmware_has_feature(FW_FEATURE_OPAL))
522 		mem_block_size = 1UL * 1024 * 1024 * 1024;
523 	else
524 		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);
525 
526 	return mem_block_size;
527 }
528 
529 #else   /* CONFIG_MEMORY_HOTPLUG */
530 
531 static unsigned long radix_memory_block_size(void)
532 {
533 	return 1UL * 1024 * 1024 * 1024;
534 }
535 
536 #endif /* CONFIG_MEMORY_HOTPLUG */
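/*
 * Note: the block size determined above is cached in radix_mem_block_size
 * by radix__early_init_devtree() and later passed to
 * create_physical_mapping() as max_mapping_size, so the linear mapping
 * never uses a page size larger than the memory block (hotplug)
 * granularity.
 */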
537 
538 
539 void __init radix__early_init_devtree(void)
540 {
541 	int rc;
542 
543 	/*
544 	 * Try to find the available page sizes in the device-tree
545 	 */
546 	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
547 	if (!rc) {
548 		/*
549 		 * No page size details found in the device tree.
550 		 * Let's assume we have 4K and 64K page support.
551 		 */
552 		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
553 		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
554 		mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
555 			psize_to_rpti_pgsize(MMU_PAGE_4K);
556 
557 		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
558 		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
559 		mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
560 			psize_to_rpti_pgsize(MMU_PAGE_64K);
561 	}
562 
563 	/*
564 	 * Max mapping size used when mapping pages. We don't use
565 	 * ppc_md.memory_block_size() here because this gets called
566 	 * early, before the machine probe has run. Also, the
567 	 * pseries implementation only checks for ibm,lmb-size.
568 	 * All hypervisors supporting radix expose that device
569 	 * tree node.
570 	 */
571 	radix_mem_block_size = radix_memory_block_size();
572 	return;
573 }
574 
575 static void radix_init_amor(void)
576 {
577 	/*
578 	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
579 	 * the hypervisor and guest can set up the IAMR (Instruction Authority
580 	 * Mask Register), enable key 0 and set it to 1.
581 	 *
582 	 * AMOR = 0b1100 .... 0000 (mask for key 0 is 0b11)
583 	 */
584 	mtspr(SPRN_AMOR, (3ul << 62));
585 }
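/*
 * For reference: 3ul << 62 is 0xc000000000000000, i.e. only the two
 * high-order bits are set, giving the 0b11 mask for key 0 described above.
 */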
586 
587 void __init radix__early_init_mmu(void)
588 {
589 	unsigned long lpcr;
590 
591 #ifdef CONFIG_PPC_64K_PAGES
592 	/* PAGE_SIZE mappings */
593 	mmu_virtual_psize = MMU_PAGE_64K;
594 #else
595 	mmu_virtual_psize = MMU_PAGE_4K;
596 #endif
597 
598 #ifdef CONFIG_SPARSEMEM_VMEMMAP
599 	/* vmemmap mapping */
600 	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
601 		/*
602 		 * map vmemmap using 2M if available
603 		 */
604 		mmu_vmemmap_psize = MMU_PAGE_2M;
605 	} else
606 		mmu_vmemmap_psize = mmu_virtual_psize;
607 #endif
608 	/*
609 	 * initialize page table size
610 	 */
611 	__pte_index_size = RADIX_PTE_INDEX_SIZE;
612 	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
613 	__pud_index_size = RADIX_PUD_INDEX_SIZE;
614 	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
615 	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
616 	__pte_table_size = RADIX_PTE_TABLE_SIZE;
617 	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
618 	__pud_table_size = RADIX_PUD_TABLE_SIZE;
619 	__pgd_table_size = RADIX_PGD_TABLE_SIZE;
620 
621 	__pmd_val_bits = RADIX_PMD_VAL_BITS;
622 	__pud_val_bits = RADIX_PUD_VAL_BITS;
623 	__pgd_val_bits = RADIX_PGD_VAL_BITS;
624 
625 	__kernel_virt_start = RADIX_KERN_VIRT_START;
626 	__vmalloc_start = RADIX_VMALLOC_START;
627 	__vmalloc_end = RADIX_VMALLOC_END;
628 	__kernel_io_start = RADIX_KERN_IO_START;
629 	__kernel_io_end = RADIX_KERN_IO_END;
630 	vmemmap = (struct page *)RADIX_VMEMMAP_START;
631 	ioremap_bot = IOREMAP_BASE;
632 
633 #ifdef CONFIG_PCI
634 	pci_io_base = ISA_IO_BASE;
635 #endif
636 	__pte_frag_nr = RADIX_PTE_FRAG_NR;
637 	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
638 	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
639 	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
640 
641 	radix_init_pgtable();
642 
643 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
644 		lpcr = mfspr(SPRN_LPCR);
645 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
646 		radix_init_partition_table();
647 		radix_init_amor();
648 	} else {
649 		radix_init_pseries();
650 	}
651 
652 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
653 
654 	/* Switch to the guard PID before turning on MMU */
655 	radix__switch_mmu_context(NULL, &init_mm);
656 	tlbiel_all();
657 }
658 
659 void radix__early_init_mmu_secondary(void)
660 {
661 	unsigned long lpcr;
662 	/*
663 	 * update partition table control register and UPRT
664 	 */
665 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
666 		lpcr = mfspr(SPRN_LPCR);
667 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
668 
669 		set_ptcr_when_no_uv(__pa(partition_tb) |
670 				    (PATB_SIZE_SHIFT - 12));
671 
672 		radix_init_amor();
673 	}
674 
675 	radix__switch_mmu_context(NULL, &init_mm);
676 	tlbiel_all();
677 
678 	/* Make sure userspace can't change the AMR */
679 	mtspr(SPRN_UAMOR, 0);
680 }
681 
682 /* Called during kexec sequence with MMU off */
683 notrace void radix__mmu_cleanup_all(void)
684 {
685 	unsigned long lpcr;
686 
687 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
688 		lpcr = mfspr(SPRN_LPCR);
689 		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
690 		set_ptcr_when_no_uv(0);
691 		powernv_set_nmmu_ptcr(0);
692 		radix__flush_tlb_all();
693 	}
694 }
695 
696 #ifdef CONFIG_MEMORY_HOTPLUG
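/*
 * Teardown helpers for memory hot-unplug. The remove_*_table() walkers
 * below clear the leaf entries in a range, and each free_*_table() frees a
 * lower-level table only once every entry in it is none, then clears the
 * upper-level entry that pointed to it.
 */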
697 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
698 {
699 	pte_t *pte;
700 	int i;
701 
702 	for (i = 0; i < PTRS_PER_PTE; i++) {
703 		pte = pte_start + i;
704 		if (!pte_none(*pte))
705 			return;
706 	}
707 
708 	pte_free_kernel(&init_mm, pte_start);
709 	pmd_clear(pmd);
710 }
711 
712 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
713 {
714 	pmd_t *pmd;
715 	int i;
716 
717 	for (i = 0; i < PTRS_PER_PMD; i++) {
718 		pmd = pmd_start + i;
719 		if (!pmd_none(*pmd))
720 			return;
721 	}
722 
723 	pmd_free(&init_mm, pmd_start);
724 	pud_clear(pud);
725 }
726 
727 static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
728 {
729 	pud_t *pud;
730 	int i;
731 
732 	for (i = 0; i < PTRS_PER_PUD; i++) {
733 		pud = pud_start + i;
734 		if (!pud_none(*pud))
735 			return;
736 	}
737 
738 	pud_free(&init_mm, pud_start);
739 	p4d_clear(p4d);
740 }
741 
742 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
743 			     unsigned long end)
744 {
745 	unsigned long next;
746 	pte_t *pte;
747 
748 	pte = pte_start + pte_index(addr);
749 	for (; addr < end; addr = next, pte++) {
750 		next = (addr + PAGE_SIZE) & PAGE_MASK;
751 		if (next > end)
752 			next = end;
753 
754 		if (!pte_present(*pte))
755 			continue;
756 
757 		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
758 			/*
759 			 * The vmemmap_free() and remove_section_mapping()
760 			 * codepaths call us with aligned addresses.
761 			 */
762 			WARN_ONCE(1, "%s: unaligned range\n", __func__);
763 			continue;
764 		}
765 
766 		pte_clear(&init_mm, addr, pte);
767 	}
768 }
769 
770 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
771 			     unsigned long end)
772 {
773 	unsigned long next;
774 	pte_t *pte_base;
775 	pmd_t *pmd;
776 
777 	pmd = pmd_start + pmd_index(addr);
778 	for (; addr < end; addr = next, pmd++) {
779 		next = pmd_addr_end(addr, end);
780 
781 		if (!pmd_present(*pmd))
782 			continue;
783 
784 		if (pmd_is_leaf(*pmd)) {
785 			if (!IS_ALIGNED(addr, PMD_SIZE) ||
786 			    !IS_ALIGNED(next, PMD_SIZE)) {
787 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
788 				continue;
789 			}
790 			pte_clear(&init_mm, addr, (pte_t *)pmd);
791 			continue;
792 		}
793 
794 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
795 		remove_pte_table(pte_base, addr, next);
796 		free_pte_table(pte_base, pmd);
797 	}
798 }
799 
800 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
801 			     unsigned long end)
802 {
803 	unsigned long next;
804 	pmd_t *pmd_base;
805 	pud_t *pud;
806 
807 	pud = pud_start + pud_index(addr);
808 	for (; addr < end; addr = next, pud++) {
809 		next = pud_addr_end(addr, end);
810 
811 		if (!pud_present(*pud))
812 			continue;
813 
814 		if (pud_is_leaf(*pud)) {
815 			if (!IS_ALIGNED(addr, PUD_SIZE) ||
816 			    !IS_ALIGNED(next, PUD_SIZE)) {
817 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
818 				continue;
819 			}
820 			pte_clear(&init_mm, addr, (pte_t *)pud);
821 			continue;
822 		}
823 
824 		pmd_base = pud_pgtable(*pud);
825 		remove_pmd_table(pmd_base, addr, next);
826 		free_pmd_table(pmd_base, pud);
827 	}
828 }
829 
830 static void __meminit remove_pagetable(unsigned long start, unsigned long end)
831 {
832 	unsigned long addr, next;
833 	pud_t *pud_base;
834 	pgd_t *pgd;
835 	p4d_t *p4d;
836 
837 	spin_lock(&init_mm.page_table_lock);
838 
839 	for (addr = start; addr < end; addr = next) {
840 		next = pgd_addr_end(addr, end);
841 
842 		pgd = pgd_offset_k(addr);
843 		p4d = p4d_offset(pgd, addr);
844 		if (!p4d_present(*p4d))
845 			continue;
846 
847 		if (p4d_is_leaf(*p4d)) {
848 			if (!IS_ALIGNED(addr, P4D_SIZE) ||
849 			    !IS_ALIGNED(next, P4D_SIZE)) {
850 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
851 				continue;
852 			}
853 
854 			pte_clear(&init_mm, addr, (pte_t *)pgd);
855 			continue;
856 		}
857 
858 		pud_base = p4d_pgtable(*p4d);
859 		remove_pud_table(pud_base, addr, next);
860 		free_pud_table(pud_base, p4d);
861 	}
862 
863 	spin_unlock(&init_mm.page_table_lock);
864 	radix__flush_tlb_kernel_range(start, end);
865 }
866 
867 int __meminit radix__create_section_mapping(unsigned long start,
868 					    unsigned long end, int nid,
869 					    pgprot_t prot)
870 {
871 	if (end >= RADIX_VMALLOC_START) {
872 		pr_warn("Outside the supported range\n");
873 		return -1;
874 	}
875 
876 	return create_physical_mapping(__pa(start), __pa(end),
877 				       radix_mem_block_size, nid, prot);
878 }
879 
880 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
881 {
882 	remove_pagetable(start, end);
883 	return 0;
884 }
885 #endif /* CONFIG_MEMORY_HOTPLUG */
886 
887 #ifdef CONFIG_SPARSEMEM_VMEMMAP
888 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
889 				 pgprot_t flags, unsigned int map_page_size,
890 				 int nid)
891 {
892 	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
893 }
894 
895 int __meminit radix__vmemmap_create_mapping(unsigned long start,
896 				      unsigned long page_size,
897 				      unsigned long phys)
898 {
899 	/* Create a PTE encoding */
900 	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
901 	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
902 	int ret;
903 
904 	if ((start + page_size) >= RADIX_VMEMMAP_END) {
905 		pr_warn("Outside the supported range\n");
906 		return -1;
907 	}
908 
909 	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
910 	BUG_ON(ret);
911 
912 	return 0;
913 }
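/*
 * Note: radix__early_init_mmu() sets mmu_vmemmap_psize to 2M when that page
 * size is available and to the base page size otherwise, so vmemmap
 * mappings are normally created here at that granularity.
 */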
914 
915 #ifdef CONFIG_MEMORY_HOTPLUG
916 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
917 {
918 	remove_pagetable(start, start + page_size);
919 }
920 #endif
921 #endif
922 
923 #ifdef CONFIG_DEBUG_PAGEALLOC
924 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
925 {
926 	pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
927 }
928 #endif
929 
930 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
931 
932 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
933 				  pmd_t *pmdp, unsigned long clr,
934 				  unsigned long set)
935 {
936 	unsigned long old;
937 
938 #ifdef CONFIG_DEBUG_VM
939 	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
940 	assert_spin_locked(pmd_lockptr(mm, pmdp));
941 #endif
942 
943 	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
944 	trace_hugepage_update(addr, old, clr, set);
945 
946 	return old;
947 }
948 
949 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
950 			pmd_t *pmdp)
951 
952 {
953 	pmd_t pmd;
954 
955 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
956 	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
957 	VM_BUG_ON(pmd_devmap(*pmdp));
958 	/*
959 	 * khugepaged calls this for normal pmd
960 	 */
961 	pmd = *pmdp;
962 	pmd_clear(pmdp);
963 
964 	/*
965 	 * pmdp_collapse_flush() needs to ensure that there are no parallel GUP
966 	 * walks after this call. This is needed so that we have a stable page
967 	 * reference count when collapsing a page. We don't allow collapsing a
968 	 * page on which GUP has taken a reference. We can ensure that by sending
969 	 * an IPI, because GUP walks happen with IRQs disabled.
970 	 */
971 	serialize_against_pte_lookup(vma->vm_mm);
972 
973 	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
974 
975 	return pmd;
976 }
977 
978 /*
979  * For us pgtable_t is pte_t *. In order to save the deposited
980  * page table, we consider the allocated page table as a list
981  * head. On withdraw we need to make sure we zero out the used
982  * list_head memory area.
983  */
984 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
985 				 pgtable_t pgtable)
986 {
987 	struct list_head *lh = (struct list_head *) pgtable;
988 
989 	assert_spin_locked(pmd_lockptr(mm, pmdp));
990 
991 	/* FIFO */
992 	if (!pmd_huge_pte(mm, pmdp))
993 		INIT_LIST_HEAD(lh);
994 	else
995 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
996 	pmd_huge_pte(mm, pmdp) = pgtable;
997 }
998 
999 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1000 {
1001 	pte_t *ptep;
1002 	pgtable_t pgtable;
1003 	struct list_head *lh;
1004 
1005 	assert_spin_locked(pmd_lockptr(mm, pmdp));
1006 
1007 	/* FIFO */
1008 	pgtable = pmd_huge_pte(mm, pmdp);
1009 	lh = (struct list_head *) pgtable;
1010 	if (list_empty(lh))
1011 		pmd_huge_pte(mm, pmdp) = NULL;
1012 	else {
1013 		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1014 		list_del(lh);
1015 	}
1016 	ptep = (pte_t *) pgtable;
1017 	*ptep = __pte(0);
1018 	ptep++;
1019 	*ptep = __pte(0);
1020 	return pgtable;
1021 }
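/*
 * Illustrative pairing (hypothetical call sites, for clarity only): generic
 * THP code deposits a preallocated PTE page when installing a huge PMD and
 * withdraws it when the PMD is split or zapped:
 *
 *	radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = radix__pgtable_trans_huge_withdraw(mm, pmdp);
 *
 * Each deposited pte page doubles as a list_head (16 bytes, i.e. two pte_t
 * slots), which is why the withdraw path above zeroes the first two PTE
 * slots before returning the page.
 */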
1022 
1023 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1024 				     unsigned long addr, pmd_t *pmdp)
1025 {
1026 	pmd_t old_pmd;
1027 	unsigned long old;
1028 
1029 	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1030 	old_pmd = __pmd(old);
1031 	return old_pmd;
1032 }
1033 
1034 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1035 
1036 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1037 				  pte_t entry, unsigned long address, int psize)
1038 {
1039 	struct mm_struct *mm = vma->vm_mm;
1040 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
1041 					      _PAGE_RW | _PAGE_EXEC);
1042 
1043 	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1044 	/*
1045 	 * To avoid an NMMU hang while relaxing access, we need to mark
1046 	 * the pte invalid in between.
1047 	 */
1048 	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
1049 		unsigned long old_pte, new_pte;
1050 
1051 		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1052 		/*
1053 		 * new value of pte
1054 		 */
1055 		new_pte = old_pte | set;
1056 		radix__flush_tlb_page_psize(mm, address, psize);
1057 		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1058 	} else {
1059 		__radix_pte_update(ptep, 0, set);
1060 		/*
1061 		 * Book3S does not require a TLB flush when relaxing access
1062 		 * restrictions when the address space is not attached to a
1063 		 * NMMU, because the core MMU will reload the pte after taking
1064 		 * an access fault, which is defined by the architecture.
1065 		 */
1066 	}
1067 	/* See ptesync comment in radix__set_pte_at */
1068 }
1069 
1070 void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1071 				    unsigned long addr, pte_t *ptep,
1072 				    pte_t old_pte, pte_t pte)
1073 {
1074 	struct mm_struct *mm = vma->vm_mm;
1075 
1076 	/*
1077 	 * To avoid an NMMU hang while relaxing access we need to flush the TLB
1078 	 * before we set the new value. We only need to do this for radix, because
1079 	 * hash translation already flushes when updating the Linux pte.
1080 	 */
1081 	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1082 	    (atomic_read(&mm->context.copros) > 0))
1083 		radix__flush_tlb_page(vma, addr);
1084 
1085 	set_pte_at(mm, addr, ptep, pte);
1086 }
1087 
1088 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1089 {
1090 	pte_t *ptep = (pte_t *)pud;
1091 	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1092 
1093 	if (!radix_enabled())
1094 		return 0;
1095 
1096 	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1097 
1098 	return 1;
1099 }
1100 
1101 int pud_clear_huge(pud_t *pud)
1102 {
1103 	if (pud_huge(*pud)) {
1104 		pud_clear(pud);
1105 		return 1;
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1112 {
1113 	pmd_t *pmd;
1114 	int i;
1115 
1116 	pmd = pud_pgtable(*pud);
1117 	pud_clear(pud);
1118 
1119 	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1120 
1121 	for (i = 0; i < PTRS_PER_PMD; i++) {
1122 		if (!pmd_none(pmd[i])) {
1123 			pte_t *pte;
1124 			pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1125 
1126 			pte_free_kernel(&init_mm, pte);
1127 		}
1128 	}
1129 
1130 	pmd_free(&init_mm, pmd);
1131 
1132 	return 1;
1133 }
1134 
1135 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1136 {
1137 	pte_t *ptep = (pte_t *)pmd;
1138 	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1139 
1140 	if (!radix_enabled())
1141 		return 0;
1142 
1143 	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1144 
1145 	return 1;
1146 }
1147 
1148 int pmd_clear_huge(pmd_t *pmd)
1149 {
1150 	if (pmd_huge(*pmd)) {
1151 		pmd_clear(pmd);
1152 		return 1;
1153 	}
1154 
1155 	return 0;
1156 }
1157 
1158 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1159 {
1160 	pte_t *pte;
1161 
1162 	pte = (pte_t *)pmd_page_vaddr(*pmd);
1163 	pmd_clear(pmd);
1164 
1165 	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1166 
1167 	pte_free_kernel(&init_mm, pte);
1168 
1169 	return 1;
1170 }
1171