/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));

	return 0;
}
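
/*
 * A minimal sketch of how a caller might drive vmemmap_create_mapping():
 * back one mmu_vmemmap_psize-sized chunk of the vmemmap with freshly
 * allocated memory and map the virtual range onto it.  This is
 * illustrative only; the real caller is vmemmap_populate() in
 * arch/powerpc/mm/init_64.c, and vmemmap_map_one_block() here is a
 * hypothetical helper, not part of this file's API.  It assumes
 * vmemmap_alloc_block() from <linux/mm.h> is visible.
 */
static int __meminit vmemmap_map_one_block(unsigned long start)
{
	unsigned long page_size = 1UL << mmu_psize_defs[mmu_vmemmap_psize].shift;
	/* Grab one block of backing memory of the vmemmap page size */
	void *block = vmemmap_alloc_block(page_size, NUMA_NO_NODE);

	if (!block)
		return -ENOMEM;
	/* Map page_size bytes of vmemmap at 'start' onto the new block */
	return vmemmap_create_mapping(start, page_size, __pa(block));
}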

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_remove_mapping(unsigned long start,
			    unsigned long page_size)
{
	/*
	 * Tearing down vmemmap mappings is not currently supported on
	 * Book3E; the mapping created above is simply left in place.
	 */
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr;

	ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
				     __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
		      __func__, size, size, __pa(MAX_DMA_ADDRESS));

	return ptr;
}
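
/*
 * Usage sketch: before slab is up, page-table pages are carved directly
 * out of memblock with the helper above.  A PMD table, for example, would
 * be obtained as below.  example_alloc_pmd_table() is hypothetical and
 * for illustration only; the real call sites are in map_kernel_page()
 * further down.  Note that memblock_alloc_try_nid() returns zeroed
 * memory, so a new table starts out with all entries empty.
 */
static void __init *example_alloc_pmd_table(void)
{
	/* PMD_TABLE_SIZE bytes, aligned to PMD_TABLE_SIZE, already zeroed */
	return early_alloc_pgtable(PMD_TABLE_SIZE);
}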

/*
 * map_kernel_page() is currently only called by __ioremap(); it adds
 * a PTE for the given EA/PA pair to the kernel page tables, allocating
 * any missing intermediate levels along the way.
 */
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
	if (slab_is_available()) {
		/* Normal path: allocate missing levels via the generic helpers */
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		/* Early boot: allocate page-table pages straight from memblock */
		pgdp = pgd_offset_k(ea);
#ifndef __PAGETABLE_PUD_FOLDED
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* !__PAGETABLE_PUD_FOLDED */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

	/* Make sure the PTE update is visible before the mapping is used */
	smp_wmb();
	return 0;
}
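
/*
 * A minimal sketch of an __ioremap()-style consumer of map_kernel_page():
 * map the physically contiguous range [pa, pa + size) at virtual address
 * ea, one PAGE_SIZE entry at a time.  example_map_range() is hypothetical
 * and for illustration only; the real ioremap path additionally handles
 * alignment and address-space reservation.
 */
static int __maybe_unused example_map_range(unsigned long ea, unsigned long pa,
					    unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (err)
			return err;
	}
	return 0;
}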