xref: /openbmc/linux/arch/x86/mm/ident_map.c (revision adb19164)
// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

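/*
 * Fill the PMD page for the virtual range [addr, end) with 2 MiB entries
 * pointing at (addr - info->offset), using info->page_flag. Entries that
 * are already present are left untouched.
 */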
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

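/*
 * Fill the PUD page for [addr, end). When gbpages are allowed and a whole
 * 1 GiB slot is covered, install a single gbpage entry; otherwise reuse or
 * allocate a PMD page table and fill it with 2 MiB entries.
 */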
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* if this is already a gbpage, this portion is already mapped */
		if (pud_large(*pud))
			continue;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the beginning: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);
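		/*
		 * Example: addr = 0x40000000 and next = 0x80000000 are both
		 * 1 GiB aligned, so a single gbpage can cover [addr, next);
		 * with addr = 0x40200000 the low bits are set, use_gbpage is
		 * cleared and the range falls back to 2 MiB pmd mappings.
		 */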

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

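/*
 * Fill the P4D page for [addr, end), reusing or allocating PUD page tables
 * and delegating the lower levels to ident_pud_init().
 */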
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
	}

	return 0;
}

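/*
 * Build page tables under pgd_page that map the physical range
 * [pstart, pend) at virtual address pstart + info->offset; an offset of
 * zero gives a pure identity (virt == phys) mapping. New page table pages
 * come from info->alloc_pgt_page(). Returns 0 on success or -ENOMEM if a
 * page table allocation fails.
 */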
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
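
/*
 * A minimal usage sketch of kernel_ident_mapping_init(). The helper names
 * below, example_alloc_pgt_page() and example_ident_map_range(), are
 * hypothetical and only illustrate the expected shape of a caller; real
 * users (kexec, hibernation, the compressed kernel's identity-map setup)
 * supply their own allocator callback and page flags.
 *
 * The allocator hands out one zeroed page per call for each new page table
 * level, and leaving info->offset at zero requests a pure virt == phys
 * mapping of [start, end):
 *
 *	static void *example_alloc_pgt_page(void *context)
 *	{
 *		return (void *)get_zeroed_page(GFP_KERNEL);
 *	}
 *
 *	static int example_ident_map_range(pgd_t *pgd, unsigned long start,
 *					   unsigned long end)
 *	{
 *		struct x86_mapping_info info = {
 *			.alloc_pgt_page	= example_alloc_pgt_page,
 *			.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *		};
 *
 *		return kernel_ident_mapping_init(&info, pgd, start, end);
 *	}
 */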