xref: /openbmc/linux/arch/x86/mm/ident_map.c (revision b78412b8)
1 /*
2  * Helper routines for building identity mapping page tables. This is
3  * included by both the compressed kernel and the regular kernel.
4  */
5 
6 static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
7 			   unsigned long addr, unsigned long end)
8 {
9 	addr &= PMD_MASK;
10 	for (; addr < end; addr += PMD_SIZE) {
11 		pmd_t *pmd = pmd_page + pmd_index(addr);
12 
13 		if (pmd_present(*pmd))
14 			continue;
15 
16 		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
17 	}
18 }
19 
20 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
21 			  unsigned long addr, unsigned long end)
22 {
23 	unsigned long next;
24 
25 	for (; addr < end; addr = next) {
26 		pud_t *pud = pud_page + pud_index(addr);
27 		pmd_t *pmd;
28 
29 		next = (addr & PUD_MASK) + PUD_SIZE;
30 		if (next > end)
31 			next = end;
32 
33 		if (info->direct_gbpages) {
34 			pud_t pudval;
35 
36 			if (pud_present(*pud))
37 				continue;
38 
39 			addr &= PUD_MASK;
40 			pudval = __pud((addr - info->offset) | info->page_flag);
41 			set_pud(pud, pudval);
42 			continue;
43 		}
44 
45 		if (pud_present(*pud)) {
46 			pmd = pmd_offset(pud, 0);
47 			ident_pmd_init(info, pmd, addr, next);
48 			continue;
49 		}
50 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
51 		if (!pmd)
52 			return -ENOMEM;
53 		ident_pmd_init(info, pmd, addr, next);
54 		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
55 	}
56 
57 	return 0;
58 }
59 
60 static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
61 			  unsigned long addr, unsigned long end)
62 {
63 	unsigned long next;
64 
65 	for (; addr < end; addr = next) {
66 		p4d_t *p4d = p4d_page + p4d_index(addr);
67 		pud_t *pud;
68 
69 		next = (addr & P4D_MASK) + P4D_SIZE;
70 		if (next > end)
71 			next = end;
72 
73 		if (p4d_present(*p4d)) {
74 			pud = pud_offset(p4d, 0);
75 			ident_pud_init(info, pud, addr, next);
76 			continue;
77 		}
78 		pud = (pud_t *)info->alloc_pgt_page(info->context);
79 		if (!pud)
80 			return -ENOMEM;
81 		ident_pud_init(info, pud, addr, next);
82 		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
83 	}
84 
85 	return 0;
86 }
87 
/*
 * Create an identity mapping of physical range [pstart, pend) under
 * 'pgd_page', with the virtual addresses shifted by info->offset
 * (offset == 0 gives a true 1:1 mapping).  New page-table pages are
 * obtained from info->alloc_pgt_page(info->context).
 *
 * Returns 0 on success or -ENOMEM when a page-table allocation fails.
 * On failure, pages already hooked into 'pgd_page' are NOT unwound; the
 * caller owns the allocator and any cleanup.
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Walk one PGD entry per iteration, clamping the last to 'end'. */
	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			/* Entry exists: descend and fill the lower levels. */
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		/* Populate the new table before making it visible in the pgd. */
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
136