// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/string.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kup.h>
#include <asm/smp.h>

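/*
 * Early boot memory layout bookkeeping: memstart_addr is the physical
 * address of the start of system memory, kernstart_addr the physical
 * address the kernel was loaded at, and kernstart_virt_addr the virtual
 * address the kernel runs at (KERNELBASE unless relocated).
 */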
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr __ro_after_init;
EXPORT_SYMBOL_GPL(kernstart_addr);
unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
EXPORT_SYMBOL_GPL(kernstart_virt_addr);

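/*
 * Runtime switches for Kernel Userspace Execution Prevention (KUEP)
 * and Kernel Userspace Access Protection (KUAP).  Each defaults to
 * disabled when the corresponding Kconfig option is not enabled.
 */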
bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);

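/*
 * The "nosmep"/"nosmap" parameter names are borrowed from the
 * equivalent x86 features (SMEP/SMAP).  Note that nosmep is only
 * honoured on Book3S 64; other platforms ignore it and keep KUEP on.
 */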
static int __init parse_nosmep(char *p)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		return 0;

	disable_kuep = true;
	pr_warn("Disabling Kernel Userspace Execution Prevention\n");
	return 0;
}
early_param("nosmep", parse_nosmep);

static int __init parse_nosmap(char *p)
{
	disable_kuap = true;
	pr_warn("Disabling Kernel Userspace Access Protection\n");
	return 0;
}
early_param("nosmap", parse_nosmap);

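/*
 * Weak default used by platforms without their own setup_kuep().  The
 * message is printed on the boot CPU only, so secondary CPUs going
 * through setup_kup() don't repeat it.
 */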
void __weak setup_kuep(bool disabled)
{
	if (!IS_ENABLED(CONFIG_PPC_KUEP) || disabled)
		return;

	if (smp_processor_id() != boot_cpuid)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");
}

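/* Called on each CPU during early boot to apply KUAP/KUEP policy. */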
void setup_kup(void)
{
	setup_kuap(disable_kuap);
	setup_kuep(disable_kuep);
}

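/*
 * A kmem_cache constructor receives only the object pointer, so the
 * table size cannot be passed as an argument.  Instead, instantiate
 * one constructor per possible index size (0..MAX_PGTABLE_INDEX_SIZE),
 * each zeroing sizeof(void *) << shift bytes, and pick the right one
 * in ctor() below.
 */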
#define CTOR(shift) static void ctor_##shift(void *addr) \
{							\
	memset(addr, 0, sizeof(void *) << (shift));	\
}

CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15);

static inline void (*ctor(int shift))(void *)
{
	BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);

	switch (shift) {
	case 0: return ctor_0;
	case 1: return ctor_1;
	case 2: return ctor_2;
	case 3: return ctor_3;
	case 4: return ctor_4;
	case 5: return ctor_5;
	case 6: return ctor_6;
	case 7: return ctor_7;
	case 8: return ctor_8;
	case 9: return ctor_9;
	case 10: return ctor_10;
	case 11: return ctor_11;
	case 12: return ctor_12;
	case 13: return ctor_13;
	case 14: return ctor_14;
	case 15: return ctor_15;
	}
	return NULL;
}

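/*
 * One cache per table index size, indexed by shift;
 * PGT_CACHE(shift) looks up the matching cache.
 */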
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache);	/* used by kvm_hv module */

/*
 * Create a kmem_cache for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) from
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned int shift)
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new = NULL;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	if (name)
		new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
	if (!new)
		panic("Could not allocate pgtable cache for order %d", shift);

	kfree(name);
	pgtable_cache[shift] = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
EXPORT_SYMBOL_GPL(pgtable_cache_add);	/* used by kvm_hv module */

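/*
 * Create the caches for all page table levels used by this config.
 * Callers then allocate tables from them via PGT_CACHE(), e.g.
 * (illustrative, following the Book3S 64 pgd_alloc() pattern):
 *
 *	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 */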
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE);

	if (PMD_CACHE_INDEX)
		pgtable_cache_add(PMD_CACHE_INDEX);
	/*
	 * In all current configs, when the PUD index exists it is the
	 * same size as either the PGD or PMD index, except with THP
	 * enabled on Book3S 64.
	 */
	if (PUD_CACHE_INDEX)
		pgtable_cache_add(PUD_CACHE_INDEX);
}
168