// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
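/*
 * Each test below is normally exercised at runtime through LKDTM's
 * debugfs interface, by writing the crash type name to the DIRECT
 * file and watching dmesg for the result, e.g.:
 *
 *	echo SLAB_LINEAR_OVERFLOW > /sys/kernel/debug/provoke-crash/DIRECT
 */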
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
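
/*
 * For example, if the overflow length below were the compile-time
 * constant 1, the allocator's __alloc_size() hint could let the
 * compiler (or CONFIG_FORTIFY_SOURCE) reject something like:
 *
 *	memset(one, 0xAA, PAGE_SIZE + 1);
 *
 * before the test ever runs; reading the length through a volatile
 * defers any such check to run time.
 */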

/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	two = vzalloc(PAGE_SIZE);
	if (!one || !two)
		goto free;

	pr_info("Attempting vmalloc linear overflow ...\n");
	memset(one, 0xAA, PAGE_SIZE + __offset);

free:
	vfree(two);
	vfree(one);
}

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASan, or by using
 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}
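
/*
 * Size-class arithmetic for the test above: kmalloc(1020) is served
 * from the kmalloc-1024 cache, and data[1024 / sizeof(u32)] is
 * data[256], i.e. byte offset 1024: just past the requested 1020 bytes
 * and one word beyond the 1024-byte object, where redzoning
 * (slub_debug=Z), KASAN, or memory tagging can notice the stray write.
 */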

static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
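
/*
 * Arithmetic for the test above: with len = 1024 and 4-byte ints the
 * allocation holds 256 elements, so offset = 128 and the stale write
 * lands at byte offset 512, the midpoint of the object; &base[offset * 2]
 * is &base[256], the one-past-the-end address printed in the report.
 */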

static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or the
	 * middle of the allocation to store the free pointer,
	 * depending on configuration. base[offset] is sizeof(*base)
	 * ints (16 bytes with 4-byte int) into the allocation, which
	 * avoids the freelist in either case.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
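
/*
 * With init_on_free enabled the freed object is zeroed, so the read
 * back above should see 0 rather than 0x12345678; other schemes (slab
 * poisoning, KASAN) either change the value or flag the access itself.
 */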

static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
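
/*
 * How this is expected to be caught: KASAN flags the stale memset()
 * directly, while page poisoning verifies the poison pattern when the
 * page is next allocated, so the corruption surfaces at the trailing
 * __get_free_page() instead.
 */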

static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}
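
/*
 * The memchr() check above works because the first allocation was
 * filled with 0xAB and freed: if the second allocation reuses that
 * memory and any 0xAB byte is still visible, the allocator did not
 * initialize it on allocation.
 */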

static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Page was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}

static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}
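
/*
 * A back-to-back double free of the same object, as above, is the
 * pattern CONFIG_SLAB_FREELIST_HARDENED's freelist sanity check is
 * built to catch; slub_debug=F consistency checks can detect it too.
 */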

static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}
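
/*
 * Freeing an object into a cache it did not come from is expected to
 * trip the allocator's cache-origin check (comparing the cache derived
 * from the object's page against the one passed in) when slab sanity
 * checking or freelist hardening is enabled.
 */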

static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}
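
/*
 * The pointer handed to kmem_cache_free() above refers to a buddy
 * page, not a slab object, so the free path's sanity checks (the page
 * it resolves to is not marked as a slab) are expected to flag it.
 */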

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};

struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};