xref: /openbmc/linux/drivers/misc/lkdtm/heap.c (revision e6d468d3)
1039a1c42SKees Cook // SPDX-License-Identifier: GPL-2.0
2039a1c42SKees Cook /*
3039a1c42SKees Cook  * This is for all the tests relating directly to heap memory, including
4039a1c42SKees Cook  * page allocation and slab allocations.
5039a1c42SKees Cook  */
6039a1c42SKees Cook #include "lkdtm.h"
7039a1c42SKees Cook #include <linux/slab.h>
89c4f6ebcSKees Cook #include <linux/vmalloc.h>
9039a1c42SKees Cook #include <linux/sched.h>
10039a1c42SKees Cook 
/* Dedicated caches used by the SLAB_FREE_DOUBLE/SLAB_FREE_CROSS tests below. */
static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
21*e6d468d3SKees Cook 
22*e6d468d3SKees Cook /*
239c4f6ebcSKees Cook  * If there aren't guard pages, it's likely that a consecutive allocation will
249c4f6ebcSKees Cook  * let us overflow into the second allocation without overwriting something real.
259c4f6ebcSKees Cook  */
269c4f6ebcSKees Cook void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
279c4f6ebcSKees Cook {
289c4f6ebcSKees Cook 	char *one, *two;
299c4f6ebcSKees Cook 
309c4f6ebcSKees Cook 	one = vzalloc(PAGE_SIZE);
319c4f6ebcSKees Cook 	two = vzalloc(PAGE_SIZE);
329c4f6ebcSKees Cook 
339c4f6ebcSKees Cook 	pr_info("Attempting vmalloc linear overflow ...\n");
34*e6d468d3SKees Cook 	memset(one, 0xAA, PAGE_SIZE + __offset);
359c4f6ebcSKees Cook 
369c4f6ebcSKees Cook 	vfree(two);
379c4f6ebcSKees Cook 	vfree(one);
389c4f6ebcSKees Cook }
399c4f6ebcSKees Cook 
409c4f6ebcSKees Cook /*
41039a1c42SKees Cook  * This tries to stay within the next largest power-of-2 kmalloc cache
42039a1c42SKees Cook  * to avoid actually overwriting anything important if it's not detected
43039a1c42SKees Cook  * correctly.
44039a1c42SKees Cook  */
459c4f6ebcSKees Cook void lkdtm_SLAB_LINEAR_OVERFLOW(void)
46039a1c42SKees Cook {
47039a1c42SKees Cook 	size_t len = 1020;
48039a1c42SKees Cook 	u32 *data = kmalloc(len, GFP_KERNEL);
49039a1c42SKees Cook 	if (!data)
50039a1c42SKees Cook 		return;
51039a1c42SKees Cook 
529c4f6ebcSKees Cook 	pr_info("Attempting slab linear overflow ...\n");
53039a1c42SKees Cook 	data[1024 / sizeof(u32)] = 0x12345678;
54039a1c42SKees Cook 	kfree(data);
55039a1c42SKees Cook }
56039a1c42SKees Cook 
/* Write into a slab allocation after freeing it (deliberate UAF write),
 * then reallocate to try to observe the clobber. */
void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	/* &base[offset * 2] is one past the end; used only for the range print. */
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	/* The deliberate use-after-free write. */
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
82039a1c42SKees Cook 
/* Store a value in a slab allocation, free it, and read it back to see
 * whether free-poisoning (init_on_free) wiped the contents. */
void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use the either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);
	/*
	 * NOTE(review): offset is used below as an *element* index, so
	 * base[offset] is base[4] (byte offset 16), not the second word
	 * described above. Still well inside the allocation, but confirm
	 * which word was intended.
	 */

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	/* Separate allocation holds the expected pattern for comparison. */
	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	/* The deliberate use-after-free read. */
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
126039a1c42SKees Cook 
/* Write to a whole buddy-allocator page after freeing it (deliberate
 * page-level UAF write), then reallocate to try to notice the clobber. */
void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	/* Yield so the freed page has a chance to be reused/poisoned
	 * before the bad write — presumably why schedule() is here. */
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	/* The deliberate use-after-free write of the full page. */
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
146039a1c42SKees Cook 
/* Store a value in a buddy-allocator page, free the page, and read it
 * back to see whether free-poisoning (init_on_free) wiped it. */
void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	/* Separate slab allocation holds the expected pattern. */
	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	/* The deliberate use-after-free read of the page's first word. */
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
183966fede8SKees Cook 
18437a0ca7fSKees Cook void lkdtm_SLAB_INIT_ON_ALLOC(void)
18537a0ca7fSKees Cook {
18637a0ca7fSKees Cook 	u8 *first;
18737a0ca7fSKees Cook 	u8 *val;
18837a0ca7fSKees Cook 
18937a0ca7fSKees Cook 	first = kmalloc(512, GFP_KERNEL);
19037a0ca7fSKees Cook 	if (!first) {
19137a0ca7fSKees Cook 		pr_info("Unable to allocate 512 bytes the first time.\n");
19237a0ca7fSKees Cook 		return;
19337a0ca7fSKees Cook 	}
19437a0ca7fSKees Cook 
19537a0ca7fSKees Cook 	memset(first, 0xAB, 512);
19637a0ca7fSKees Cook 	kfree(first);
19737a0ca7fSKees Cook 
19837a0ca7fSKees Cook 	val = kmalloc(512, GFP_KERNEL);
19937a0ca7fSKees Cook 	if (!val) {
20037a0ca7fSKees Cook 		pr_info("Unable to allocate 512 bytes the second time.\n");
20137a0ca7fSKees Cook 		return;
20237a0ca7fSKees Cook 	}
20337a0ca7fSKees Cook 	if (val != first) {
20437a0ca7fSKees Cook 		pr_warn("Reallocation missed clobbered memory.\n");
20537a0ca7fSKees Cook 	}
20637a0ca7fSKees Cook 
20737a0ca7fSKees Cook 	if (memchr(val, 0xAB, 512) == NULL) {
20837a0ca7fSKees Cook 		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
20937a0ca7fSKees Cook 	} else {
21037a0ca7fSKees Cook 		pr_err("FAIL: Slab was not initialized\n");
21137a0ca7fSKees Cook 		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
21237a0ca7fSKees Cook 	}
21337a0ca7fSKees Cook 	kfree(val);
21437a0ca7fSKees Cook }
21537a0ca7fSKees Cook 
21637a0ca7fSKees Cook void lkdtm_BUDDY_INIT_ON_ALLOC(void)
21737a0ca7fSKees Cook {
21837a0ca7fSKees Cook 	u8 *first;
21937a0ca7fSKees Cook 	u8 *val;
22037a0ca7fSKees Cook 
22137a0ca7fSKees Cook 	first = (u8 *)__get_free_page(GFP_KERNEL);
22237a0ca7fSKees Cook 	if (!first) {
22337a0ca7fSKees Cook 		pr_info("Unable to allocate first free page\n");
22437a0ca7fSKees Cook 		return;
22537a0ca7fSKees Cook 	}
22637a0ca7fSKees Cook 
22737a0ca7fSKees Cook 	memset(first, 0xAB, PAGE_SIZE);
22837a0ca7fSKees Cook 	free_page((unsigned long)first);
22937a0ca7fSKees Cook 
23037a0ca7fSKees Cook 	val = (u8 *)__get_free_page(GFP_KERNEL);
23137a0ca7fSKees Cook 	if (!val) {
23237a0ca7fSKees Cook 		pr_info("Unable to allocate second free page\n");
23337a0ca7fSKees Cook 		return;
23437a0ca7fSKees Cook 	}
23537a0ca7fSKees Cook 
23637a0ca7fSKees Cook 	if (val != first) {
23737a0ca7fSKees Cook 		pr_warn("Reallocation missed clobbered memory.\n");
23837a0ca7fSKees Cook 	}
23937a0ca7fSKees Cook 
24037a0ca7fSKees Cook 	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
24137a0ca7fSKees Cook 		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
24237a0ca7fSKees Cook 	} else {
24337a0ca7fSKees Cook 		pr_err("FAIL: Slab was not initialized\n");
24437a0ca7fSKees Cook 		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
24537a0ca7fSKees Cook 	}
24637a0ca7fSKees Cook 	free_page((unsigned long)val);
24737a0ca7fSKees Cook }
24837a0ca7fSKees Cook 
249966fede8SKees Cook void lkdtm_SLAB_FREE_DOUBLE(void)
250966fede8SKees Cook {
251966fede8SKees Cook 	int *val;
252966fede8SKees Cook 
253966fede8SKees Cook 	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
254966fede8SKees Cook 	if (!val) {
255966fede8SKees Cook 		pr_info("Unable to allocate double_free_cache memory.\n");
256966fede8SKees Cook 		return;
257966fede8SKees Cook 	}
258966fede8SKees Cook 
259966fede8SKees Cook 	/* Just make sure we got real memory. */
260966fede8SKees Cook 	*val = 0x12345678;
261966fede8SKees Cook 	pr_info("Attempting double slab free ...\n");
262966fede8SKees Cook 	kmem_cache_free(double_free_cache, val);
263966fede8SKees Cook 	kmem_cache_free(double_free_cache, val);
264966fede8SKees Cook }
265966fede8SKees Cook 
266966fede8SKees Cook void lkdtm_SLAB_FREE_CROSS(void)
267966fede8SKees Cook {
268966fede8SKees Cook 	int *val;
269966fede8SKees Cook 
270966fede8SKees Cook 	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
271966fede8SKees Cook 	if (!val) {
272966fede8SKees Cook 		pr_info("Unable to allocate a_cache memory.\n");
273966fede8SKees Cook 		return;
274966fede8SKees Cook 	}
275966fede8SKees Cook 
276966fede8SKees Cook 	/* Just make sure we got real memory. */
277966fede8SKees Cook 	*val = 0x12345679;
278966fede8SKees Cook 	pr_info("Attempting cross-cache slab free ...\n");
279966fede8SKees Cook 	kmem_cache_free(b_cache, val);
280966fede8SKees Cook }
281966fede8SKees Cook 
282966fede8SKees Cook void lkdtm_SLAB_FREE_PAGE(void)
283966fede8SKees Cook {
284966fede8SKees Cook 	unsigned long p = __get_free_page(GFP_KERNEL);
285966fede8SKees Cook 
286966fede8SKees Cook 	pr_info("Attempting non-Slab slab free ...\n");
287966fede8SKees Cook 	kmem_cache_free(NULL, (void *)p);
288966fede8SKees Cook 	free_page(p);
289966fede8SKees Cook }
290966fede8SKees Cook 
/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
/* Intentionally empty: a distinct ctor per cache prevents slab merging. */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }
301966fede8SKees Cook 
/* Create the dedicated 64-byte caches used by the slab free tests above. */
void __init lkdtm_heap_init(void)
{
	/* NOTE(review): creation failures are not checked; a NULL cache
	 * would make the corresponding tests misbehave — presumably
	 * acceptable for a crash-test module, but confirm. */
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}
309966fede8SKees Cook 
310966fede8SKees Cook void __exit lkdtm_heap_exit(void)
311966fede8SKees Cook {
312966fede8SKees Cook 	kmem_cache_destroy(double_free_cache);
313966fede8SKees Cook 	kmem_cache_destroy(a_cache);
314966fede8SKees Cook 	kmem_cache_destroy(b_cache);
315966fede8SKees Cook }
316