1039a1c42SKees Cook // SPDX-License-Identifier: GPL-2.0
2039a1c42SKees Cook /*
3039a1c42SKees Cook * This is for all the tests relating directly to heap memory, including
4039a1c42SKees Cook * page allocation and slab allocations.
5039a1c42SKees Cook */
6039a1c42SKees Cook #include "lkdtm.h"
7039a1c42SKees Cook #include <linux/slab.h>
89c4f6ebcSKees Cook #include <linux/vmalloc.h>
9039a1c42SKees Cook #include <linux/sched.h>
10039a1c42SKees Cook
/*
 * Private caches used by the SLAB_FREE_DOUBLE and SLAB_FREE_CROSS tests;
 * created in lkdtm_heap_init() and destroyed in lkdtm_heap_exit().
 */
static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;
14966fede8SKees Cook
/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 *
 * Used below to push a memset() exactly one byte past the end of an
 * allocation without the compiler being able to prove it.
 */
static volatile int __offset = 1;
21e6d468d3SKees Cook
/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	/*
	 * Hide "one" from the optimizer so the out-of-bounds memset()
	 * below cannot be detected (and rejected or elided) at compile
	 * time; the overflow must actually happen at run time.
	 */
	OPTIMIZER_HIDE_VAR(one);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	/*
	 * Writes PAGE_SIZE + 1 bytes into a PAGE_SIZE area: the last
	 * byte lands in the guard page after the vmap allocation, which
	 * should fault. (__offset is volatile, so the length is only
	 * known at run time.)
	 */
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}
439c4f6ebcSKees Cook
/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASan, or by using
 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	/* 1020-byte request is served from the 1024-byte kmalloc bucket. */
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	/* Keep the compiler from proving the write below is out of bounds. */
	OPTIMIZER_HIDE_VAR(data);
	/*
	 * Writes 4 bytes at byte offset 1024 -- the first bytes past the
	 * end of the backing 1024-byte slab object.
	 */
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}
64039a1c42SKees Cook
/*
 * Deliberate use-after-free write into a kmalloc allocation; should be
 * caught by slab debugging/poisoning or KASan.
 */
static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	/* The actual fault: write into memory that was just freed. */
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
90039a1c42SKees Cook
/*
 * Deliberate use-after-free read from a kmalloc allocation; declares a
 * win if the freed memory was poisoned (value changed after kfree()).
 */
static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use the either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 *
	 * NOTE(review): "offset" is used as an *element* index below, so
	 * base[offset] is base[sizeof(*base)] -- the fifth word (byte 16),
	 * not the second word the comment describes. It still avoids both
	 * possible freelist locations, but comment and code disagree;
	 * confirm which was intended.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	/* The actual fault: read from memory that was just freed. */
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
134039a1c42SKees Cook
/*
 * Deliberate write to a page after it has been returned to the buddy
 * allocator; should be caught by page poisoning or KASan.
 */
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	/* Give other contexts a chance to reuse/check the freed page. */
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	/* The actual fault: scribble over the freed page. */
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
154039a1c42SKees Cook
/*
 * Deliberate read from a page after it has been returned to the buddy
 * allocator; declares a win if the freed page was poisoned.
 */
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	/* Separate allocation holding the reference value to compare against. */
	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	/* The actual fault: read the first word of the freed page. */
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
191966fede8SKees Cook
/*
 * Check that slab allocations are zeroed on allocation: fill an object
 * with 0xAB, free it, reallocate (hoping for the same object back), and
 * look for leftover 0xAB bytes.
 */
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	/* Clobber the object so stale contents are recognizable. */
	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	/*
	 * The test relies on getting the just-freed object back; if the
	 * allocator handed out different memory, the result is inconclusive.
	 */
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	/* Any surviving 0xAB byte means the allocation was not initialized. */
	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}
22337a0ca7fSKees Cook
lkdtm_BUDDY_INIT_ON_ALLOC(void)22473f62e60SKees Cook static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
22537a0ca7fSKees Cook {
22637a0ca7fSKees Cook u8 *first;
22737a0ca7fSKees Cook u8 *val;
22837a0ca7fSKees Cook
22937a0ca7fSKees Cook first = (u8 *)__get_free_page(GFP_KERNEL);
23037a0ca7fSKees Cook if (!first) {
23137a0ca7fSKees Cook pr_info("Unable to allocate first free page\n");
23237a0ca7fSKees Cook return;
23337a0ca7fSKees Cook }
23437a0ca7fSKees Cook
23537a0ca7fSKees Cook memset(first, 0xAB, PAGE_SIZE);
23637a0ca7fSKees Cook free_page((unsigned long)first);
23737a0ca7fSKees Cook
23837a0ca7fSKees Cook val = (u8 *)__get_free_page(GFP_KERNEL);
23937a0ca7fSKees Cook if (!val) {
24037a0ca7fSKees Cook pr_info("Unable to allocate second free page\n");
24137a0ca7fSKees Cook return;
24237a0ca7fSKees Cook }
24337a0ca7fSKees Cook
24437a0ca7fSKees Cook if (val != first) {
24537a0ca7fSKees Cook pr_warn("Reallocation missed clobbered memory.\n");
24637a0ca7fSKees Cook }
24737a0ca7fSKees Cook
24837a0ca7fSKees Cook if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
24937a0ca7fSKees Cook pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
25037a0ca7fSKees Cook } else {
25137a0ca7fSKees Cook pr_err("FAIL: Slab was not initialized\n");
25237a0ca7fSKees Cook pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
25337a0ca7fSKees Cook }
25437a0ca7fSKees Cook free_page((unsigned long)val);
25537a0ca7fSKees Cook }
25637a0ca7fSKees Cook
/*
 * Deliberate double free of a slab object; should be caught by slab
 * debugging, KASan, or CONFIG_SLAB_FREELIST_HARDENED.
 */
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	/* The actual fault: free the same object a second time. */
	kmem_cache_free(double_free_cache, val);
}
273966fede8SKees Cook
/*
 * Deliberate cross-cache free: allocate from a_cache but free into
 * b_cache. The no-op constructors below keep these caches from being
 * merged, so the mismatch is real.
 */
static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	/* The actual fault: object belongs to a_cache, freed to b_cache. */
	kmem_cache_free(b_cache, val);
}
289966fede8SKees Cook
/*
 * Deliberately pass a buddy-allocated page to the slab free path.
 *
 * NOTE(review): __get_free_page() failure is not checked here, so a
 * failed allocation would pass NULL to kmem_cache_free() -- presumably
 * acceptable for a crash test, but confirm that is intentional.
 */
static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	pr_info("Attempting non-Slab slab free ...\n");
	/* The actual fault: this memory never came from any slab cache. */
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}
298966fede8SKees Cook
/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
/* No-op constructor; its mere presence prevents cache merging. */
static void ctor_double_free(void *region)
{ }
/* No-op constructor; its mere presence prevents cache merging. */
static void ctor_a(void *region)
{ }
/* No-op constructor; its mere presence prevents cache merging. */
static void ctor_b(void *region)
{ }
309966fede8SKees Cook
lkdtm_heap_init(void)310966fede8SKees Cook void __init lkdtm_heap_init(void)
311966fede8SKees Cook {
312966fede8SKees Cook double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
313966fede8SKees Cook 64, 0, 0, ctor_double_free);
314966fede8SKees Cook a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
315966fede8SKees Cook b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
316966fede8SKees Cook }
317966fede8SKees Cook
lkdtm_heap_exit(void)318966fede8SKees Cook void __exit lkdtm_heap_exit(void)
319966fede8SKees Cook {
320966fede8SKees Cook kmem_cache_destroy(double_free_cache);
321966fede8SKees Cook kmem_cache_destroy(a_cache);
322966fede8SKees Cook kmem_cache_destroy(b_cache);
323966fede8SKees Cook }
32473f62e60SKees Cook
/* Registry of all heap crash tests, exported via heap_crashtypes below. */
static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};
33873f62e60SKees Cook
/* Category descriptor consumed by the lkdtm core to register these tests. */
struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};
343