xref: /openbmc/linux/mm/kasan/common.c (revision 2cdbed63490d0d2bcbae60abcc5639caa5aba49b)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

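/*
 * Save the current stack trace to the stack depot and return a handle
 * identifying it. filter_irq_stacks() cuts the trace off at the IRQ
 * entry point, dropping the frames of the interrupted task so that
 * interrupt-time traces deduplicate better.
 */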
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

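/* Record the current task's pid and stack trace in an alloc/free track. */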
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

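/*
 * These nest: the report path consults current->kasan_depth, so the
 * enable/disable pair below must stay balanced around the region they
 * bracket.
 */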
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

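/* Mark an arbitrary memory range as accessible in the shadow. */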
void kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	unpoison_range(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}

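/*
 * Page allocator hook: tag every page of the high-order allocation with
 * the same tag (random in SW_TAGS mode), store it in the page flags, and
 * unpoison the backing memory. Highmem pages have no shadow and are
 * skipped.
 */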
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

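/* Page allocator hook: poison freed pages so stale accesses get caught. */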
void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer
 * runtime: the larger the allocation, the larger the redzone placed
 * behind it. For example, a 48-byte object gets a 16-byte redzone and
 * a 2048-byte object gets a 128-byte one.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

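/*
 * Compute the size of a slab object with KASAN metadata appended, and
 * record where that metadata lives. The resulting layout is roughly:
 *
 *   | object | kasan_alloc_meta | kasan_free_meta (optional) | redzone |
 *
 * capped at KMALLOC_MAX_SIZE; if the metadata would not fit, the cache
 * is left without KASAN instrumentation.
 */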
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/* If the metadata does not fit, don't enable KASAN for this cache. */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

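/* Size of the per-object metadata reserved by kasan_cache_create(). */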
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

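/*
 * Poison an entire slab page and reset the KASAN tags of all its pages,
 * so that stale pointer tags don't survive page reuse. Objects are
 * unpoisoned individually as the allocator hands them out.
 */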
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object, considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next
 *    to each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

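/*
 * Called as each object is set up at slab creation: zero the alloc
 * metadata and, in software tag-based mode, preassign the object's tag
 * (init == true).
 */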
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

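/*
 * Returns true when KASAN takes ownership of the object and the caller
 * must skip the actual freeing: generic KASAN puts such objects into the
 * quarantine, which keeps use-after-free accesses detectable for a while.
 */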
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs can legally be used after free within the grace period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

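/*
 * Unpoison size bytes of the object and poison the remainder up to
 * cache->object_size as a KASAN_KMALLOC_REDZONE, with both redzone
 * boundaries rounded up to KASAN_GRANULE_SIZE.
 */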
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

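/*
 * Like kasan_kmalloc() but for allocations served directly by the page
 * allocator: the redzone runs from the requested size to the end of the
 * page(s).
 */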
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

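/*
 * krealloc() may be passed either a slab object or a page-allocator
 * buffer; dispatch on PageSlab() to repoison whichever backs the object.
 */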
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

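/*
 * Poison the memory behind ptr without actually freeing it: page
 * allocator buffers are poisoned whole, slab objects go through
 * __kasan_slab_free() with the quarantine disabled.
 */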
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

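/*
 * Only sanity-check the pointer: freeing anything but the start of the
 * allocation is invalid; poisoning is left to the page allocator itself.
 */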
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
434