xref: /openbmc/linux/mm/kasan/tags.c (revision 7d5cb68a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file contains common tag-based KASAN code.
4  *
5  * Copyright (c) 2018 Google, Inc.
6  * Copyright (c) 2020 Google, Inc.
7  */
8 
9 #include <linux/atomic.h>
10 #include <linux/init.h>
11 #include <linux/kasan.h>
12 #include <linux/kernel.h>
13 #include <linux/memblock.h>
14 #include <linux/memory.h>
15 #include <linux/mm.h>
16 #include <linux/static_key.h>
17 #include <linux/string.h>
18 #include <linux/types.h>
19 
20 #include "kasan.h"
21 #include "../slab.h"
22 
/* Default number of entries when kasan.stack_ring_size= is not given. */
#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)

/* Parsed state of the kasan.stacktrace= early boot parameter. */
enum kasan_arg_stacktrace {
	KASAN_ARG_STACKTRACE_DEFAULT,
	KASAN_ARG_STACKTRACE_OFF,
	KASAN_ARG_STACKTRACE_ON,
};

/* Only consulted during kasan_init_tags(); discarded after init. */
static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

/* Non-zero, as initial pointer values are 0. */
#define STACK_RING_BUSY_PTR ((void *)1)

/*
 * Ring buffer of recent alloc/free stack trace records. Writers
 * (save_stack_info()) take the rwlock for read so they can run
 * concurrently; the report side takes it for write to get a stable
 * view (see the comment in save_stack_info()).
 */
struct kasan_stack_ring stack_ring = {
	.lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
};
42 
43 /* kasan.stacktrace=off/on */
44 static int __init early_kasan_flag_stacktrace(char *arg)
45 {
46 	if (!arg)
47 		return -EINVAL;
48 
49 	if (!strcmp(arg, "off"))
50 		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
51 	else if (!strcmp(arg, "on"))
52 		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
53 	else
54 		return -EINVAL;
55 
56 	return 0;
57 }
58 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
59 
60 /* kasan.stack_ring_size=<number of entries> */
61 static int __init early_kasan_flag_stack_ring_size(char *arg)
62 {
63 	if (!arg)
64 		return -EINVAL;
65 
66 	return kstrtoul(arg, 0, &stack_ring.size);
67 }
68 early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
69 
70 void __init kasan_init_tags(void)
71 {
72 	switch (kasan_arg_stacktrace) {
73 	case KASAN_ARG_STACKTRACE_DEFAULT:
74 		/* Default is specified by kasan_flag_stacktrace definition. */
75 		break;
76 	case KASAN_ARG_STACKTRACE_OFF:
77 		static_branch_disable(&kasan_flag_stacktrace);
78 		break;
79 	case KASAN_ARG_STACKTRACE_ON:
80 		static_branch_enable(&kasan_flag_stacktrace);
81 		break;
82 	}
83 
84 	if (kasan_stack_collection_enabled()) {
85 		if (!stack_ring.size)
86 			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
87 		stack_ring.entries = memblock_alloc(
88 			sizeof(stack_ring.entries[0]) * stack_ring.size,
89 			SMP_CACHE_BYTES);
90 		if (WARN_ON(!stack_ring.entries))
91 			static_branch_disable(&kasan_flag_stacktrace);
92 	}
93 }
94 
/*
 * Record an alloc or free stack trace for @object into the global stack
 * ring. Lock-free with respect to other writers: a slot is claimed by
 * cmpxchg-ing its ptr field to STACK_RING_BUSY_PTR, filled, and then
 * published with a release store of the object pointer.
 */
static void save_stack_info(struct kmem_cache *cache, void *object,
			gfp_t gfp_flags, bool is_free)
{
	unsigned long flags;
	depot_stack_handle_t stack;
	u64 pos;
	struct kasan_stack_ring_entry *entry;
	void *old_ptr;

	/* Capture the current stack trace before touching the ring. */
	stack = kasan_save_stack(gfp_flags, true);

	/*
	 * Prevent save_stack_info() from modifying stack ring
	 * when kasan_complete_mode_report_info() is walking it.
	 */
	read_lock_irqsave(&stack_ring.lock, flags);

next:
	/* Atomically advance the global position to pick a candidate slot. */
	pos = atomic64_fetch_add(1, &stack_ring.pos);
	entry = &stack_ring.entries[pos % stack_ring.size];

	/* Detect stack ring entry slots that are being written to. */
	old_ptr = READ_ONCE(entry->ptr);
	if (old_ptr == STACK_RING_BUSY_PTR)
		goto next; /* Busy slot. */
	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
		goto next; /* Busy slot. */

	/* Slot is claimed; fill in the record fields before publishing. */
	WRITE_ONCE(entry->size, cache->object_size);
	WRITE_ONCE(entry->pid, current->pid);
	WRITE_ONCE(entry->stack, stack);
	WRITE_ONCE(entry->is_free, is_free);

	/*
	 * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
	 * NOTE(review): the (s64) cast on a value stored into what is read
	 * back as a pointer (entry->ptr) looks suspicious — confirm the
	 * declared type of kasan_stack_ring_entry::ptr in kasan.h.
	 */
	smp_store_release(&entry->ptr, (s64)object);

	read_unlock_irqrestore(&stack_ring.lock, flags);
}
135 
136 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
137 {
138 	save_stack_info(cache, object, flags, false);
139 }
140 
141 void kasan_save_free_info(struct kmem_cache *cache, void *object)
142 {
143 	save_stack_info(cache, object, 0, true);
144 }
145