xref: /openbmc/linux/kernel/kcsan/debugfs.c (revision dca4e74a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KCSAN debugfs interface.
4  *
5  * Copyright (C) 2019, Google LLC.
6  */
7 
8 #define pr_fmt(fmt) "kcsan: " fmt
9 
10 #include <linux/atomic.h>
11 #include <linux/bsearch.h>
12 #include <linux/bug.h>
13 #include <linux/debugfs.h>
14 #include <linux/init.h>
15 #include <linux/kallsyms.h>
16 #include <linux/sched.h>
17 #include <linux/seq_file.h>
18 #include <linux/slab.h>
19 #include <linux/sort.h>
20 #include <linux/string.h>
21 #include <linux/uaccess.h>
22 
23 #include "kcsan.h"
24 
/* Runtime statistics counters, indexed by enum kcsan_counter_id; shown via debugfs. */
atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
/* Human-readable names for the counters above, printed by show_info(). */
static const char *const counter_names[] = {
	[KCSAN_COUNTER_USED_WATCHPOINTS]		= "used_watchpoints",
	[KCSAN_COUNTER_SETUP_WATCHPOINTS]		= "setup_watchpoints",
	[KCSAN_COUNTER_DATA_RACES]			= "data_races",
	[KCSAN_COUNTER_ASSERT_FAILURES]			= "assert_failures",
	[KCSAN_COUNTER_NO_CAPACITY]			= "no_capacity",
	[KCSAN_COUNTER_REPORT_RACES]			= "report_races",
	[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]		= "races_unknown_origin",
	[KCSAN_COUNTER_UNENCODABLE_ACCESSES]		= "unencodable_accesses",
	[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]	= "encoding_false_positives",
};
/* Every counter must have a name entry. */
static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
38 
/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* true: list is a whitelist; false: blacklist */
} report_filterlist;
/* Serializes all accesses to report_filterlist. */
static DEFINE_RAW_SPINLOCK(report_filterlist_lock);
51 
52 /*
53  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
54  * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
55  * debugfs file. This will not generate any conflicts, and tests fast-path only.
56  */
microbenchmark(unsigned long iters)57 static noinline void microbenchmark(unsigned long iters)
58 {
59 	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
60 	const bool was_enabled = READ_ONCE(kcsan_enabled);
61 	u64 cycles;
62 
63 	/* We may have been called from an atomic region; reset context. */
64 	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
65 	/*
66 	 * Disable to benchmark fast-path for all accesses, and (expected
67 	 * negligible) call into slow-path, but never set up watchpoints.
68 	 */
69 	WRITE_ONCE(kcsan_enabled, false);
70 
71 	pr_info("%s begin | iters: %lu\n", __func__, iters);
72 
73 	cycles = get_cycles();
74 	while (iters--) {
75 		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
76 		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
77 				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
78 		__kcsan_check_access((void *)addr, sizeof(long), type);
79 	}
80 	cycles = get_cycles() - cycles;
81 
82 	pr_info("%s end   | cycles: %llu\n", __func__, cycles);
83 
84 	WRITE_ONCE(kcsan_enabled, was_enabled);
85 	/* restore context */
86 	current->kcsan_ctx = ctx_save;
87 }
88 
/*
 * Comparator for sort() and bsearch() over report_filterlist.addrs.
 *
 * Fix: the original declared its first parameter "rhs" and second "lhs",
 * which is backwards for a left/right comparator and misleading to readers.
 * Returns -1/0/1 for *lhs < / == / > *rhs.
 */
static int cmp_filterlist_addrs(const void *lhs, const void *rhs)
{
	const unsigned long a = *(const unsigned long *)lhs;
	const unsigned long b = *(const unsigned long *)rhs;

	return a < b ? -1 : a == b ? 0 : 1;
}
96 
kcsan_skip_report_debugfs(unsigned long func_addr)97 bool kcsan_skip_report_debugfs(unsigned long func_addr)
98 {
99 	unsigned long symbolsize, offset;
100 	unsigned long flags;
101 	bool ret = false;
102 
103 	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
104 		return false;
105 	func_addr -= offset; /* Get function start */
106 
107 	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
108 	if (report_filterlist.used == 0)
109 		goto out;
110 
111 	/* Sort array if it is unsorted, and then do a binary search. */
112 	if (!report_filterlist.sorted) {
113 		sort(report_filterlist.addrs, report_filterlist.used,
114 		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
115 		report_filterlist.sorted = true;
116 	}
117 	ret = !!bsearch(&func_addr, report_filterlist.addrs,
118 			report_filterlist.used, sizeof(unsigned long),
119 			cmp_filterlist_addrs);
120 	if (report_filterlist.whitelist)
121 		ret = !ret;
122 
123 out:
124 	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
125 	return ret;
126 }
127 
/* Select whether the filterlist acts as a whitelist (true) or blacklist (false). */
static void set_report_filterlist_whitelist(bool whitelist)
{
	unsigned long irqflags;

	raw_spin_lock_irqsave(&report_filterlist_lock, irqflags);
	report_filterlist.whitelist = whitelist;
	raw_spin_unlock_irqrestore(&report_filterlist_lock, irqflags);
}
136 
137 /* Returns 0 on success, error-code otherwise. */
insert_report_filterlist(const char * func)138 static ssize_t insert_report_filterlist(const char *func)
139 {
140 	unsigned long flags;
141 	unsigned long addr = kallsyms_lookup_name(func);
142 	unsigned long *delay_free = NULL;
143 	unsigned long *new_addrs = NULL;
144 	size_t new_size = 0;
145 	ssize_t ret = 0;
146 
147 	if (!addr) {
148 		pr_err("could not find function: '%s'\n", func);
149 		return -ENOENT;
150 	}
151 
152 retry_alloc:
153 	/*
154 	 * Check if we need an allocation, and re-validate under the lock. Since
155 	 * the report_filterlist_lock is a raw, cannot allocate under the lock.
156 	 */
157 	if (data_race(report_filterlist.used == report_filterlist.size)) {
158 		new_size = (report_filterlist.size ?: 4) * 2;
159 		delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL);
160 		if (!new_addrs)
161 			return -ENOMEM;
162 	}
163 
164 	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
165 	if (report_filterlist.used == report_filterlist.size) {
166 		/* Check we pre-allocated enough, and retry if not. */
167 		if (report_filterlist.used >= new_size) {
168 			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
169 			kfree(new_addrs); /* kfree(NULL) is safe */
170 			delay_free = new_addrs = NULL;
171 			goto retry_alloc;
172 		}
173 
174 		if (report_filterlist.used)
175 			memcpy(new_addrs, report_filterlist.addrs, report_filterlist.used * sizeof(unsigned long));
176 		delay_free = report_filterlist.addrs; /* free the old list */
177 		report_filterlist.addrs = new_addrs;  /* switch to the new list */
178 		report_filterlist.size = new_size;
179 	}
180 
181 	/* Note: deduplicating should be done in userspace. */
182 	report_filterlist.addrs[report_filterlist.used++] =
183 		kallsyms_lookup_name(func);
184 	report_filterlist.sorted = false;
185 
186 	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
187 
188 	kfree(delay_free);
189 	return ret;
190 }
191 
show_info(struct seq_file * file,void * v)192 static int show_info(struct seq_file *file, void *v)
193 {
194 	int i;
195 	unsigned long flags;
196 
197 	/* show stats */
198 	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
199 	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
200 		seq_printf(file, "%s: %ld\n", counter_names[i],
201 			   atomic_long_read(&kcsan_counters[i]));
202 	}
203 
204 	/* show filter functions, and filter type */
205 	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
206 	seq_printf(file, "\n%s functions: %s\n",
207 		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
208 		   report_filterlist.used == 0 ? "none" : "");
209 	for (i = 0; i < report_filterlist.used; ++i)
210 		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
211 	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
212 
213 	return 0;
214 }
215 
/* debugfs open callback: wire the read side up to show_info() via seq_file. */
static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}
220 
221 static ssize_t
debugfs_write(struct file * file,const char __user * buf,size_t count,loff_t * off)222 debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
223 {
224 	char kbuf[KSYM_NAME_LEN];
225 	char *arg;
226 	int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);
227 
228 	if (copy_from_user(kbuf, buf, read_len))
229 		return -EFAULT;
230 	kbuf[read_len] = '\0';
231 	arg = strstrip(kbuf);
232 
233 	if (!strcmp(arg, "on")) {
234 		WRITE_ONCE(kcsan_enabled, true);
235 	} else if (!strcmp(arg, "off")) {
236 		WRITE_ONCE(kcsan_enabled, false);
237 	} else if (str_has_prefix(arg, "microbench=")) {
238 		unsigned long iters;
239 
240 		if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
241 			return -EINVAL;
242 		microbenchmark(iters);
243 	} else if (!strcmp(arg, "whitelist")) {
244 		set_report_filterlist_whitelist(true);
245 	} else if (!strcmp(arg, "blacklist")) {
246 		set_report_filterlist_whitelist(false);
247 	} else if (arg[0] == '!') {
248 		ssize_t ret = insert_report_filterlist(&arg[1]);
249 
250 		if (ret < 0)
251 			return ret;
252 	} else {
253 		return -EINVAL;
254 	}
255 
256 	return count;
257 }
258 
259 static const struct file_operations debugfs_ops =
260 {
261 	.read	 = seq_read,
262 	.open	 = debugfs_open,
263 	.write	 = debugfs_write,
264 	.release = single_release
265 };
266 
/* Create the "kcsan" debugfs control/stats file. */
static int __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
	return 0;
}

/* Registered late so debugfs itself is guaranteed to be initialized. */
late_initcall(kcsan_debugfs_init);
274