// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bsearch.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "kcsan.h"

/*
 * Statistics counters.
 */
static atomic_long_t counters[KCSAN_COUNTER_COUNT];
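/*
 * These counters are exported through the 'kcsan' debugfs file (see
 * show_info() below); e.g. 'cat /sys/kernel/debug/kcsan' prints them,
 * assuming debugfs is mounted at /sys/kernel/debug.
 */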

/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* whether list is a whitelist (false: blacklist) */
} report_filterlist = {
	.addrs		= NULL,
	.size		= 8,		/* small initial size */
	.used		= 0,
	.sorted		= false,
	.whitelist	= false,	/* default is blacklist */
};
static DEFINE_SPINLOCK(report_filterlist_lock);
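/*
 * The list is populated through the debugfs write interface below: writing
 * '!<function>' adds a function, and writing 'whitelist' or 'blacklist'
 * selects how the list is interpreted. For example (assuming debugfs is
 * mounted at /sys/kernel/debug, and using an arbitrary function name):
 *
 *   echo '!__schedule' > /sys/kernel/debug/kcsan
 *   echo whitelist > /sys/kernel/debug/kcsan
 */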

static const char *counter_to_name(enum kcsan_counter_id id)
{
	switch (id) {
	case KCSAN_COUNTER_USED_WATCHPOINTS:		return "used_watchpoints";
	case KCSAN_COUNTER_SETUP_WATCHPOINTS:		return "setup_watchpoints";
	case KCSAN_COUNTER_DATA_RACES:			return "data_races";
	case KCSAN_COUNTER_ASSERT_FAILURES:		return "assert_failures";
	case KCSAN_COUNTER_NO_CAPACITY:			return "no_capacity";
	case KCSAN_COUNTER_REPORT_RACES:		return "report_races";
	case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN:	return "races_unknown_origin";
	case KCSAN_COUNTER_UNENCODABLE_ACCESSES:	return "unencodable_accesses";
	case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES:	return "encoding_false_positives";
	case KCSAN_COUNTER_COUNT:
		BUG();
	}
	return NULL;
}

void kcsan_counter_inc(enum kcsan_counter_id id)
{
	atomic_long_inc(&counters[id]);
}

void kcsan_counter_dec(enum kcsan_counter_id id)
{
	atomic_long_dec(&counters[id]);
}

/*
 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 * debugfs file. This will not generate any conflicts, and tests the fast-path
 * only.
 */
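/*
 * For example, to run it from several tasks in parallel (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   for i in 1 2 3 4; do echo microbench=1000000 > /sys/kernel/debug/kcsan & done; wait
 */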
static noinline void microbenchmark(unsigned long iters)
{
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	const bool was_enabled = READ_ONCE(kcsan_enabled);
	cycles_t cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
	/*
	 * Disable to benchmark fast-path for all accesses, and (expected
	 * negligible) call into slow-path, but never set up watchpoints.
	 */
	WRITE_ONCE(kcsan_enabled, false);

	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);

	cycles = get_cycles();
	while (iters--) {
		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
		__kcsan_check_access((void *)addr, sizeof(long), type);
	}
	cycles = get_cycles() - cycles;

	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);

	WRITE_ONCE(kcsan_enabled, was_enabled);
	/* restore context */
	current->kcsan_ctx = ctx_save;
}

/*
 * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
 * debugfs file from multiple tasks to generate real conflicts and show reports.
 */
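/*
 * For example, from two concurrent tasks (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo test=100000 > /sys/kernel/debug/kcsan &
 *   echo test=100000 > /sys/kernel/debug/kcsan &
 *   wait
 */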
static long test_dummy;
static long test_flags;
static long test_scoped;
static noinline void test_thread(unsigned long iters)
{
	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	cycles_t cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));

	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
	pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
		&test_dummy, &test_flags, &test_scoped);

	cycles = get_cycles();
	while (iters--) {
		/* These all should generate reports. */
		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
		ASSERT_EXCLUSIVE_WRITER(test_dummy);
		ASSERT_EXCLUSIVE_ACCESS(test_dummy);

		ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */

		ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */

		/* not actually instrumented */
		WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
		__kcsan_check_write(&test_dummy, sizeof(test_dummy));

		test_flags ^= CHANGE_BITS; /* generate value-change */
		__kcsan_check_write(&test_flags, sizeof(test_flags));

		BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
		{
			/* Should generate reports anywhere in this block. */
			ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
			ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
			BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
			/* Unrelated accesses. */
			__kcsan_check_access(&cycles, sizeof(cycles), 0);
			__kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
		}
		BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
	}
	cycles = get_cycles() - cycles;

	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);

	/* restore context */
	current->kcsan_ctx = ctx_save;
}

static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
{
	const unsigned long a = *(const unsigned long *)rhs;
	const unsigned long b = *(const unsigned long *)lhs;

	return a < b ? -1 : a == b ? 0 : 1;
}

bool kcsan_skip_report_debugfs(unsigned long func_addr)
{
	unsigned long symbolsize, offset;
	unsigned long flags;
	bool ret = false;

	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
		return false;
	func_addr -= offset; /* Get function start */

	spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == 0)
		goto out;

	/* Sort array if it is unsorted, and then do a binary search. */
	if (!report_filterlist.sorted) {
		sort(report_filterlist.addrs, report_filterlist.used,
		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
		report_filterlist.sorted = true;
	}
	ret = !!bsearch(&func_addr, report_filterlist.addrs,
			report_filterlist.used, sizeof(unsigned long),
			cmp_filterlist_addrs);
	if (report_filterlist.whitelist)
		ret = !ret;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
	return ret;
}

static void set_report_filterlist_whitelist(bool whitelist)
{
	unsigned long flags;

	spin_lock_irqsave(&report_filterlist_lock, flags);
	report_filterlist.whitelist = whitelist;
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
}

/* Returns 0 on success, error-code otherwise. */
static ssize_t insert_report_filterlist(const char *func)
{
	unsigned long flags;
	unsigned long addr = kallsyms_lookup_name(func);
	ssize_t ret = 0;

	if (!addr) {
		pr_err("KCSAN: could not find function: '%s'\n", func);
		return -ENOENT;
	}

	spin_lock_irqsave(&report_filterlist_lock, flags);

	if (report_filterlist.addrs == NULL) {
		/* initial allocation */
		report_filterlist.addrs =
			kmalloc_array(report_filterlist.size,
				      sizeof(unsigned long), GFP_ATOMIC);
		if (report_filterlist.addrs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (report_filterlist.used == report_filterlist.size) {
		/* resize filterlist */
		size_t new_size = report_filterlist.size * 2;
		unsigned long *new_addrs =
			krealloc(report_filterlist.addrs,
				 new_size * sizeof(unsigned long), GFP_ATOMIC);

		if (new_addrs == NULL) {
			/* leave filterlist itself untouched */
			ret = -ENOMEM;
			goto out;
		}

		report_filterlist.size = new_size;
		report_filterlist.addrs = new_addrs;
	}

	/* Note: deduplicating should be done in userspace. */
	report_filterlist.addrs[report_filterlist.used++] = addr;
	report_filterlist.sorted = false;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return ret;
}

static int show_info(struct seq_file *file, void *v)
{
	int i;
	unsigned long flags;

	/* show stats */
	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
		seq_printf(file, "%s: %ld\n", counter_to_name(i),
			   atomic_long_read(&counters[i]));

	/* show filter functions, and filter type */
	spin_lock_irqsave(&report_filterlist_lock, flags);
	seq_printf(file, "\n%s functions: %s\n",
		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
		   report_filterlist.used == 0 ? "none" : "");
	for (i = 0; i < report_filterlist.used; ++i)
		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return 0;
}

static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}

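/*
 * Write handler for the 'kcsan' debugfs file (created in kcsan_debugfs_init()
 * below). Accepted commands, one per write:
 *
 *   on / off             - enable/disable KCSAN at runtime
 *   microbench=<iters>   - run the microbenchmark above
 *   test=<iters>         - run the conflicting-access test above
 *   whitelist, blacklist - select how the report filter list is interpreted
 *   !<function>          - add <function> to the report filter list
 */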
static ssize_t
debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
	char kbuf[KSYM_NAME_LEN];
	char *arg;
	int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);

	if (copy_from_user(kbuf, buf, read_len))
		return -EFAULT;
	kbuf[read_len] = '\0';
	arg = strstrip(kbuf);

	if (!strcmp(arg, "on")) {
		WRITE_ONCE(kcsan_enabled, true);
	} else if (!strcmp(arg, "off")) {
		WRITE_ONCE(kcsan_enabled, false);
	} else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
		unsigned long iters;

		if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
			return -EINVAL;
		microbenchmark(iters);
	} else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
		unsigned long iters;

		if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
			return -EINVAL;
		test_thread(iters);
	} else if (!strcmp(arg, "whitelist")) {
		set_report_filterlist_whitelist(true);
	} else if (!strcmp(arg, "blacklist")) {
		set_report_filterlist_whitelist(false);
	} else if (arg[0] == '!') {
		ssize_t ret = insert_report_filterlist(&arg[1]);

		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	return count;
}

static const struct file_operations debugfs_ops =
{
	.read	 = seq_read,
	.open	 = debugfs_open,
	.write	 = debugfs_write,
	.release = single_release
};

void __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
}