xref: /openbmc/linux/kernel/kcsan/debugfs.c (revision 09b1b134)
// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN debugfs interface.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bsearch.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "kcsan.h"

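/*
 * Runtime statistics; the counters below are exposed through the read side of
 * the "kcsan" debugfs file (see show_info() below).
 */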
atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KCSAN_COUNTER_USED_WATCHPOINTS]		= "used_watchpoints",
	[KCSAN_COUNTER_SETUP_WATCHPOINTS]		= "setup_watchpoints",
	[KCSAN_COUNTER_DATA_RACES]			= "data_races",
	[KCSAN_COUNTER_ASSERT_FAILURES]			= "assert_failures",
	[KCSAN_COUNTER_NO_CAPACITY]			= "no_capacity",
	[KCSAN_COUNTER_REPORT_RACES]			= "report_races",
	[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]		= "races_unknown_origin",
	[KCSAN_COUNTER_UNENCODABLE_ACCESSES]		= "unencodable_accesses",
	[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]	= "encoding_false_positives",
};
static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);

/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
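/*
 * Example usage via the "kcsan" debugfs file created in kcsan_debugfs_init()
 * (debugfs is typically mounted at /sys/kernel/debug):
 *   echo whitelist > /sys/kernel/debug/kcsan
 *   echo '!some_function' > /sys/kernel/debug/kcsan
 * The function name after '!' is only an illustration; any symbol resolvable
 * by kallsyms works.
 */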
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* whether elements are sorted */
	bool		whitelist;	/* whether list is a whitelist (vs. blacklist) */
} report_filterlist = {
	.addrs		= NULL,
	.size		= 8,		/* small initial size */
	.used		= 0,
	.sorted		= false,
	.whitelist	= false,	/* default is blacklist */
};
static DEFINE_SPINLOCK(report_filterlist_lock);

/*
 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 * debugfs file. This will not generate any conflicts, and tests fast-path only.
 */
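/*
 * Example invocation (assuming debugfs is mounted at /sys/kernel/debug, where
 * kcsan_debugfs_init() below creates the "kcsan" file):
 *   echo microbench=1000000 > /sys/kernel/debug/kcsan
 */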
static noinline void microbenchmark(unsigned long iters)
{
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	const bool was_enabled = READ_ONCE(kcsan_enabled);
	u64 cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
	/*
	 * Disable to benchmark fast-path for all accesses, and (expected
	 * negligible) call into slow-path, but never set up watchpoints.
	 */
	WRITE_ONCE(kcsan_enabled, false);

	pr_info("%s begin | iters: %lu\n", __func__, iters);

	cycles = get_cycles();
	while (iters--) {
		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
		__kcsan_check_access((void *)addr, sizeof(long), type);
	}
	cycles = get_cycles() - cycles;

	pr_info("%s end   | cycles: %llu\n", __func__, cycles);

	WRITE_ONCE(kcsan_enabled, was_enabled);
	/* restore context */
	current->kcsan_ctx = ctx_save;
}

static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
{
	const unsigned long a = *(const unsigned long *)rhs;
	const unsigned long b = *(const unsigned long *)lhs;

	return a < b ? -1 : a == b ? 0 : 1;
}

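/*
 * Returns true if the report for the function containing func_addr should be
 * skipped: i.e. the function is in the list and the list is a blacklist, or
 * the function is not in the list and the list is a whitelist.
 */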
bool kcsan_skip_report_debugfs(unsigned long func_addr)
{
	unsigned long symbolsize, offset;
	unsigned long flags;
	bool ret = false;

	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
		return false;
	func_addr -= offset; /* Get function start */

	spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == 0)
		goto out;

	/* Sort array if it is unsorted, and then do a binary search. */
	if (!report_filterlist.sorted) {
		sort(report_filterlist.addrs, report_filterlist.used,
		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
		report_filterlist.sorted = true;
	}
	ret = !!bsearch(&func_addr, report_filterlist.addrs,
			report_filterlist.used, sizeof(unsigned long),
			cmp_filterlist_addrs);
	if (report_filterlist.whitelist)
		ret = !ret;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
	return ret;
}

static void set_report_filterlist_whitelist(bool whitelist)
{
	unsigned long flags;

	spin_lock_irqsave(&report_filterlist_lock, flags);
	report_filterlist.whitelist = whitelist;
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
}

/* Returns 0 on success, error-code otherwise. */
static ssize_t insert_report_filterlist(const char *func)
{
	unsigned long flags;
	unsigned long addr = kallsyms_lookup_name(func);
	ssize_t ret = 0;

	if (!addr) {
		pr_err("could not find function: '%s'\n", func);
		return -ENOENT;
	}

	spin_lock_irqsave(&report_filterlist_lock, flags);

	if (report_filterlist.addrs == NULL) {
		/* initial allocation */
		report_filterlist.addrs =
			kmalloc_array(report_filterlist.size,
				      sizeof(unsigned long), GFP_ATOMIC);
		if (report_filterlist.addrs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (report_filterlist.used == report_filterlist.size) {
		/* resize filterlist */
		size_t new_size = report_filterlist.size * 2;
		unsigned long *new_addrs =
			krealloc(report_filterlist.addrs,
				 new_size * sizeof(unsigned long), GFP_ATOMIC);

		if (new_addrs == NULL) {
			/* leave filterlist itself untouched */
			ret = -ENOMEM;
			goto out;
		}

		report_filterlist.size = new_size;
		report_filterlist.addrs = new_addrs;
	}

	/* Note: deduplicating should be done in userspace. */
	report_filterlist.addrs[report_filterlist.used++] = addr;
	report_filterlist.sorted = false;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return ret;
}

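/*
 * Produce the contents read from the debugfs file: the enabled state, all
 * counters, and the current report filter list with its interpretation.
 */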
static int show_info(struct seq_file *file, void *v)
{
	int i;
	unsigned long flags;

	/* show stats */
	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
		seq_printf(file, "%s: %ld\n", counter_names[i],
			   atomic_long_read(&kcsan_counters[i]));
	}

	/* show filter functions, and filter type */
	spin_lock_irqsave(&report_filterlist_lock, flags);
	seq_printf(file, "\n%s functions: %s\n",
		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
		   report_filterlist.used == 0 ? "none" : "");
	for (i = 0; i < report_filterlist.used; ++i)
		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return 0;
}

static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}

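/*
 * Interprets one command per write to the debugfs file:
 *   on / off              - enable/disable KCSAN at runtime
 *   microbench=<iters>    - run the microbenchmark above
 *   whitelist / blacklist - select how the report filter list is interpreted
 *   !<function>           - add <function> to the report filter list
 */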
static ssize_t
debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
	char kbuf[KSYM_NAME_LEN];
	char *arg;
	int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);

	if (copy_from_user(kbuf, buf, read_len))
		return -EFAULT;
	kbuf[read_len] = '\0';
	arg = strstrip(kbuf);

	if (!strcmp(arg, "on")) {
		WRITE_ONCE(kcsan_enabled, true);
	} else if (!strcmp(arg, "off")) {
		WRITE_ONCE(kcsan_enabled, false);
	} else if (str_has_prefix(arg, "microbench=")) {
		unsigned long iters;

		if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
			return -EINVAL;
		microbenchmark(iters);
	} else if (!strcmp(arg, "whitelist")) {
		set_report_filterlist_whitelist(true);
	} else if (!strcmp(arg, "blacklist")) {
		set_report_filterlist_whitelist(false);
	} else if (arg[0] == '!') {
		ssize_t ret = insert_report_filterlist(&arg[1]);

		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	return count;
}

static const struct file_operations debugfs_ops = {
	.read	 = seq_read,
	.open	 = debugfs_open,
	.write	 = debugfs_write,
	.release = single_release,
};

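/* Create the "kcsan" control file at the debugfs root (typically /sys/kernel/debug/kcsan). */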
static int __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
	return 0;
}

late_initcall(kcsan_debugfs_init);