#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
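
/*
 * For illustration, a minimal user-space sketch of the sequence above
 * (not kernel code; the ioctl numbers mirror include/uapi/linux/kcov.h,
 * and COVER_SIZE, fd and cover are arbitrary example names):
 *
 *	#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
 *	#define KCOV_ENABLE	_IO('c', 100)
 *	#define KCOV_DISABLE	_IO('c', 101)
 *	#define COVER_SIZE	(64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL,
 *			COVER_SIZE * sizeof(unsigned long),
 *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 */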

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		ip -= kaslr_offset();
#endif

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; it is paired with the barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
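
/*
 * For illustration, user space can harvest the PCs recorded above roughly
 * as follows (a sketch continuing the example after struct kcov; cover is
 * the mmap()ed buffer, error handling is omitted):
 *
 *	unsigned long n, i;
 *
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	read(-1, NULL, 0);	// the syscall under test
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 */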

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed, and thus there is
	 * no need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);