#define pr_fmt(fmt) "kcov: " fmt

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed)
 *
 * A sketched user-space usage example follows below.
 */
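/*
 * A minimal sketch of that sequence from user space (illustrative only,
 * not part of this file; COVER_SIZE is an arbitrary word count chosen by
 * the caller, and the path assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL,
 *			COVER_SIZE * sizeof(unsigned long),
 *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;			(reset the collected-PC count)
 *	... issue the syscall under test ...
 *	n = cover[0];			(PCs are in cover[1..n])
 *	ioctl(fd, KCOV_DISABLE, 0);
 */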
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || in_interrupt())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; it pairs with the barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
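/*
 * Resulting buffer layout (a sketch of the scheme implemented above):
 *
 *	area[0]			number of PCs collected so far (n)
 *	area[1] .. area[n]	return addresses of instrumented code
 *
 * New PCs are silently dropped once n reaches t->kcov_size - 1.
 */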

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just so we don't leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
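/*
 * Design note (a summary, not new behavior): vmalloc_user() above returns
 * zeroed memory that is safe to expose to user space, and vm_insert_page()
 * maps each of its pages into the caller's VMA, so kernel writes in
 * __sanitizer_cov_trace_pc() and user-space reads hit the same physical
 * pages with no copying.
 */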

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily via KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
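/*
 * The barrier pairing used above, sketched. KCOV_ENABLE runs in the task
 * being traced (t == current) and __sanitizer_cov_trace_pc() may run from
 * an interrupt on the same CPU, so compiler barriers are sufficient:
 *
 *	writer (KCOV_ENABLE):
 *		t->kcov_size = kcov->size;
 *		t->kcov_area = kcov->area;
 *		barrier();			(acts as a release)
 *		WRITE_ONCE(t->kcov_mode, ...);
 *
 *	reader (__sanitizer_cov_trace_pc):
 *		mode = READ_ONCE(t->kcov_mode);
 *		barrier();			(acts as an acquire)
 *		area = t->kcov_area;		(sees initialized size/area)
 */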

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

static int __init kcov_init(void)
{
	if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);