// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/* Number of 64-bit words written per comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable coverage for the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed); see the user-space sketch below.
 */
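/*
 * Illustrative user-space sketch of the sequence above (not part of this
 * file). It assumes the KCOV_* definitions from the uapi header
 * <linux/kcov.h> and that debugfs is mounted at /sys/kernel/debug;
 * COVER_SIZE is an arbitrary buffer size in unsigned longs, and error
 * handling is omitted:
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	read(-1, NULL, 0);	// the syscall whose coverage we collect
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (unsigned long i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 *	close(fd);
 */
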
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};

static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	enum kcov_mode mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

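/*
 * For reference, a rough sketch (not actual compiler output): building
 * with -fsanitize-coverage=trace-pc makes the compiler insert a call to
 * __sanitizer_cov_trace_pc() at the start of every basic block, so
 *
 *	void foo(int x)
 *	{
 *		if (x)
 *			bar();
 *	}
 *
 * is instrumented approximately as
 *
 *	void foo(int x)
 *	{
 *		__sanitizer_cov_trace_pc();
 *		if (x) {
 *			__sanitizer_cov_trace_pc();
 *			bar();
 *		}
 *	}
 */
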
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

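/*
 * Illustrative user-space sketch (not part of this file) of decoding
 * KCOV_TRACE_CMP output, assuming the mapping from the earlier sketch
 * was enabled with KCOV_TRACE_CMP, and assuming the KCOV_CMP_CONST and
 * KCOV_CMP_SIZE() encoding from the uapi header <linux/kcov.h> (bit 0
 * flags a constant operand, the higher bits hold log2 of the operand
 * size in bytes). area[0] holds the record count; each record is
 * KCOV_WORDS_PER_CMP 64-bit words: type, arg1, arg2, PC.
 *
 *	__u64 *area = (__u64 *)cover;
 *	__u64 n = __atomic_load_n(&area[0], __ATOMIC_RELAXED);
 *	for (__u64 i = 0; i < n; i++) {
 *		__u64 type = area[1 + i * 4];
 *		__u64 arg1 = area[1 + i * 4 + 1];
 *		__u64 arg2 = area[1 + i * 4 + 2];
 *		__u64 pc   = area[1 + i * 4 + 3];
 *		printf("cmp of size %llu%s at 0x%llx: 0x%llx vs 0x%llx\n",
 *		       1ull << (type >> 1),
 *		       (type & KCOV_CMP_CONST) ? " (const)" : "",
 *		       pc, arg1, arg2);
 *	}
 */
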
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Clear the task's kcov state so we don't leave dangling references. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
			return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		kcov_fault_in_area(kcov);
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus there is
	 * no need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);