// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed).
 */
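/*
 * For illustration, the userspace side of this state machine (a minimal
 * sketch based on Documentation/dev-tools/kcov.rst; error handling is
 * omitted and COVER_SIZE is an arbitrary buffer size chosen by the user):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... issue the syscall under test ...
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED); // number of PCs
 *	ioctl(fd, KCOV_DISABLE, 0);
 */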
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
					unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides a load-acquire w.r.t.
	 * interrupts; it pairs with the barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

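/*
 * For illustration: with CONFIG_KCOV, instrumented files are built with
 * -fsanitize-coverage=trace-pc, which conceptually turns
 *
 *	if (x > 0)
 *		y = 1;
 *
 * into
 *
 *	__sanitizer_cov_trace_pc();
 *	if (x > 0) {
 *		__sanitizer_cov_trace_pc();
 *		y = 1;
 *	}
 */
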
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

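/*
 * Resulting KCOV_TRACE_CMP buffer layout, as implied by write_comp_data()
 * above (record n occupies KCOV_WORDS_PER_CMP consecutive u64 words):
 *	area[0]			- number of records
 *	area[1 + 4*n + 0]	- type: KCOV_CMP_SIZE(sz), possibly
 *				  OR'ed with KCOV_CMP_CONST
 *	area[1 + 4*n + 1]	- arg1
 *	area[1 + 4*n + 2]	- arg2
 *	area[1 + 4*n + 3]	- caller ip
 */
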
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

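/*
 * The cases array is emitted by the compiler for each switch statement:
 * cases[0] holds the number of case values, cases[1] holds the operand
 * size in bits, and cases[2..] hold the case values themselves.
 */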
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_start(struct task_struct *t, struct kcov *kcov,
			unsigned int size, void *area, enum kcov_mode mode,
			int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *        WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *        WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we make sure that t->kcov->t == t:
	 *        WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock_irqrestore(&kcov->lock, flags);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

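/*
 * Handle layout (see kcov_remote_handle() in include/linux/kcov.h): the
 * top byte holds the subsystem id, the low 4 bytes hold the instance id,
 * and the bits in between must be zero.
 */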
static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;
	unsigned long flags;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode, and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
				kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long flags;

	if (cmd == KCOV_REMOTE_ENABLE) {
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
	}

	kcov = filep->private_data;
	spin_lock_irqsave(&kcov->lock, flags);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock_irqrestore(&kcov->lock, flags);

	kfree(remote_arg);

	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the opened kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */

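/*
 * For illustration, two hedged sketches of the flows described above.
 *
 * Kernel side, a global-thread annotation (modeled on the USB hub_event()
 * usage, where the bus number serves as the instance id):
 *
 *	kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB,
 *					     (u64)bus->busnum));
 *	... code whose coverage is collected ...
 *	kcov_remote_stop();
 *
 * Userspace side, enabling remote coverage with a common handle (based on
 * Documentation/dev-tools/kcov.rst; error handling omitted, and userspace
 * constructs the handle the same way kcov_remote_handle() does):
 *
 *	struct kcov_remote_arg *arg = calloc(1, sizeof(*arg));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = AREA_SIZE;
 *	arg->num_handles = 0;
 *	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 */
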
static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
				data->saved_area, data->saved_mode,
				data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

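/*
 * For KCOV_MODE_TRACE_PC on a 64-bit kernel, entry_size_log below is 3
 * (one 8-byte PC per entry); for KCOV_MODE_TRACE_CMP it is 5 (four 8-byte
 * words per record). Shifting by entry_size_log converts between entry
 * counts and byte counts without a u64 division.
 */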
static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As ARM can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* Matches the kcov_get() in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);