// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
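
/*
 * A minimal user-space sketch of the protocol described above (not part
 * of this file; it assumes debugfs is mounted at /sys/kernel/debug and
 * COVER_SIZE is an arbitrary example buffer size in words):
 *
 *	#define COVER_SIZE	(64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;			   reset the PC counter
 *	... issue the syscall under test ...
 *	unsigned long n = cover[0];	   number of PCs collected
 *	for (unsigned long i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 *	close(fd);
 */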

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || !in_task())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		ip -= kaslr_offset();
#endif

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; the paired barrier()/WRITE_ONCE() are in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
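
/*
 * Illustration of the resulting buffer layout (example values only):
 *
 *	area[0] = 3			   number of PCs recorded so far
 *	area[1] = 0xffffffff8100123c	   first PC hit
 *	area[2] = 0xffffffff81001251
 *	area[3] = 0xffffffff810013ef
 *
 * User space resets area[0] to zero to start a fresh collection; once
 * pos reaches kcov_size, further PCs are silently dropped.
 */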

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
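
/*
 * Example of the constraint enforced above: after
 * ioctl(fd, KCOV_INIT_TRACE, 1024), the matching mapping is
 *
 *	mmap(NULL, 1024 * sizeof(unsigned long), PROT_READ | PROT_WRITE,
 *	     MAP_SHARED, fd, 0);
 *
 * A mapping with any other length, or a non-zero offset, fails with
 * -EINVAL, as does mapping before KCOV_INIT_TRACE.
 */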

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
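
/*
 * For reference, the command values handled above come from the uapi
 * header (include/uapi/linux/kcov.h):
 *
 *	#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
 *	#define KCOV_ENABLE		_IO('c', 100)
 *	#define KCOV_DISABLE		_IO('c', 101)
 */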

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);