#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor (a usage sketch follows the struct):
 * - initial state after open()
 * - then there must be a single ioctl(KCOV_INIT_TRACE) call
 * - then, mmap() call (several calls are allowed but not useful)
 * - then, repeated enable/disable for a task (only one task at a time is allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 * - opened file descriptor
	 * - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
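/*
 * Example user-space client, a minimal sketch of the state machine
 * described above. It assumes debugfs is mounted at /sys/kernel/debug
 * and that the uapi header <linux/kcov.h> (which defines the KCOV_*
 * ioctls) is installed; COVER_SIZE is an arbitrary user-chosen buffer
 * size in longs, and a failing read() stands in for the syscall under
 * test. Plain (non-atomic) buffer accesses suffice here because the
 * program is single-threaded:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kcov.h>
 *
 *	#define COVER_SIZE	(64 << 10)
 *
 *	int main(void)
 *	{
 *		unsigned long *cover, n, i;
 *		int fd;
 *
 *		fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *		if (fd == -1)
 *			perror("open"), exit(1);
 *		// Buffer size is passed in longs, not bytes.
 *		if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
 *			perror("ioctl"), exit(1);
 *		cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if ((void *)cover == MAP_FAILED)
 *			perror("mmap"), exit(1);
 *		// Bind coverage collection to this task.
 *		if (ioctl(fd, KCOV_ENABLE, 0))
 *			perror("ioctl"), exit(1);
 *		cover[0] = 0;		// reset the PC counter
 *		read(-1, NULL, 0);	// the syscall under test
 *		n = cover[0];
 *		for (i = 0; i < n; i++)
 *			printf("0x%lx\n", cover[i + 1]);
 *		if (ioctl(fd, KCOV_DISABLE, 0))
 *			perror("ioctl"), exit(1);
 *		return 0;
 *	}
 */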
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		/* Subtract the KASLR offset so PCs are stable across boots. */
		ip -= kaslr_offset();
#endif

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; it is paired with barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
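/*
 * Resulting buffer layout, as seen through the shared mapping (a sketch;
 * all slots are unsigned longs):
 *
 *	area[0]			number of PCs stored so far
 *	area[1]..area[N]	PCs of covered basic blocks, in execution order
 *
 * Once area[0] reaches kcov_size - 1 the buffer is full and subsequent
 * PCs are silently dropped. A consumer typically zeroes area[0], runs the
 * syscall under test, then reads back area[0] entries.
 */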
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}
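/*
 * Note that kcov_mmap() above rejects any mapping whose length disagrees
 * with the size previously passed to KCOV_INIT_TRACE. A matching pair of
 * calls looks like this sketch (COVER_SIZE, a user-chosen constant, is in
 * longs in the ioctl and must be scaled to bytes for mmap):
 *
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */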
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
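/*
 * Enabling is strictly per task: KCOV_ENABLE above binds the descriptor
 * to the calling thread, and new tasks start with coverage disabled
 * (kcov_task_init() runs for every forked task), so a multi-process
 * fuzzer needs a separate open/init/mmap/enable sequence per worker.
 * A sketch of the per-worker setup, where setup_kcov() is a hypothetical
 * helper wrapping the open/init/mmap steps shown earlier:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		int fd = setup_kcov(&cover);	// hypothetical helper
 *		ioctl(fd, KCOV_ENABLE, 0);	// binds to this child only
 *		run_test();			// hypothetical workload
 *		ioctl(fd, KCOV_DISABLE, 0);
 *		_exit(0);
 *	}
 */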
static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);
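/*
 * Build note (a sketch of the surrounding configuration, not part of this
 * file's logic): the instrumentation callbacks above only fire when the
 * kernel is built with CONFIG_KCOV=y and a compiler that supports
 * -fsanitize-coverage=trace-pc, which emits a call to
 * __sanitizer_cov_trace_pc() in every basic block. With debugfs mounted,
 * the control file then appears at /sys/kernel/debug/kcov.
 */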