// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 * (A sketch of the corresponding userspace sequence follows struct kcov
 * below.)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};
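
/*
 * For reference, a minimal sketch of the userspace sequence implied by the
 * state transitions above. It assumes the uapi constants from
 * include/uapi/linux/kcov.h and the debugfs file created in kcov_init();
 * the buffer size is illustrative and error handling is omitted:
 *
 *	#define COVER_SIZE (64 << 10)	// buffer size in unsigned longs
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	// ... issue the syscall(s) of interest ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	// cover[1] .. cover[n] now hold the collected PCs
 *	ioctl(fd, KCOV_DISABLE, 0);
 */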

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	kcov_debug("size = %u\n", size);
	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			kcov_debug("rv = %px\n", area);
			return area;
		}
	}
	kcov_debug("rv = NULL\n");
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
					unsigned int size)
{
	kcov_debug("area = %px, size = %u\n", area, size);
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}
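
/*
 * For readers of write_comp_data() above, a hedged sketch of how userspace
 * can walk a KCOV_TRACE_CMP buffer once coverage has been collected (the
 * variable names are local to this example; the type bits are the
 * KCOV_CMP_SIZE()/KCOV_CMP_CONST values from include/uapi/linux/kcov.h):
 *
 *	u64 *cover = ...;	// the mmap()ed area viewed as 64-bit words
 *	u64 n = cover[0];	// number of comparison records
 *	for (u64 i = 0; i < n; i++) {
 *		u64 *rec = &cover[1 + i * KCOV_WORDS_PER_CMP];
 *		u64 type = rec[0];	// KCOV_CMP_SIZE(...) | KCOV_CMP_CONST
 *		u64 arg1 = rec[1];	// first comparison operand
 *		u64 arg2 = rec[2];	// second comparison operand
 *		u64 ip   = rec[3];	// canonicalized caller PC
 *	}
 */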

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_start(struct task_struct *t, unsigned int size,
			void *area, enum kcov_mode mode, int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
	t->kcov_sequence = sequence;
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov = NULL;
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;

	spin_lock(&kcov_remote_lock);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		kcov_debug("removing handle %llx\n", remote->handle);
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock(&kcov_remote_lock);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock(&kcov->lock);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we can make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		kcov_debug("KCOV_INIT_TRACE\n");
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		kcov_debug("KCOV_ENABLE\n");
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov->size, kcov->area, kcov->mode,
				kcov->sequence);
		t->kcov = kcov;
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		kcov_debug("KCOV_DISABLE\n");
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		kcov_debug("KCOV_REMOTE_ENABLE\n");
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock(&kcov_remote_lock);
		for (i = 0; i < remote_arg->num_handles; i++) {
			kcov_debug("handle %llx\n", remote_arg->handles[i]);
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			kcov_debug("common handle %llx\n",
					remote_arg->common_handle);
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock(&kcov_remote_lock);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;

	if (cmd == KCOV_REMOTE_ENABLE) {
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
	}

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);

	kfree(remote_arg);

	return res;
}
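
/*
 * A hedged sketch of the userspace side of KCOV_REMOTE_ENABLE, mirroring the
 * checks performed by kcov_ioctl()/kcov_ioctl_locked() above. The struct
 * layout and constants come from include/uapi/linux/kcov.h; the buffer sizes
 * and instance ids are illustrative and error handling is omitted:
 *
 *	struct kcov_remote_arg *arg;
 *
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	// mmap() the coverage buffer as for plain KCOV_ENABLE
 *	arg = calloc(1, sizeof(*arg) + sizeof(__u64));	// room for one handle
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = AREA_SIZE;	// per-section remote area, in longs
 *	arg->num_handles = 1;
 *	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, instance_id);
 *	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 */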

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread to allow kcov to be used to collect
 * coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the kcov device start collecting coverage
 * for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD), each
 * instance must be assigned a unique 4-byte instance id. The instance id is
 * then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, this function looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
void kcov_remote_start(u64 handle)
{
	struct kcov_remote *remote;
	void *area;
	struct task_struct *t;
	unsigned int size;
	enum kcov_mode mode;
	int sequence;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (WARN_ON(!in_task()))
		return;
	t = current;
	/*
	 * Check that kcov_remote_start is not called twice
	 * nor called by user tasks (with enabled kcov).
	 */
	if (WARN_ON(t->kcov))
		return;

	kcov_debug("handle = %llx\n", handle);

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		kcov_debug("no remote found");
		spin_unlock(&kcov_remote_lock);
		return;
	}
	/* Put in kcov_remote_stop(). */
	kcov_get(remote->kcov);
	t->kcov = remote->kcov;
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	size = remote->kcov->remote_size;
	mode = remote->kcov->mode;
	sequence = remote->kcov->sequence;
	area = kcov_remote_area_get(size);
	spin_unlock(&kcov_remote_lock);

	if (!area) {
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			t->kcov = NULL;
			kcov_put(remote->kcov);
			return;
		}
	}
	/* Reset coverage size. */
	*(u64 *)area = 0;

	kcov_debug("area = %px, size = %u", area, size);

	kcov_start(t, size, area, mode, sequence);
}
EXPORT_SYMBOL(kcov_remote_start);
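
/*
 * A hedged sketch of the kernel-side annotations described in the comment
 * above kcov_remote_start() (the helpers and constants exist, but the
 * surrounding code and the instance id are illustrative). A background worker
 * that exists once per device instance would do roughly:
 *
 *	kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, instance_id));
 *	// ... process one unit of work ...
 *	kcov_remote_stop();
 *
 * A thread spawned on behalf of a user process instead carries the common
 * handle obtained from the spawning task:
 *
 *	u64 handle = kcov_common_handle();	// in the spawning syscall
 *	// ... pass handle to the new thread ...
 *	kcov_remote_start(handle);		// in the spawned thread
 *	// ... do the work ...
 *	kcov_remote_stop();
 */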

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov = t->kcov;
	void *area = t->kcov_area;
	unsigned int size = t->kcov_size;
	int sequence = t->kcov_sequence;

	if (!kcov) {
		kcov_debug("no kcov found\n");
		return;
	}

	kcov_stop(t);
	t->kcov = NULL;

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the check.
	 */
	kcov_debug("move if: %d == %d && %d\n",
		sequence, kcov->sequence, (int)kcov->remote);
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	spin_lock(&kcov_remote_lock);
	kcov_remote_area_put(area, size);
	spin_unlock(&kcov_remote_lock);

	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);