// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed). An illustrative userspace sketch of this sequence follows below.
 */
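
/*
 * Illustrative userspace usage (a minimal, hedged sketch of the sequence
 * above, following Documentation/dev-tools/kcov.rst; COVER_SIZE is an example
 * value and error handling is omitted):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kcov.h>
 *
 *	#define COVER_SIZE (64 << 10)	// number of unsigned longs
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	// ... issue the syscall of interest ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	// cover[1..n] now hold the PCs executed by this task.
 *	ioctl(fd, KCOV_DISABLE, 0);
 */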

struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
					unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
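	/*
	 * Each record is laid out as (see the stores below):
	 *   word 0 - type (KCOV_CMP_SIZE(n), optionally ORed with
	 *            KCOV_CMP_CONST)
	 *   word 1 - arg1
	 *   word 2 - arg2
	 *   word 3 - caller ip
	 * area[0] itself holds the number of records already written.
	 */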
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_start(struct task_struct *t, struct kcov *kcov,
			unsigned int size, void *area, enum kcov_mode mode,
			int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
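	/*
	 * Publish kcov_size/kcov_area before kcov_mode: the WRITE_ONCE()
	 * below pairs with the READ_ONCE()/barrier() in check_kcov_mode(),
	 * so the instrumentation callbacks never observe an enabled mode
	 * together with stale buffer pointers.
	 */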
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we can make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock_irqrestore(&kcov->lock, flags);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}
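
/*
 * A handle, as checked above, combines a 1-byte subsystem id (the
 * KCOV_SUBSYSTEM_MASK part) with a 4-byte instance id (the KCOV_INSTANCE_MASK
 * part). As a hedged example, a handle for a common remote section could be
 * built in userspace as kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42),
 * where 0x42 is an arbitrary non-zero instance id; see the usage comment
 * before kcov_remote_start() below.
 */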

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;
	unsigned long flags;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
				kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long flags;

	if (cmd == KCOV_REMOTE_ENABLE) {
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
	}

	kcov = filep->private_data;
	spin_lock_irqsave(&kcov->lock, flags);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock_irqrestore(&kcov->lock, flags);

	kfree(remote_arg);

	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the corresponding kcov device start
 * collecting coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
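
/*
 * Illustrative userspace use of KCOV_REMOTE_ENABLE (a minimal, hedged sketch
 * based on the description above and Documentation/dev-tools/kcov.rst; it
 * continues the open()/KCOV_INIT_TRACE/mmap() sketch earlier in this file,
 * and AREA_SIZE, bus_num and the 0x42 instance id are example placeholders;
 * error handling is omitted):
 *
 *	struct kcov_remote_arg *arg;
 *
 *	arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = AREA_SIZE;
 *	arg->num_handles = 1;
 *	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus_num);
 *	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 *	// ... trigger the remote activity, then read cover[] exactly as in
 *	// the KCOV_ENABLE case ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */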

static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
				data->saved_area, data->saved_mode,
				data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_irq_save(flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_irq_restore(flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_irq_restore(flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock_irqrestore(&kcov_remote_lock, flags);

	/* Can only happen when in_task(). */
	if (!area) {
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
	}

	local_irq_save(flags);

	/* Reset coverage size. */
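	/*
	 * Word 0 of the area is the number of recorded entries (see
	 * __sanitizer_cov_trace_pc() and write_comp_data()); clearing it
	 * starts this remote section with an empty buffer.
	 */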
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers use log of entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_irq_save(flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_irq_restore(flags);
		return;
	}
	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_irq_restore(flags);
		return;
	}

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_irq_restore(flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);