// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kmsan-checks.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}
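/*
 * A minimal sketch of the userspace side of the state machine documented
 * above (adapted from Documentation/dev-tools/kcov.rst; COVER_SIZE and the
 * omitted error handling are illustrative assumptions, not part of this
 * file):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	// ... issue the syscall under test ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	// cover[1..n] now hold the collected PCs.
 *	ioctl(fd, KCOV_DISABLE, 0);
 */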
/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
				 unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
	/*
	 * KMSAN doesn't instrument this file, so it may not know area->list
	 * is initialized. Unpoison it explicitly to avoid reports in
	 * kcov_remote_area_get().
	 */
	kmsan_unpoison_memory(&area->list, sizeof(area->list));
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts; there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		/*
		 * Previously we wrote the pc before updating pos. However,
		 * some early interrupt code could bypass the
		 * check_kcov_mode() check and invoke
		 * __sanitizer_cov_trace_pc(). If such an interrupt is raised
		 * between writing the pc and updating pos, the pc could be
		 * overwritten by the recursive __sanitizer_cov_trace_pc().
		 * Update pos before writing the pc to avoid such
		 * interleaving.
		 */
		WRITE_ONCE(area[0], pos);
		barrier();
		area[pos] = ip;
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		/* See comment in __sanitizer_cov_trace_pc(). */
		WRITE_ONCE(area[0], count + 1);
		barrier();
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
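/*
 * A sketch of how a userspace consumer might decode the KCOV_TRACE_CMP
 * buffer laid out by write_comp_data() above (the decode_cmp() name and the
 * printf format are illustrative assumptions):
 *
 *	void decode_cmp(const __u64 *area)
 *	{
 *		__u64 i, n = area[0];	// number of records collected
 *
 *		for (i = 0; i < n; i++) {
 *			// Each record is KCOV_WORDS_PER_CMP (4) u64 words.
 *			const __u64 *rec = &area[1 + i * 4];
 *			__u64 type = rec[0];	// size log2 + KCOV_CMP_CONST
 *			__u64 arg1 = rec[1], arg2 = rec[2], ip = rec[3];
 *
 *			printf("ip=0x%llx args=0x%llx,0x%llx const=%d\n",
 *			       ip, arg1, arg2, !!(type & KCOV_CMP_CONST));
 *		}
 *	}
 */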
static void kcov_start(struct task_struct *t, struct kcov *kcov,
		       unsigned int size, void *area, enum kcov_mode mode,
		       int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}
void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	spin_unlock_irqrestore(&kcov->lock, flags);
	vm_flags_set(vma, VM_DONTEXPAND);
	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(kcov->area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res) {
			pr_warn_once("kcov: vm_insert_page() failed\n");
			return res;
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				     bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}
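/*
 * A sketch of the handle layout accepted by kcov_check_handle(), assuming
 * the kcov_remote_handle() helper from <linux/kcov.h>: the top byte selects
 * the subsystem, the low 4 bytes hold the instance id, and all other bits
 * must be zero. For example (the instance ids are arbitrary):
 *
 *	u64 handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	// passes kcov_check_handle(handle, true, false, false)
 *
 *	handle = kcov_remote_handle(KCOV_SUBSYSTEM_USB, busnum);
 *	// passes kcov_check_handle(handle, false, true, false)
 */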
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long flags, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
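/*
 * A minimal userspace sketch of KCOV_REMOTE_ENABLE, assuming one remote
 * handle (adapted from Documentation/dev-tools/kcov.rst; COVER_SIZE, busnum
 * and the instance id 0x42 are illustrative assumptions):
 *
 *	struct kcov_remote_arg *arg;
 *
 *	arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = COVER_SIZE;
 *	arg->num_handles = 1;
 *	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, busnum);
 *	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
 *	if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
 *		perror("ioctl KCOV_REMOTE_ENABLE");
 */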
static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long size, flags;
	void *area;

	kcov = filep->private_data;
	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 *
		 * First check the size argument - it must be at least 2
		 * to hold the current position and one PC.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		area = vmalloc_user(size * sizeof(unsigned long));
		if (area == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&kcov->lock, flags);
		if (kcov->mode != KCOV_MODE_DISABLED) {
			spin_unlock_irqrestore(&kcov->lock, flags);
			vfree(area);
			return -EBUSY;
		}
		kcov->area = area;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		spin_unlock_irqrestore(&kcov->lock, flags);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
		fallthrough;
	default:
		/*
		 * All other commands can be normally executed under a spin
		 * lock, so we obtain and release it here in order to
		 * simplify kcov_ioctl_locked().
		 */
		spin_lock_irqsave(&kcov->lock, flags);
		res = kcov_ioctl_locked(kcov, cmd, arg);
		spin_unlock_irqrestore(&kcov->lock, flags);
		kfree(remote_arg);
		return res;
	}
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};
/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */

static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
				data->saved_area, data->saved_mode,
				data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}
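/*
 * A sketch of the kernel-side annotation described in the comment above, for
 * a background thread spawned on behalf of a user process (the worker loop
 * is an illustrative assumption; the vhost workers mentioned above follow
 * this pattern):
 *
 *	// In the system call handler, while still in the user task:
 *	u64 handle = kcov_common_handle();
 *	// ... store handle in the data passed to the spawned thread ...
 *
 *	// In the spawned kernel thread:
 *	for (;;) {
 *		kcov_remote_start(handle);
 *		// ... do one unit of work for the user process ...
 *		kcov_remote_stop();
 *	}
 */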
void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
			   unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As ARM can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}
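/*
 * A worked example of the shift arithmetic in kcov_move_area() above (the
 * numbers assume 64-bit longs): in KCOV_MODE_TRACE_CMP mode each entry is
 * sizeof(u64) * KCOV_WORDS_PER_CMP = 32 bytes, so entry_size_log = 5. With
 * dst_area_size = 1024 words, the capacity check reads
 *
 *	dst_len > (1024 * 8 - 8) >> 5 == 255
 *
 * i.e. at most 255 complete records fit after the 8-byte counter, and
 * dst_len << 5 recovers the occupied byte count without a u64 division.
 */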
/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus there is no
	 * need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);