// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       16384ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: device address returned by the mapping function
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of entries in @stack_entries
 * @stack_entries: stack of the mapping call, to support backtraces when a
 *                 violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;
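/*
 * The three counters above are exported read-only through debugfs (see
 * dma_debug_fs_init()) and, outside of early init, are only updated with
 * free_entries_lock held.
 */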
/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
	[DMA_NONE]		= "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address; bits
	 * HASH_FN_SHIFT and above select one of the HASH_SIZE buckets.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
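/*
 * Example (using the HASH_* values above): dev_addr 0x12345678 is tracked
 * in bucket (0x12345678 >> 13) & 0x3fff == 0x11a2.  Addresses that differ
 * only in the low HASH_FN_SHIFT bits always share a bucket.
 */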
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
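/*
 * Two lookup flavours are built on top of __hash_bucket_find():
 * bucket_find_exact() matches on the exact device address (used on unmap),
 * while bucket_find_contain() accepts any tracked mapping that fully
 * contains the reference range (used on sync).  Because the bucket index is
 * taken from dev_addr bits above HASH_FN_SHIFT, a large mapping may start in
 * an earlier bucket than the address being synced; bucket_find_contain()
 * therefore walks backwards one bucket (1 << HASH_FN_SHIFT address bytes) at
 * a time, bounded by the device's maximum segment size.
 */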
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
		cond_resched();
	}
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings.  For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree.  In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
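/*
 * The tree below is keyed by "cacheline number", i.e. the global index of a
 * CPU cacheline: pfn * CACHELINES_PER_PAGE plus the cacheline index within
 * the page (see to_cacheline_number()).  As an illustration, on a
 * configuration with 4K pages and 64-byte L1 cachelines, CACHELINES_PER_PAGE
 * is 64, so pfn 0x1234 at page offset 0x80 maps to cacheline number
 * 0x1234 * 64 + 2 == 0x48d02.
 */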
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
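/*
 * To illustrate the pool sizing: with the default PREALLOC_DMA_DEBUG_ENTRIES
 * of 1 << 16, dma_entry_alloc() grows the pool one page of entries at a time
 * (DMA_DEBUG_DYNAMIC_ENTRIES) once the preallocation is exhausted, and
 * __dma_entry_alloc_check_leak() logs a line each time the total crosses
 * another multiple of the initial 65536 entries ("200%", "300%", ...), which
 * usually points at a mapping leak somewhere.
 */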
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

static void dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}
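/*
 * Bus code (the PCI core, for example) registers the notifier above via
 * dma_debug_add_bus() below, so that a driver unbinding with DMA mappings
 * still outstanding is reported as a leak.
 */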
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	dma_debug_fs_init();

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, flags);
}
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, flags);
}
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}

void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_single;
	entry->pfn	 = page_to_pfn(page);
	entry->offset	 = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
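/*
 * A driver is expected to check the result of dma_map_single()/dma_map_page()
 * before using the returned handle, along the lines of this (illustrative)
 * sketch:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * debug_dma_mapping_error() below records that such a check took place, so
 * that check_unmap() can warn (MAP_ERR_NOT_CHECKED) when a mapping is torn
 * down without one.
 */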
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_single,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn	      = page_to_pfn(sg_page(s));
		entry->offset	      = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s))) {
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset	 = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.offset		= offset_in_page(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type		= dma_debug_resource;
	entry->dev		= dev;
	entry->pfn		= PHYS_PFN(addr);
	entry->offset		= offset_in_page(addr);
	entry->size		= size;
	entry->dev_addr		= dma_addr;
	entry->direction	= direction;
	entry->map_err_type	= MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
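/*
 * For the scatterlist sync hooks below (as for debug_dma_unmap_sg() above),
 * only the first iteration looks up how many segments were actually mapped
 * (sg_mapped_ents); the loop stops there because an IOMMU may have coalesced
 * the caller's nelems entries into fewer DMA segments.
 */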
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);