// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE	1024ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES \
	(PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: dma address returned by the mapping function
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of where the mapping was created, to support
 *                 backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	int			type;
	unsigned long		pfn;
	size_t			offset;
	u64			dev_addr;
	u64			size;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
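
/*
 * Sizing note (illustrative, assuming 4 KiB pages and CONFIG_STACKTRACE=y):
 * sizeof(struct dma_debug_entry) is on the order of 120 bytes, so
 * DMA_DEBUG_DYNAMIC_ENTRIES works out to roughly 30 new entries per
 * page-sized allocation, while the default preallocation requests
 * 65536 entries (about 2 MiB worth of pages).
 */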

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 of the dma address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
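
/*
 * Worked example (illustrative): with HASH_FN_SHIFT == 13 and
 * HASH_FN_MASK == 0x3ff, a dev_addr of 0x12345678 hashes to
 * (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2, i.e. bucket 418.
 * Addresses within the same 8 KiB window always share a bucket, which
 * is what bucket_find_contain() below relies on when walking backwards.
 */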

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
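
/*
 * Example output line (illustrative values only):
 *
 *	e1000e 0000:00:19.0: page idx 418 P=3f8d2000 N=3f8d2 D=ffe00000
 *		L=1000 DMA_FROM_DEVICE dma map error not checked
 */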

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings. Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}
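
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12 and
 * L1_CACHE_SHIFT == 6): CACHELINES_PER_PAGE is 64, so pfn 0x1234 with
 * offset 0x80 yields cln = (0x1234 << 6) + (0x80 >> 6) = 0x48d02.
 * The radix tree tags act as a small saturating counter: with
 * RADIX_TREE_MAX_TAGS == 3, up to ACTIVE_CACHELINE_MAX_OVERLAP == 7
 * overlapping mappings of one cacheline can be balanced before the
 * WARN_ONCE() in active_cacheline_inc_overlap() fires.
 */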

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}
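
/*
 * Usage sketch (illustrative): a caller that is about to let the cpu
 * write to a page which may still be mapped for DMA, e.g. a
 * copy-on-write path, can assert first:
 *
 *	debug_dma_assert_idle(page);
 *	copy_user_highpage(dst, page, addr, vma);
 */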

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}

/*
 * struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 * - only use the first token we got
	 * - token delimiter is everything looking like a space
	 *   character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};
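
/*
 * Usage sketch (illustrative): restrict error reports to one driver
 * from userspace, and clear the filter again by writing a string whose
 * first character is not alphanumeric:
 *
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""       > /sys/kernel/debug/dma-api/driver_filter
 */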

static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

static void dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry = NULL;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
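
/*
 * Usage sketch (illustrative): dump every live mapping from userspace:
 *
 *	cat /sys/kernel/debug/dma-api/dump
 *
 * In-kernel callers can get a per-device view via
 * debug_dma_dump_mappings(dev).
 */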

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	dma_debug_fs_init();

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
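
/*
 * Example kernel command lines (illustrative):
 *
 *	dma_debug=off			- disable the checks entirely
 *	dma_debug_entries=131072	- preallocate a larger entry pool
 */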

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/DMA-API.txt.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
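
/*
 * overlap() treats both ranges as half-open intervals: [a1, b1) and
 * [a2, b2) intersect iff neither ends before the other begins. For
 * example (illustrative), addr=0x1000 len=0x100 against start=0x10ff
 * end=0x2000 overlaps (b1 == 0x1100 > a2 == 0x10ff), while the same
 * buffer against start=0x1100 does not.
 */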

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
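
/*
 * Boundary check example (illustrative): for a device with
 * dma_get_seg_boundary() == 0xffff (64 KiB), a segment with
 * start = 0x1fff8 and len = 0x10 ends at 0x20007. Then
 * (start ^ end) & ~boundary == 0x3ffff & ~0xffff == 0x30000 != 0,
 * so the segment straddles a 64 KiB boundary and is reported.
 */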

void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_single;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_single,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
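
/*
 * Driver-side pattern this tracks (illustrative): every successful map
 * must be error-checked and eventually unmapped with matching size and
 * direction, or check_unmap() above will complain:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */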

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->offset = offset_in_page(virt);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.offset    = offset_in_page(virt),
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = PHYS_PFN(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_resource,
		.dev       = dev,
		.dev_addr  = dma_addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
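
/*
 * Partial-mapping note (illustrative): dma_map_sg() may coalesce
 * entries, so drivers must drive the device with its return value but
 * unmap with the original nelems; debug_dma_unmap_sg() above and the
 * sg sync helpers below reconstruct that count via
 * get_nr_mapped_entries():
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (n == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */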

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
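
/*
 * Example kernel command line (illustrative): set the driver filter at
 * boot, before debugfs is available:
 *
 *	dma_debug_driver=e1000e
 */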