1 /* 2 * Copyright (C) 2008 Advanced Micro Devices, Inc. 3 * 4 * Author: Joerg Roedel <joerg.roedel@amd.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 #define pr_fmt(fmt) "DMA-API: " fmt 21 22 #include <linux/sched/task_stack.h> 23 #include <linux/scatterlist.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/sched/task.h> 26 #include <linux/stacktrace.h> 27 #include <linux/dma-debug.h> 28 #include <linux/spinlock.h> 29 #include <linux/vmalloc.h> 30 #include <linux/debugfs.h> 31 #include <linux/uaccess.h> 32 #include <linux/export.h> 33 #include <linux/device.h> 34 #include <linux/types.h> 35 #include <linux/sched.h> 36 #include <linux/ctype.h> 37 #include <linux/list.h> 38 #include <linux/slab.h> 39 40 #include <asm/sections.h> 41 42 #define HASH_SIZE 1024ULL 43 #define HASH_FN_SHIFT 13 44 #define HASH_FN_MASK (HASH_SIZE - 1) 45 46 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 47 /* If the pool runs out, add this many new entries at once */ 48 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry)) 49 50 enum { 51 dma_debug_single, 52 dma_debug_sg, 53 dma_debug_coherent, 54 dma_debug_resource, 55 }; 56 57 enum map_err_types { 58 MAP_ERR_CHECK_NOT_APPLICABLE, 59 MAP_ERR_NOT_CHECKED, 60 MAP_ERR_CHECKED, 61 }; 62 63 #define DMA_DEBUG_STACKTRACE_ENTRIES 5 64 65 /** 66 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping 67 * @list: node on pre-allocated free_entries list 68 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent 69 * @type: single, page, sg, coherent 70 * @pfn: page frame of the start address 71 * @offset: offset of mapping relative to pfn 72 * @size: length of the mapping 73 * @direction: enum dma_data_direction 74 * @sg_call_ents: 'nents' from dma_map_sg 75 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg 76 * @map_err_type: track whether dma_mapping_error() was checked 77 * @stacktrace: support backtraces when a violation is detected 78 */ 79 struct dma_debug_entry { 80 struct list_head list; 81 struct device *dev; 82 int type; 83 unsigned long pfn; 84 size_t offset; 85 u64 dev_addr; 86 u64 size; 87 int direction; 88 int sg_call_ents; 89 int sg_mapped_ents; 90 enum map_err_types map_err_type; 91 #ifdef CONFIG_STACKTRACE 92 struct stack_trace stacktrace; 93 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; 94 #endif 95 }; 96 97 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); 98 99 struct hash_bucket { 100 struct list_head list; 101 spinlock_t lock; 102 } ____cacheline_aligned_in_smp; 103 104 /* Hash list to save the allocated dma addresses */ 105 static struct hash_bucket dma_entry_hash[HASH_SIZE]; 106 /* List of pre-allocated dma_debug_entry's */ 107 static LIST_HEAD(free_entries); 108 /* Lock for the list above */ 109 static DEFINE_SPINLOCK(free_entries_lock); 110 111 /* Global disable flag - will be set in case of an error */ 112 static bool global_disable __read_mostly; 113 114 /* Early initialization disable flag, set at the end of dma_debug_init */ 115 static bool dma_debug_initialized __read_mostly; 116 117 static inline bool dma_debug_disabled(void) 118 { 119 return global_disable || !dma_debug_initialized; 120 } 121 122 /* Global error count */ 123 static u32 error_count; 124 125 /* Global error show enable*/ 126 static u32 show_all_errors __read_mostly; 127 /* Number of errors to show */ 128 static u32 show_num_errors = 1; 129 130 static u32 num_free_entries; 131 static u32 min_free_entries; 132 static u32 nr_total_entries; 133 134 /* number of preallocated entries requested by kernel cmdline */ 135 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; 136 137 /* per-driver filter related state */ 138 139 #define NAME_MAX_LEN 64 140 141 static char current_driver_name[NAME_MAX_LEN] __read_mostly; 142 static struct device_driver *current_driver __read_mostly; 143 144 static DEFINE_RWLOCK(driver_name_lock); 145 146 static const char *const maperr2str[] = { 147 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", 148 [MAP_ERR_NOT_CHECKED] = "dma map error not checked", 149 [MAP_ERR_CHECKED] = "dma map error checked", 150 }; 151 152 static const char *type2name[5] = { "single", "page", 153 "scather-gather", "coherent", 154 "resource" }; 155 156 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", 157 "DMA_FROM_DEVICE", "DMA_NONE" }; 158 159 /* 160 * The access to some variables in this macro is racy. We can't use atomic_t 161 * here because all these variables are exported to debugfs. Some of them even 162 * writeable. This is also the reason why a lock won't help much. But anyway, 163 * the races are no big deal. Here is why: 164 * 165 * error_count: the addition is racy, but the worst thing that can happen is 166 * that we don't count some errors 167 * show_num_errors: the subtraction is racy. Also no big deal because in 168 * worst case this will result in one warning more in the 169 * system log than the user configured. This variable is 170 * writeable via debugfs. 171 */ 172 static inline void dump_entry_trace(struct dma_debug_entry *entry) 173 { 174 #ifdef CONFIG_STACKTRACE 175 if (entry) { 176 pr_warning("Mapped at:\n"); 177 print_stack_trace(&entry->stacktrace, 0); 178 } 179 #endif 180 } 181 182 static bool driver_filter(struct device *dev) 183 { 184 struct device_driver *drv; 185 unsigned long flags; 186 bool ret; 187 188 /* driver filter off */ 189 if (likely(!current_driver_name[0])) 190 return true; 191 192 /* driver filter on and initialized */ 193 if (current_driver && dev && dev->driver == current_driver) 194 return true; 195 196 /* driver filter on, but we can't filter on a NULL device... */ 197 if (!dev) 198 return false; 199 200 if (current_driver || !current_driver_name[0]) 201 return false; 202 203 /* driver filter on but not yet initialized */ 204 drv = dev->driver; 205 if (!drv) 206 return false; 207 208 /* lock to protect against change of current_driver_name */ 209 read_lock_irqsave(&driver_name_lock, flags); 210 211 ret = false; 212 if (drv->name && 213 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { 214 current_driver = drv; 215 ret = true; 216 } 217 218 read_unlock_irqrestore(&driver_name_lock, flags); 219 220 return ret; 221 } 222 223 #define err_printk(dev, entry, format, arg...) do { \ 224 error_count += 1; \ 225 if (driver_filter(dev) && \ 226 (show_all_errors || show_num_errors > 0)) { \ 227 WARN(1, pr_fmt("%s %s: ") format, \ 228 dev ? dev_driver_string(dev) : "NULL", \ 229 dev ? dev_name(dev) : "NULL", ## arg); \ 230 dump_entry_trace(entry); \ 231 } \ 232 if (!show_all_errors && show_num_errors > 0) \ 233 show_num_errors -= 1; \ 234 } while (0); 235 236 /* 237 * Hash related functions 238 * 239 * Every DMA-API request is saved into a struct dma_debug_entry. To 240 * have quick access to these structs they are stored into a hash. 241 */ 242 static int hash_fn(struct dma_debug_entry *entry) 243 { 244 /* 245 * Hash function is based on the dma address. 246 * We use bits 20-27 here as the index into the hash 247 */ 248 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; 249 } 250 251 /* 252 * Request exclusive access to a hash bucket for a given dma_debug_entry. 253 */ 254 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, 255 unsigned long *flags) 256 __acquires(&dma_entry_hash[idx].lock) 257 { 258 int idx = hash_fn(entry); 259 unsigned long __flags; 260 261 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); 262 *flags = __flags; 263 return &dma_entry_hash[idx]; 264 } 265 266 /* 267 * Give up exclusive access to the hash bucket 268 */ 269 static void put_hash_bucket(struct hash_bucket *bucket, 270 unsigned long *flags) 271 __releases(&bucket->lock) 272 { 273 unsigned long __flags = *flags; 274 275 spin_unlock_irqrestore(&bucket->lock, __flags); 276 } 277 278 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) 279 { 280 return ((a->dev_addr == b->dev_addr) && 281 (a->dev == b->dev)) ? true : false; 282 } 283 284 static bool containing_match(struct dma_debug_entry *a, 285 struct dma_debug_entry *b) 286 { 287 if (a->dev != b->dev) 288 return false; 289 290 if ((b->dev_addr <= a->dev_addr) && 291 ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) 292 return true; 293 294 return false; 295 } 296 297 /* 298 * Search a given entry in the hash bucket list 299 */ 300 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, 301 struct dma_debug_entry *ref, 302 match_fn match) 303 { 304 struct dma_debug_entry *entry, *ret = NULL; 305 int matches = 0, match_lvl, last_lvl = -1; 306 307 list_for_each_entry(entry, &bucket->list, list) { 308 if (!match(ref, entry)) 309 continue; 310 311 /* 312 * Some drivers map the same physical address multiple 313 * times. Without a hardware IOMMU this results in the 314 * same device addresses being put into the dma-debug 315 * hash multiple times too. This can result in false 316 * positives being reported. Therefore we implement a 317 * best-fit algorithm here which returns the entry from 318 * the hash which fits best to the reference value 319 * instead of the first-fit. 320 */ 321 matches += 1; 322 match_lvl = 0; 323 entry->size == ref->size ? ++match_lvl : 0; 324 entry->type == ref->type ? ++match_lvl : 0; 325 entry->direction == ref->direction ? ++match_lvl : 0; 326 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; 327 328 if (match_lvl == 4) { 329 /* perfect-fit - return the result */ 330 return entry; 331 } else if (match_lvl > last_lvl) { 332 /* 333 * We found an entry that fits better then the 334 * previous one or it is the 1st match. 335 */ 336 last_lvl = match_lvl; 337 ret = entry; 338 } 339 } 340 341 /* 342 * If we have multiple matches but no perfect-fit, just return 343 * NULL. 344 */ 345 ret = (matches == 1) ? ret : NULL; 346 347 return ret; 348 } 349 350 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, 351 struct dma_debug_entry *ref) 352 { 353 return __hash_bucket_find(bucket, ref, exact_match); 354 } 355 356 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, 357 struct dma_debug_entry *ref, 358 unsigned long *flags) 359 { 360 361 unsigned int max_range = dma_get_max_seg_size(ref->dev); 362 struct dma_debug_entry *entry, index = *ref; 363 unsigned int range = 0; 364 365 while (range <= max_range) { 366 entry = __hash_bucket_find(*bucket, ref, containing_match); 367 368 if (entry) 369 return entry; 370 371 /* 372 * Nothing found, go back a hash bucket 373 */ 374 put_hash_bucket(*bucket, flags); 375 range += (1 << HASH_FN_SHIFT); 376 index.dev_addr -= (1 << HASH_FN_SHIFT); 377 *bucket = get_hash_bucket(&index, flags); 378 } 379 380 return NULL; 381 } 382 383 /* 384 * Add an entry to a hash bucket 385 */ 386 static void hash_bucket_add(struct hash_bucket *bucket, 387 struct dma_debug_entry *entry) 388 { 389 list_add_tail(&entry->list, &bucket->list); 390 } 391 392 /* 393 * Remove entry from a hash bucket list 394 */ 395 static void hash_bucket_del(struct dma_debug_entry *entry) 396 { 397 list_del(&entry->list); 398 } 399 400 static unsigned long long phys_addr(struct dma_debug_entry *entry) 401 { 402 if (entry->type == dma_debug_resource) 403 return __pfn_to_phys(entry->pfn) + entry->offset; 404 405 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; 406 } 407 408 /* 409 * Dump mapping entries for debugging purposes 410 */ 411 void debug_dma_dump_mappings(struct device *dev) 412 { 413 int idx; 414 415 for (idx = 0; idx < HASH_SIZE; idx++) { 416 struct hash_bucket *bucket = &dma_entry_hash[idx]; 417 struct dma_debug_entry *entry; 418 unsigned long flags; 419 420 spin_lock_irqsave(&bucket->lock, flags); 421 422 list_for_each_entry(entry, &bucket->list, list) { 423 if (!dev || dev == entry->dev) { 424 dev_info(entry->dev, 425 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n", 426 type2name[entry->type], idx, 427 phys_addr(entry), entry->pfn, 428 entry->dev_addr, entry->size, 429 dir2name[entry->direction], 430 maperr2str[entry->map_err_type]); 431 } 432 } 433 434 spin_unlock_irqrestore(&bucket->lock, flags); 435 } 436 } 437 438 /* 439 * For each mapping (initial cacheline in the case of 440 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a 441 * scatterlist, or the cacheline specified in dma_map_single) insert 442 * into this tree using the cacheline as the key. At 443 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If 444 * the entry already exists at insertion time add a tag as a reference 445 * count for the overlapping mappings. For now, the overlap tracking 446 * just ensures that 'unmaps' balance 'maps' before marking the 447 * cacheline idle, but we should also be flagging overlaps as an API 448 * violation. 449 * 450 * Memory usage is mostly constrained by the maximum number of available 451 * dma-debug entries in that we need a free dma_debug_entry before 452 * inserting into the tree. In the case of dma_map_page and 453 * dma_alloc_coherent there is only one dma_debug_entry and one 454 * dma_active_cacheline entry to track per event. dma_map_sg(), on the 455 * other hand, consumes a single dma_debug_entry, but inserts 'nents' 456 * entries into the tree. 457 * 458 * At any time debug_dma_assert_idle() can be called to trigger a 459 * warning if any cachelines in the given page are in the active set. 460 */ 461 static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); 462 static DEFINE_SPINLOCK(radix_lock); 463 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) 464 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) 465 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) 466 467 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) 468 { 469 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + 470 (entry->offset >> L1_CACHE_SHIFT); 471 } 472 473 static int active_cacheline_read_overlap(phys_addr_t cln) 474 { 475 int overlap = 0, i; 476 477 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) 478 if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) 479 overlap |= 1 << i; 480 return overlap; 481 } 482 483 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) 484 { 485 int i; 486 487 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) 488 return overlap; 489 490 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) 491 if (overlap & 1 << i) 492 radix_tree_tag_set(&dma_active_cacheline, cln, i); 493 else 494 radix_tree_tag_clear(&dma_active_cacheline, cln, i); 495 496 return overlap; 497 } 498 499 static void active_cacheline_inc_overlap(phys_addr_t cln) 500 { 501 int overlap = active_cacheline_read_overlap(cln); 502 503 overlap = active_cacheline_set_overlap(cln, ++overlap); 504 505 /* If we overflowed the overlap counter then we're potentially 506 * leaking dma-mappings. Otherwise, if maps and unmaps are 507 * balanced then this overflow may cause false negatives in 508 * debug_dma_assert_idle() as the cacheline may be marked idle 509 * prematurely. 510 */ 511 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, 512 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"), 513 ACTIVE_CACHELINE_MAX_OVERLAP, &cln); 514 } 515 516 static int active_cacheline_dec_overlap(phys_addr_t cln) 517 { 518 int overlap = active_cacheline_read_overlap(cln); 519 520 return active_cacheline_set_overlap(cln, --overlap); 521 } 522 523 static int active_cacheline_insert(struct dma_debug_entry *entry) 524 { 525 phys_addr_t cln = to_cacheline_number(entry); 526 unsigned long flags; 527 int rc; 528 529 /* If the device is not writing memory then we don't have any 530 * concerns about the cpu consuming stale data. This mitigates 531 * legitimate usages of overlapping mappings. 532 */ 533 if (entry->direction == DMA_TO_DEVICE) 534 return 0; 535 536 spin_lock_irqsave(&radix_lock, flags); 537 rc = radix_tree_insert(&dma_active_cacheline, cln, entry); 538 if (rc == -EEXIST) 539 active_cacheline_inc_overlap(cln); 540 spin_unlock_irqrestore(&radix_lock, flags); 541 542 return rc; 543 } 544 545 static void active_cacheline_remove(struct dma_debug_entry *entry) 546 { 547 phys_addr_t cln = to_cacheline_number(entry); 548 unsigned long flags; 549 550 /* ...mirror the insert case */ 551 if (entry->direction == DMA_TO_DEVICE) 552 return; 553 554 spin_lock_irqsave(&radix_lock, flags); 555 /* since we are counting overlaps the final put of the 556 * cacheline will occur when the overlap count is 0. 557 * active_cacheline_dec_overlap() returns -1 in that case 558 */ 559 if (active_cacheline_dec_overlap(cln) < 0) 560 radix_tree_delete(&dma_active_cacheline, cln); 561 spin_unlock_irqrestore(&radix_lock, flags); 562 } 563 564 /** 565 * debug_dma_assert_idle() - assert that a page is not undergoing dma 566 * @page: page to lookup in the dma_active_cacheline tree 567 * 568 * Place a call to this routine in cases where the cpu touching the page 569 * before the dma completes (page is dma_unmapped) will lead to data 570 * corruption. 571 */ 572 void debug_dma_assert_idle(struct page *page) 573 { 574 static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; 575 struct dma_debug_entry *entry = NULL; 576 void **results = (void **) &ents; 577 unsigned int nents, i; 578 unsigned long flags; 579 phys_addr_t cln; 580 581 if (dma_debug_disabled()) 582 return; 583 584 if (!page) 585 return; 586 587 cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; 588 spin_lock_irqsave(&radix_lock, flags); 589 nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, 590 CACHELINES_PER_PAGE); 591 for (i = 0; i < nents; i++) { 592 phys_addr_t ent_cln = to_cacheline_number(ents[i]); 593 594 if (ent_cln == cln) { 595 entry = ents[i]; 596 break; 597 } else if (ent_cln >= cln + CACHELINES_PER_PAGE) 598 break; 599 } 600 spin_unlock_irqrestore(&radix_lock, flags); 601 602 if (!entry) 603 return; 604 605 cln = to_cacheline_number(entry); 606 err_printk(entry->dev, entry, 607 "cpu touching an active dma mapped cacheline [cln=%pa]\n", 608 &cln); 609 } 610 611 /* 612 * Wrapper function for adding an entry to the hash. 613 * This function takes care of locking itself. 614 */ 615 static void add_dma_entry(struct dma_debug_entry *entry) 616 { 617 struct hash_bucket *bucket; 618 unsigned long flags; 619 int rc; 620 621 bucket = get_hash_bucket(entry, &flags); 622 hash_bucket_add(bucket, entry); 623 put_hash_bucket(bucket, &flags); 624 625 rc = active_cacheline_insert(entry); 626 if (rc == -ENOMEM) { 627 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); 628 global_disable = true; 629 } 630 631 /* TODO: report -EEXIST errors here as overlapping mappings are 632 * not supported by the DMA API 633 */ 634 } 635 636 static int dma_debug_create_entries(gfp_t gfp) 637 { 638 struct dma_debug_entry *entry; 639 int i; 640 641 entry = (void *)get_zeroed_page(gfp); 642 if (!entry) 643 return -ENOMEM; 644 645 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++) 646 list_add_tail(&entry[i].list, &free_entries); 647 648 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES; 649 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES; 650 651 return 0; 652 } 653 654 static struct dma_debug_entry *__dma_entry_alloc(void) 655 { 656 struct dma_debug_entry *entry; 657 658 entry = list_entry(free_entries.next, struct dma_debug_entry, list); 659 list_del(&entry->list); 660 memset(entry, 0, sizeof(*entry)); 661 662 num_free_entries -= 1; 663 if (num_free_entries < min_free_entries) 664 min_free_entries = num_free_entries; 665 666 return entry; 667 } 668 669 void __dma_entry_alloc_check_leak(void) 670 { 671 u32 tmp = nr_total_entries % nr_prealloc_entries; 672 673 /* Shout each time we tick over some multiple of the initial pool */ 674 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) { 675 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n", 676 nr_total_entries, 677 (nr_total_entries / nr_prealloc_entries)); 678 } 679 } 680 681 /* struct dma_entry allocator 682 * 683 * The next two functions implement the allocator for 684 * struct dma_debug_entries. 685 */ 686 static struct dma_debug_entry *dma_entry_alloc(void) 687 { 688 struct dma_debug_entry *entry; 689 unsigned long flags; 690 691 spin_lock_irqsave(&free_entries_lock, flags); 692 if (num_free_entries == 0) { 693 if (dma_debug_create_entries(GFP_ATOMIC)) { 694 global_disable = true; 695 spin_unlock_irqrestore(&free_entries_lock, flags); 696 pr_err("debugging out of memory - disabling\n"); 697 return NULL; 698 } 699 __dma_entry_alloc_check_leak(); 700 } 701 702 entry = __dma_entry_alloc(); 703 704 spin_unlock_irqrestore(&free_entries_lock, flags); 705 706 #ifdef CONFIG_STACKTRACE 707 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; 708 entry->stacktrace.entries = entry->st_entries; 709 entry->stacktrace.skip = 2; 710 save_stack_trace(&entry->stacktrace); 711 #endif 712 713 return entry; 714 } 715 716 static void dma_entry_free(struct dma_debug_entry *entry) 717 { 718 unsigned long flags; 719 720 active_cacheline_remove(entry); 721 722 /* 723 * add to beginning of the list - this way the entries are 724 * more likely cache hot when they are reallocated. 725 */ 726 spin_lock_irqsave(&free_entries_lock, flags); 727 list_add(&entry->list, &free_entries); 728 num_free_entries += 1; 729 spin_unlock_irqrestore(&free_entries_lock, flags); 730 } 731 732 /* 733 * DMA-API debugging init code 734 * 735 * The init code does two things: 736 * 1. Initialize core data structures 737 * 2. Preallocate a given number of dma_debug_entry structs 738 */ 739 740 static ssize_t filter_read(struct file *file, char __user *user_buf, 741 size_t count, loff_t *ppos) 742 { 743 char buf[NAME_MAX_LEN + 1]; 744 unsigned long flags; 745 int len; 746 747 if (!current_driver_name[0]) 748 return 0; 749 750 /* 751 * We can't copy to userspace directly because current_driver_name can 752 * only be read under the driver_name_lock with irqs disabled. So 753 * create a temporary copy first. 754 */ 755 read_lock_irqsave(&driver_name_lock, flags); 756 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name); 757 read_unlock_irqrestore(&driver_name_lock, flags); 758 759 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 760 } 761 762 static ssize_t filter_write(struct file *file, const char __user *userbuf, 763 size_t count, loff_t *ppos) 764 { 765 char buf[NAME_MAX_LEN]; 766 unsigned long flags; 767 size_t len; 768 int i; 769 770 /* 771 * We can't copy from userspace directly. Access to 772 * current_driver_name is protected with a write_lock with irqs 773 * disabled. Since copy_from_user can fault and may sleep we 774 * need to copy to temporary buffer first 775 */ 776 len = min(count, (size_t)(NAME_MAX_LEN - 1)); 777 if (copy_from_user(buf, userbuf, len)) 778 return -EFAULT; 779 780 buf[len] = 0; 781 782 write_lock_irqsave(&driver_name_lock, flags); 783 784 /* 785 * Now handle the string we got from userspace very carefully. 786 * The rules are: 787 * - only use the first token we got 788 * - token delimiter is everything looking like a space 789 * character (' ', '\n', '\t' ...) 790 * 791 */ 792 if (!isalnum(buf[0])) { 793 /* 794 * If the first character userspace gave us is not 795 * alphanumerical then assume the filter should be 796 * switched off. 797 */ 798 if (current_driver_name[0]) 799 pr_info("switching off dma-debug driver filter\n"); 800 current_driver_name[0] = 0; 801 current_driver = NULL; 802 goto out_unlock; 803 } 804 805 /* 806 * Now parse out the first token and use it as the name for the 807 * driver to filter for. 808 */ 809 for (i = 0; i < NAME_MAX_LEN - 1; ++i) { 810 current_driver_name[i] = buf[i]; 811 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) 812 break; 813 } 814 current_driver_name[i] = 0; 815 current_driver = NULL; 816 817 pr_info("enable driver filter for driver [%s]\n", 818 current_driver_name); 819 820 out_unlock: 821 write_unlock_irqrestore(&driver_name_lock, flags); 822 823 return count; 824 } 825 826 static const struct file_operations filter_fops = { 827 .read = filter_read, 828 .write = filter_write, 829 .llseek = default_llseek, 830 }; 831 832 static int dump_show(struct seq_file *seq, void *v) 833 { 834 int idx; 835 836 for (idx = 0; idx < HASH_SIZE; idx++) { 837 struct hash_bucket *bucket = &dma_entry_hash[idx]; 838 struct dma_debug_entry *entry; 839 unsigned long flags; 840 841 spin_lock_irqsave(&bucket->lock, flags); 842 list_for_each_entry(entry, &bucket->list, list) { 843 seq_printf(seq, 844 "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n", 845 dev_name(entry->dev), 846 dev_driver_string(entry->dev), 847 type2name[entry->type], idx, 848 phys_addr(entry), entry->pfn, 849 entry->dev_addr, entry->size, 850 dir2name[entry->direction], 851 maperr2str[entry->map_err_type]); 852 } 853 spin_unlock_irqrestore(&bucket->lock, flags); 854 } 855 return 0; 856 } 857 DEFINE_SHOW_ATTRIBUTE(dump); 858 859 static void dma_debug_fs_init(void) 860 { 861 struct dentry *dentry = debugfs_create_dir("dma-api", NULL); 862 863 debugfs_create_bool("disabled", 0444, dentry, &global_disable); 864 debugfs_create_u32("error_count", 0444, dentry, &error_count); 865 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors); 866 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors); 867 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries); 868 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries); 869 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries); 870 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops); 871 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops); 872 } 873 874 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) 875 { 876 struct dma_debug_entry *entry; 877 unsigned long flags; 878 int count = 0, i; 879 880 for (i = 0; i < HASH_SIZE; ++i) { 881 spin_lock_irqsave(&dma_entry_hash[i].lock, flags); 882 list_for_each_entry(entry, &dma_entry_hash[i].list, list) { 883 if (entry->dev == dev) { 884 count += 1; 885 *out_entry = entry; 886 } 887 } 888 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); 889 } 890 891 return count; 892 } 893 894 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) 895 { 896 struct device *dev = data; 897 struct dma_debug_entry *uninitialized_var(entry); 898 int count; 899 900 if (dma_debug_disabled()) 901 return 0; 902 903 switch (action) { 904 case BUS_NOTIFY_UNBOUND_DRIVER: 905 count = device_dma_allocations(dev, &entry); 906 if (count == 0) 907 break; 908 err_printk(dev, entry, "device driver has pending " 909 "DMA allocations while released from device " 910 "[count=%d]\n" 911 "One of leaked entries details: " 912 "[device address=0x%016llx] [size=%llu bytes] " 913 "[mapped with %s] [mapped as %s]\n", 914 count, entry->dev_addr, entry->size, 915 dir2name[entry->direction], type2name[entry->type]); 916 break; 917 default: 918 break; 919 } 920 921 return 0; 922 } 923 924 void dma_debug_add_bus(struct bus_type *bus) 925 { 926 struct notifier_block *nb; 927 928 if (dma_debug_disabled()) 929 return; 930 931 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 932 if (nb == NULL) { 933 pr_err("dma_debug_add_bus: out of memory\n"); 934 return; 935 } 936 937 nb->notifier_call = dma_debug_device_change; 938 939 bus_register_notifier(bus, nb); 940 } 941 942 static int dma_debug_init(void) 943 { 944 int i, nr_pages; 945 946 /* Do not use dma_debug_initialized here, since we really want to be 947 * called to set dma_debug_initialized 948 */ 949 if (global_disable) 950 return 0; 951 952 for (i = 0; i < HASH_SIZE; ++i) { 953 INIT_LIST_HEAD(&dma_entry_hash[i].list); 954 spin_lock_init(&dma_entry_hash[i].lock); 955 } 956 957 dma_debug_fs_init(); 958 959 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES); 960 for (i = 0; i < nr_pages; ++i) 961 dma_debug_create_entries(GFP_KERNEL); 962 if (num_free_entries >= nr_prealloc_entries) { 963 pr_info("preallocated %d debug entries\n", nr_total_entries); 964 } else if (num_free_entries > 0) { 965 pr_warn("%d debug entries requested but only %d allocated\n", 966 nr_prealloc_entries, nr_total_entries); 967 } else { 968 pr_err("debugging out of memory error - disabled\n"); 969 global_disable = true; 970 971 return 0; 972 } 973 min_free_entries = num_free_entries; 974 975 dma_debug_initialized = true; 976 977 pr_info("debugging enabled by kernel config\n"); 978 return 0; 979 } 980 core_initcall(dma_debug_init); 981 982 static __init int dma_debug_cmdline(char *str) 983 { 984 if (!str) 985 return -EINVAL; 986 987 if (strncmp(str, "off", 3) == 0) { 988 pr_info("debugging disabled on kernel command line\n"); 989 global_disable = true; 990 } 991 992 return 0; 993 } 994 995 static __init int dma_debug_entries_cmdline(char *str) 996 { 997 if (!str) 998 return -EINVAL; 999 if (!get_option(&str, &nr_prealloc_entries)) 1000 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; 1001 return 0; 1002 } 1003 1004 __setup("dma_debug=", dma_debug_cmdline); 1005 __setup("dma_debug_entries=", dma_debug_entries_cmdline); 1006 1007 static void check_unmap(struct dma_debug_entry *ref) 1008 { 1009 struct dma_debug_entry *entry; 1010 struct hash_bucket *bucket; 1011 unsigned long flags; 1012 1013 bucket = get_hash_bucket(ref, &flags); 1014 entry = bucket_find_exact(bucket, ref); 1015 1016 if (!entry) { 1017 /* must drop lock before calling dma_mapping_error */ 1018 put_hash_bucket(bucket, &flags); 1019 1020 if (dma_mapping_error(ref->dev, ref->dev_addr)) { 1021 err_printk(ref->dev, NULL, 1022 "device driver tries to free an " 1023 "invalid DMA memory address\n"); 1024 } else { 1025 err_printk(ref->dev, NULL, 1026 "device driver tries to free DMA " 1027 "memory it has not allocated [device " 1028 "address=0x%016llx] [size=%llu bytes]\n", 1029 ref->dev_addr, ref->size); 1030 } 1031 return; 1032 } 1033 1034 if (ref->size != entry->size) { 1035 err_printk(ref->dev, entry, "device driver frees " 1036 "DMA memory with different size " 1037 "[device address=0x%016llx] [map size=%llu bytes] " 1038 "[unmap size=%llu bytes]\n", 1039 ref->dev_addr, entry->size, ref->size); 1040 } 1041 1042 if (ref->type != entry->type) { 1043 err_printk(ref->dev, entry, "device driver frees " 1044 "DMA memory with wrong function " 1045 "[device address=0x%016llx] [size=%llu bytes] " 1046 "[mapped as %s] [unmapped as %s]\n", 1047 ref->dev_addr, ref->size, 1048 type2name[entry->type], type2name[ref->type]); 1049 } else if ((entry->type == dma_debug_coherent) && 1050 (phys_addr(ref) != phys_addr(entry))) { 1051 err_printk(ref->dev, entry, "device driver frees " 1052 "DMA memory with different CPU address " 1053 "[device address=0x%016llx] [size=%llu bytes] " 1054 "[cpu alloc address=0x%016llx] " 1055 "[cpu free address=0x%016llx]", 1056 ref->dev_addr, ref->size, 1057 phys_addr(entry), 1058 phys_addr(ref)); 1059 } 1060 1061 if (ref->sg_call_ents && ref->type == dma_debug_sg && 1062 ref->sg_call_ents != entry->sg_call_ents) { 1063 err_printk(ref->dev, entry, "device driver frees " 1064 "DMA sg list with different entry count " 1065 "[map count=%d] [unmap count=%d]\n", 1066 entry->sg_call_ents, ref->sg_call_ents); 1067 } 1068 1069 /* 1070 * This may be no bug in reality - but most implementations of the 1071 * DMA API don't handle this properly, so check for it here 1072 */ 1073 if (ref->direction != entry->direction) { 1074 err_printk(ref->dev, entry, "device driver frees " 1075 "DMA memory with different direction " 1076 "[device address=0x%016llx] [size=%llu bytes] " 1077 "[mapped with %s] [unmapped with %s]\n", 1078 ref->dev_addr, ref->size, 1079 dir2name[entry->direction], 1080 dir2name[ref->direction]); 1081 } 1082 1083 /* 1084 * Drivers should use dma_mapping_error() to check the returned 1085 * addresses of dma_map_single() and dma_map_page(). 1086 * If not, print this warning message. See Documentation/DMA-API.txt. 1087 */ 1088 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { 1089 err_printk(ref->dev, entry, 1090 "device driver failed to check map error" 1091 "[device address=0x%016llx] [size=%llu bytes] " 1092 "[mapped as %s]", 1093 ref->dev_addr, ref->size, 1094 type2name[entry->type]); 1095 } 1096 1097 hash_bucket_del(entry); 1098 dma_entry_free(entry); 1099 1100 put_hash_bucket(bucket, &flags); 1101 } 1102 1103 static void check_for_stack(struct device *dev, 1104 struct page *page, size_t offset) 1105 { 1106 void *addr; 1107 struct vm_struct *stack_vm_area = task_stack_vm_area(current); 1108 1109 if (!stack_vm_area) { 1110 /* Stack is direct-mapped. */ 1111 if (PageHighMem(page)) 1112 return; 1113 addr = page_address(page) + offset; 1114 if (object_is_on_stack(addr)) 1115 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr); 1116 } else { 1117 /* Stack is vmalloced. */ 1118 int i; 1119 1120 for (i = 0; i < stack_vm_area->nr_pages; i++) { 1121 if (page != stack_vm_area->pages[i]) 1122 continue; 1123 1124 addr = (u8 *)current->stack + i * PAGE_SIZE + offset; 1125 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr); 1126 break; 1127 } 1128 } 1129 } 1130 1131 static inline bool overlap(void *addr, unsigned long len, void *start, void *end) 1132 { 1133 unsigned long a1 = (unsigned long)addr; 1134 unsigned long b1 = a1 + len; 1135 unsigned long a2 = (unsigned long)start; 1136 unsigned long b2 = (unsigned long)end; 1137 1138 return !(b1 <= a2 || a1 >= b2); 1139 } 1140 1141 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) 1142 { 1143 if (overlap(addr, len, _stext, _etext) || 1144 overlap(addr, len, __start_rodata, __end_rodata)) 1145 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); 1146 } 1147 1148 static void check_sync(struct device *dev, 1149 struct dma_debug_entry *ref, 1150 bool to_cpu) 1151 { 1152 struct dma_debug_entry *entry; 1153 struct hash_bucket *bucket; 1154 unsigned long flags; 1155 1156 bucket = get_hash_bucket(ref, &flags); 1157 1158 entry = bucket_find_contain(&bucket, ref, &flags); 1159 1160 if (!entry) { 1161 err_printk(dev, NULL, "device driver tries " 1162 "to sync DMA memory it has not allocated " 1163 "[device address=0x%016llx] [size=%llu bytes]\n", 1164 (unsigned long long)ref->dev_addr, ref->size); 1165 goto out; 1166 } 1167 1168 if (ref->size > entry->size) { 1169 err_printk(dev, entry, "device driver syncs" 1170 " DMA memory outside allocated range " 1171 "[device address=0x%016llx] " 1172 "[allocation size=%llu bytes] " 1173 "[sync offset+size=%llu]\n", 1174 entry->dev_addr, entry->size, 1175 ref->size); 1176 } 1177 1178 if (entry->direction == DMA_BIDIRECTIONAL) 1179 goto out; 1180 1181 if (ref->direction != entry->direction) { 1182 err_printk(dev, entry, "device driver syncs " 1183 "DMA memory with different direction " 1184 "[device address=0x%016llx] [size=%llu bytes] " 1185 "[mapped with %s] [synced with %s]\n", 1186 (unsigned long long)ref->dev_addr, entry->size, 1187 dir2name[entry->direction], 1188 dir2name[ref->direction]); 1189 } 1190 1191 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 1192 !(ref->direction == DMA_TO_DEVICE)) 1193 err_printk(dev, entry, "device driver syncs " 1194 "device read-only DMA memory for cpu " 1195 "[device address=0x%016llx] [size=%llu bytes] " 1196 "[mapped with %s] [synced with %s]\n", 1197 (unsigned long long)ref->dev_addr, entry->size, 1198 dir2name[entry->direction], 1199 dir2name[ref->direction]); 1200 1201 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 1202 !(ref->direction == DMA_FROM_DEVICE)) 1203 err_printk(dev, entry, "device driver syncs " 1204 "device write-only DMA memory to device " 1205 "[device address=0x%016llx] [size=%llu bytes] " 1206 "[mapped with %s] [synced with %s]\n", 1207 (unsigned long long)ref->dev_addr, entry->size, 1208 dir2name[entry->direction], 1209 dir2name[ref->direction]); 1210 1211 if (ref->sg_call_ents && ref->type == dma_debug_sg && 1212 ref->sg_call_ents != entry->sg_call_ents) { 1213 err_printk(ref->dev, entry, "device driver syncs " 1214 "DMA sg list with different entry count " 1215 "[map count=%d] [sync count=%d]\n", 1216 entry->sg_call_ents, ref->sg_call_ents); 1217 } 1218 1219 out: 1220 put_hash_bucket(bucket, &flags); 1221 } 1222 1223 static void check_sg_segment(struct device *dev, struct scatterlist *sg) 1224 { 1225 #ifdef CONFIG_DMA_API_DEBUG_SG 1226 unsigned int max_seg = dma_get_max_seg_size(dev); 1227 u64 start, end, boundary = dma_get_seg_boundary(dev); 1228 1229 /* 1230 * Either the driver forgot to set dma_parms appropriately, or 1231 * whoever generated the list forgot to check them. 1232 */ 1233 if (sg->length > max_seg) 1234 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", 1235 sg->length, max_seg); 1236 /* 1237 * In some cases this could potentially be the DMA API 1238 * implementation's fault, but it would usually imply that 1239 * the scatterlist was built inappropriately to begin with. 1240 */ 1241 start = sg_dma_address(sg); 1242 end = start + sg_dma_len(sg) - 1; 1243 if ((start ^ end) & ~boundary) 1244 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", 1245 start, end, boundary); 1246 #endif 1247 } 1248 1249 void debug_dma_map_single(struct device *dev, const void *addr, 1250 unsigned long len) 1251 { 1252 if (unlikely(dma_debug_disabled())) 1253 return; 1254 1255 if (!virt_addr_valid(addr)) 1256 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n", 1257 addr, len); 1258 1259 if (is_vmalloc_addr(addr)) 1260 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n", 1261 addr, len); 1262 } 1263 EXPORT_SYMBOL(debug_dma_map_single); 1264 1265 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, 1266 size_t size, int direction, dma_addr_t dma_addr) 1267 { 1268 struct dma_debug_entry *entry; 1269 1270 if (unlikely(dma_debug_disabled())) 1271 return; 1272 1273 if (dma_mapping_error(dev, dma_addr)) 1274 return; 1275 1276 entry = dma_entry_alloc(); 1277 if (!entry) 1278 return; 1279 1280 entry->dev = dev; 1281 entry->type = dma_debug_single; 1282 entry->pfn = page_to_pfn(page); 1283 entry->offset = offset, 1284 entry->dev_addr = dma_addr; 1285 entry->size = size; 1286 entry->direction = direction; 1287 entry->map_err_type = MAP_ERR_NOT_CHECKED; 1288 1289 check_for_stack(dev, page, offset); 1290 1291 if (!PageHighMem(page)) { 1292 void *addr = page_address(page) + offset; 1293 1294 check_for_illegal_area(dev, addr, size); 1295 } 1296 1297 add_dma_entry(entry); 1298 } 1299 EXPORT_SYMBOL(debug_dma_map_page); 1300 1301 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 1302 { 1303 struct dma_debug_entry ref; 1304 struct dma_debug_entry *entry; 1305 struct hash_bucket *bucket; 1306 unsigned long flags; 1307 1308 if (unlikely(dma_debug_disabled())) 1309 return; 1310 1311 ref.dev = dev; 1312 ref.dev_addr = dma_addr; 1313 bucket = get_hash_bucket(&ref, &flags); 1314 1315 list_for_each_entry(entry, &bucket->list, list) { 1316 if (!exact_match(&ref, entry)) 1317 continue; 1318 1319 /* 1320 * The same physical address can be mapped multiple 1321 * times. Without a hardware IOMMU this results in the 1322 * same device addresses being put into the dma-debug 1323 * hash multiple times too. This can result in false 1324 * positives being reported. Therefore we implement a 1325 * best-fit algorithm here which updates the first entry 1326 * from the hash which fits the reference value and is 1327 * not currently listed as being checked. 1328 */ 1329 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { 1330 entry->map_err_type = MAP_ERR_CHECKED; 1331 break; 1332 } 1333 } 1334 1335 put_hash_bucket(bucket, &flags); 1336 } 1337 EXPORT_SYMBOL(debug_dma_mapping_error); 1338 1339 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, 1340 size_t size, int direction) 1341 { 1342 struct dma_debug_entry ref = { 1343 .type = dma_debug_single, 1344 .dev = dev, 1345 .dev_addr = addr, 1346 .size = size, 1347 .direction = direction, 1348 }; 1349 1350 if (unlikely(dma_debug_disabled())) 1351 return; 1352 check_unmap(&ref); 1353 } 1354 EXPORT_SYMBOL(debug_dma_unmap_page); 1355 1356 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, 1357 int nents, int mapped_ents, int direction) 1358 { 1359 struct dma_debug_entry *entry; 1360 struct scatterlist *s; 1361 int i; 1362 1363 if (unlikely(dma_debug_disabled())) 1364 return; 1365 1366 for_each_sg(sg, s, mapped_ents, i) { 1367 entry = dma_entry_alloc(); 1368 if (!entry) 1369 return; 1370 1371 entry->type = dma_debug_sg; 1372 entry->dev = dev; 1373 entry->pfn = page_to_pfn(sg_page(s)); 1374 entry->offset = s->offset, 1375 entry->size = sg_dma_len(s); 1376 entry->dev_addr = sg_dma_address(s); 1377 entry->direction = direction; 1378 entry->sg_call_ents = nents; 1379 entry->sg_mapped_ents = mapped_ents; 1380 1381 check_for_stack(dev, sg_page(s), s->offset); 1382 1383 if (!PageHighMem(sg_page(s))) { 1384 check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); 1385 } 1386 1387 check_sg_segment(dev, s); 1388 1389 add_dma_entry(entry); 1390 } 1391 } 1392 EXPORT_SYMBOL(debug_dma_map_sg); 1393 1394 static int get_nr_mapped_entries(struct device *dev, 1395 struct dma_debug_entry *ref) 1396 { 1397 struct dma_debug_entry *entry; 1398 struct hash_bucket *bucket; 1399 unsigned long flags; 1400 int mapped_ents; 1401 1402 bucket = get_hash_bucket(ref, &flags); 1403 entry = bucket_find_exact(bucket, ref); 1404 mapped_ents = 0; 1405 1406 if (entry) 1407 mapped_ents = entry->sg_mapped_ents; 1408 put_hash_bucket(bucket, &flags); 1409 1410 return mapped_ents; 1411 } 1412 1413 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 1414 int nelems, int dir) 1415 { 1416 struct scatterlist *s; 1417 int mapped_ents = 0, i; 1418 1419 if (unlikely(dma_debug_disabled())) 1420 return; 1421 1422 for_each_sg(sglist, s, nelems, i) { 1423 1424 struct dma_debug_entry ref = { 1425 .type = dma_debug_sg, 1426 .dev = dev, 1427 .pfn = page_to_pfn(sg_page(s)), 1428 .offset = s->offset, 1429 .dev_addr = sg_dma_address(s), 1430 .size = sg_dma_len(s), 1431 .direction = dir, 1432 .sg_call_ents = nelems, 1433 }; 1434 1435 if (mapped_ents && i >= mapped_ents) 1436 break; 1437 1438 if (!i) 1439 mapped_ents = get_nr_mapped_entries(dev, &ref); 1440 1441 check_unmap(&ref); 1442 } 1443 } 1444 EXPORT_SYMBOL(debug_dma_unmap_sg); 1445 1446 void debug_dma_alloc_coherent(struct device *dev, size_t size, 1447 dma_addr_t dma_addr, void *virt) 1448 { 1449 struct dma_debug_entry *entry; 1450 1451 if (unlikely(dma_debug_disabled())) 1452 return; 1453 1454 if (unlikely(virt == NULL)) 1455 return; 1456 1457 /* handle vmalloc and linear addresses */ 1458 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) 1459 return; 1460 1461 entry = dma_entry_alloc(); 1462 if (!entry) 1463 return; 1464 1465 entry->type = dma_debug_coherent; 1466 entry->dev = dev; 1467 entry->offset = offset_in_page(virt); 1468 entry->size = size; 1469 entry->dev_addr = dma_addr; 1470 entry->direction = DMA_BIDIRECTIONAL; 1471 1472 if (is_vmalloc_addr(virt)) 1473 entry->pfn = vmalloc_to_pfn(virt); 1474 else 1475 entry->pfn = page_to_pfn(virt_to_page(virt)); 1476 1477 add_dma_entry(entry); 1478 } 1479 1480 void debug_dma_free_coherent(struct device *dev, size_t size, 1481 void *virt, dma_addr_t addr) 1482 { 1483 struct dma_debug_entry ref = { 1484 .type = dma_debug_coherent, 1485 .dev = dev, 1486 .offset = offset_in_page(virt), 1487 .dev_addr = addr, 1488 .size = size, 1489 .direction = DMA_BIDIRECTIONAL, 1490 }; 1491 1492 /* handle vmalloc and linear addresses */ 1493 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) 1494 return; 1495 1496 if (is_vmalloc_addr(virt)) 1497 ref.pfn = vmalloc_to_pfn(virt); 1498 else 1499 ref.pfn = page_to_pfn(virt_to_page(virt)); 1500 1501 if (unlikely(dma_debug_disabled())) 1502 return; 1503 1504 check_unmap(&ref); 1505 } 1506 1507 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, 1508 int direction, dma_addr_t dma_addr) 1509 { 1510 struct dma_debug_entry *entry; 1511 1512 if (unlikely(dma_debug_disabled())) 1513 return; 1514 1515 entry = dma_entry_alloc(); 1516 if (!entry) 1517 return; 1518 1519 entry->type = dma_debug_resource; 1520 entry->dev = dev; 1521 entry->pfn = PHYS_PFN(addr); 1522 entry->offset = offset_in_page(addr); 1523 entry->size = size; 1524 entry->dev_addr = dma_addr; 1525 entry->direction = direction; 1526 entry->map_err_type = MAP_ERR_NOT_CHECKED; 1527 1528 add_dma_entry(entry); 1529 } 1530 EXPORT_SYMBOL(debug_dma_map_resource); 1531 1532 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, 1533 size_t size, int direction) 1534 { 1535 struct dma_debug_entry ref = { 1536 .type = dma_debug_resource, 1537 .dev = dev, 1538 .dev_addr = dma_addr, 1539 .size = size, 1540 .direction = direction, 1541 }; 1542 1543 if (unlikely(dma_debug_disabled())) 1544 return; 1545 1546 check_unmap(&ref); 1547 } 1548 EXPORT_SYMBOL(debug_dma_unmap_resource); 1549 1550 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 1551 size_t size, int direction) 1552 { 1553 struct dma_debug_entry ref; 1554 1555 if (unlikely(dma_debug_disabled())) 1556 return; 1557 1558 ref.type = dma_debug_single; 1559 ref.dev = dev; 1560 ref.dev_addr = dma_handle; 1561 ref.size = size; 1562 ref.direction = direction; 1563 ref.sg_call_ents = 0; 1564 1565 check_sync(dev, &ref, true); 1566 } 1567 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); 1568 1569 void debug_dma_sync_single_for_device(struct device *dev, 1570 dma_addr_t dma_handle, size_t size, 1571 int direction) 1572 { 1573 struct dma_debug_entry ref; 1574 1575 if (unlikely(dma_debug_disabled())) 1576 return; 1577 1578 ref.type = dma_debug_single; 1579 ref.dev = dev; 1580 ref.dev_addr = dma_handle; 1581 ref.size = size; 1582 ref.direction = direction; 1583 ref.sg_call_ents = 0; 1584 1585 check_sync(dev, &ref, false); 1586 } 1587 EXPORT_SYMBOL(debug_dma_sync_single_for_device); 1588 1589 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 1590 int nelems, int direction) 1591 { 1592 struct scatterlist *s; 1593 int mapped_ents = 0, i; 1594 1595 if (unlikely(dma_debug_disabled())) 1596 return; 1597 1598 for_each_sg(sg, s, nelems, i) { 1599 1600 struct dma_debug_entry ref = { 1601 .type = dma_debug_sg, 1602 .dev = dev, 1603 .pfn = page_to_pfn(sg_page(s)), 1604 .offset = s->offset, 1605 .dev_addr = sg_dma_address(s), 1606 .size = sg_dma_len(s), 1607 .direction = direction, 1608 .sg_call_ents = nelems, 1609 }; 1610 1611 if (!i) 1612 mapped_ents = get_nr_mapped_entries(dev, &ref); 1613 1614 if (i >= mapped_ents) 1615 break; 1616 1617 check_sync(dev, &ref, true); 1618 } 1619 } 1620 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); 1621 1622 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 1623 int nelems, int direction) 1624 { 1625 struct scatterlist *s; 1626 int mapped_ents = 0, i; 1627 1628 if (unlikely(dma_debug_disabled())) 1629 return; 1630 1631 for_each_sg(sg, s, nelems, i) { 1632 1633 struct dma_debug_entry ref = { 1634 .type = dma_debug_sg, 1635 .dev = dev, 1636 .pfn = page_to_pfn(sg_page(s)), 1637 .offset = s->offset, 1638 .dev_addr = sg_dma_address(s), 1639 .size = sg_dma_len(s), 1640 .direction = direction, 1641 .sg_call_ents = nelems, 1642 }; 1643 if (!i) 1644 mapped_ents = get_nr_mapped_entries(dev, &ref); 1645 1646 if (i >= mapped_ents) 1647 break; 1648 1649 check_sync(dev, &ref, false); 1650 } 1651 } 1652 EXPORT_SYMBOL(debug_dma_sync_sg_for_device); 1653 1654 static int __init dma_debug_driver_setup(char *str) 1655 { 1656 int i; 1657 1658 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { 1659 current_driver_name[i] = *str; 1660 if (*str == 0) 1661 break; 1662 } 1663 1664 if (current_driver_name[0]) 1665 pr_info("enable driver filter for driver [%s]\n", 1666 current_driver_name); 1667 1668 1669 return 1; 1670 } 1671 __setup("dma_debug_driver=", dma_debug_driver_setup); 1672