// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include "debug.h"

#define HASH_SIZE	16384ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: dma address
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history, to support backtraces when a
 *                 violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
	[DMA_NONE]		= "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-26 here as the index into the hash
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

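/*
 * For example, with HASH_FN_SHIFT == 13 and 16384 buckets, a dev_addr of
 * 0x2000 hashes to bucket 1, addresses 0x2000 apart land in consecutive
 * buckets, and the index only wraps once addresses differ by more than
 * 16384 << 13 bytes (128 MiB).
 */
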
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	struct dma_debug_entry *entry, index = *ref;
	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);

	for (int i = 0; i < limit; i++) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

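/*
 * For example, on a configuration with 4 KiB pages (PAGE_SHIFT == 12) and
 * 64-byte cachelines (L1_CACHE_SHIFT == 6) there are 64 cachelines per page,
 * so a mapping at pfn 0x100 with offset 0x80 is tracked as cacheline number
 * (0x100 << 6) + 2 == 0x4002.
 */
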
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/*
 * Dump mapping entries to the kernel log for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				cln = to_cacheline_number(entry);
				dev_info(entry->dev,
					 "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 &cln, dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}
		spin_unlock_irqrestore(&bucket->lock, flags);

		cond_resched();
	}
}

/*
 * Dump mapping entries to user space via debugfs
 */
static int dump_show(struct seq_file *seq, void *v)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			cln = to_cacheline_number(entry);
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
				   dev_driver_string(entry->dev),
				   dev_name(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   &cln, dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);

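/*
 * With debugfs mounted at /sys/kernel/debug (the usual location), the files
 * created above live under /sys/kernel/debug/dma-api/.  Typical, purely
 * illustrative use from userspace:
 *
 *	cat /sys/kernel/debug/dma-api/error_count
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *
 * The driver name "e1000e" is only an example; see filter_write() above for
 * how the token is parsed.
 */
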
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

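/*
 * Illustrative kernel command line usage of the two options parsed above:
 *
 *	dma_debug=off			disable dma-debug at boot
 *	dma_debug_entries=131072	preallocate a larger entry pool
 *					(the default is 1 << 16)
 */
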
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, flags);
}

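/*
 * The checks above correspond to the driver-side streaming mapping pattern,
 * roughly (illustrative sketch, not part of this file; "buf", "len" and the
 * direction are placeholders):
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 *
 * The unmap must use the same address, size and direction as the map, and
 * the dma_mapping_error() check is what marks the entry MAP_ERR_CHECKED
 * (see debug_dma_mapping_error() below).
 */
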
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (memory_intersects(_stext, _etext, addr, len) ||
	    memory_intersects(__start_rodata, __end_rodata, addr, len))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, flags);
}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}

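/*
 * The limits consulted above come from dev->dma_parms, which a driver or bus
 * sets up with something like (illustrative only, values are placeholders):
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
 */
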
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_single;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry, attrs);
}

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_single,
		.dev = dev,
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction,
		      unsigned long attrs)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nents, i) {
		check_for_stack(dev, sg_page(s), s->offset);
		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), s->length);
	}

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_sg_segment(dev, s);

		add_dma_entry(entry, attrs);
	}
}

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}

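/*
 * Note that the unmap side is driven by the driver's original nelems, not by
 * the mapped count returned from dma_map_sg(); get_nr_mapped_entries()
 * recovers the latter from the hash.  The corresponding (illustrative)
 * driver pattern, where the unmap uses nents rather than count:
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (count == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
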
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt,
			      unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->offset = offset_in_page(virt);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry, attrs);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_coherent,
		.dev = dev,
		.offset = offset_in_page(virt),
		.dev_addr = dma_addr,
		.size = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr,
			    unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = PHYS_PFN(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry, attrs);
}

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_resource,
		.dev = dev,
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}

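/*
 * These two hooks mirror the driver-side sync pattern for a long-lived
 * streaming mapping, roughly (illustrative only):
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	... the CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *
 * check_sync() warns when the synced range, direction or entry count does
 * not match the original mapping.
 */
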
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);