1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
4 *
5 * Author: Joerg Roedel <joerg.roedel@amd.com>
6 */
7
8 #define pr_fmt(fmt) "DMA-API: " fmt
9
10 #include <linux/sched/task_stack.h>
11 #include <linux/scatterlist.h>
12 #include <linux/dma-map-ops.h>
13 #include <linux/sched/task.h>
14 #include <linux/stacktrace.h>
15 #include <linux/spinlock.h>
16 #include <linux/vmalloc.h>
17 #include <linux/debugfs.h>
18 #include <linux/uaccess.h>
19 #include <linux/export.h>
20 #include <linux/device.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/ctype.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <asm/sections.h>
27 #include "debug.h"
28
29 #define HASH_SIZE 16384ULL
30 #define HASH_FN_SHIFT 13
31 #define HASH_FN_MASK (HASH_SIZE - 1)
32
33 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
34 /* If the pool runs out, add this many new entries at once */
35 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
36
37 enum {
38 dma_debug_single,
39 dma_debug_sg,
40 dma_debug_coherent,
41 dma_debug_resource,
42 };
43
44 enum map_err_types {
45 MAP_ERR_CHECK_NOT_APPLICABLE,
46 MAP_ERR_NOT_CHECKED,
47 MAP_ERR_CHECKED,
48 };
49
50 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
51
52 /**
53 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
54 * @list: node on pre-allocated free_entries list
55 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
56 * @dev_addr: dma address
57 * @size: length of the mapping
58 * @type: single, sg, coherent, resource
59 * @direction: enum dma_data_direction
60 * @sg_call_ents: 'nents' from dma_map_sg
61 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
62 * @pfn: page frame of the start address
63 * @offset: offset of mapping relative to pfn
64 * @map_err_type: track whether dma_mapping_error() was checked
65 * @stacktrace: support backtraces when a violation is detected
66 */
67 struct dma_debug_entry {
68 struct list_head list;
69 struct device *dev;
70 u64 dev_addr;
71 u64 size;
72 int type;
73 int direction;
74 int sg_call_ents;
75 int sg_mapped_ents;
76 unsigned long pfn;
77 size_t offset;
78 enum map_err_types map_err_type;
79 #ifdef CONFIG_STACKTRACE
80 unsigned int stack_len;
81 unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
82 #endif
83 } ____cacheline_aligned_in_smp;
84
85 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
86
87 struct hash_bucket {
88 struct list_head list;
89 spinlock_t lock;
90 };
91
92 /* Hash list to save the allocated dma addresses */
93 static struct hash_bucket dma_entry_hash[HASH_SIZE];
94 /* List of pre-allocated dma_debug_entry's */
95 static LIST_HEAD(free_entries);
96 /* Lock for the list above */
97 static DEFINE_SPINLOCK(free_entries_lock);
98
99 /* Global disable flag - will be set in case of an error */
100 static bool global_disable __read_mostly;
101
102 /* Early initialization disable flag, set at the end of dma_debug_init */
103 static bool dma_debug_initialized __read_mostly;
104
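/*
 * Checking is a no-op while the core has not finished dma_debug_init() or
 * after a fatal error has set global_disable.
 */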
105 static inline bool dma_debug_disabled(void)
106 {
107 return global_disable || !dma_debug_initialized;
108 }
109
110 /* Global error count */
111 static u32 error_count;
112
113 /* Global error show enable */
114 static u32 show_all_errors __read_mostly;
115 /* Number of errors to show */
116 static u32 show_num_errors = 1;
117
118 static u32 num_free_entries;
119 static u32 min_free_entries;
120 static u32 nr_total_entries;
121
122 /* number of preallocated entries requested by kernel cmdline */
123 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
124
125 /* per-driver filter related state */
126
127 #define NAME_MAX_LEN 64
128
129 static char current_driver_name[NAME_MAX_LEN] __read_mostly;
130 static struct device_driver *current_driver __read_mostly;
131
132 static DEFINE_RWLOCK(driver_name_lock);
133
134 static const char *const maperr2str[] = {
135 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
136 [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
137 [MAP_ERR_CHECKED] = "dma map error checked",
138 };
139
140 static const char *type2name[] = {
141 [dma_debug_single] = "single",
142 [dma_debug_sg] = "scatter-gather",
143 [dma_debug_coherent] = "coherent",
144 [dma_debug_resource] = "resource",
145 };
146
147 static const char *dir2name[] = {
148 [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
149 [DMA_TO_DEVICE] = "DMA_TO_DEVICE",
150 [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
151 [DMA_NONE] = "DMA_NONE",
152 };
153
154 /*
155 * The access to some variables in this macro is racy. We can't use atomic_t
156 * here because all these variables are exported to debugfs. Some of them are
157 * even writable. This is also the reason why a lock won't help much. But anyway,
158 * the races are no big deal. Here is why:
159 *
160 * error_count: the addition is racy, but the worst thing that can happen is
161 * that we don't count some errors
162 * show_num_errors: the subtraction is racy. Also no big deal because in
163 * worst case this will result in one warning more in the
164 * system log than the user configured. This variable is
165 * writeable via debugfs.
166 */
167 static inline void dump_entry_trace(struct dma_debug_entry *entry)
168 {
169 #ifdef CONFIG_STACKTRACE
170 if (entry) {
171 pr_warn("Mapped at:\n");
172 stack_trace_print(entry->stack_entries, entry->stack_len, 0);
173 }
174 #endif
175 }
176
177 static bool driver_filter(struct device *dev)
178 {
179 struct device_driver *drv;
180 unsigned long flags;
181 bool ret;
182
183 /* driver filter off */
184 if (likely(!current_driver_name[0]))
185 return true;
186
187 /* driver filter on and initialized */
188 if (current_driver && dev && dev->driver == current_driver)
189 return true;
190
191 /* driver filter on, but we can't filter on a NULL device... */
192 if (!dev)
193 return false;
194
195 if (current_driver || !current_driver_name[0])
196 return false;
197
198 /* driver filter on but not yet initialized */
199 drv = dev->driver;
200 if (!drv)
201 return false;
202
203 /* lock to protect against change of current_driver_name */
204 read_lock_irqsave(&driver_name_lock, flags);
205
206 ret = false;
207 if (drv->name &&
208 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
209 current_driver = drv;
210 ret = true;
211 }
212
213 read_unlock_irqrestore(&driver_name_lock, flags);
214
215 return ret;
216 }
217
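/*
 * Report a DMA-API violation for @dev: honour the per-driver filter and the
 * all_errors/num_errors debugfs knobs, then dump the stack trace recorded at
 * mapping time (if any).
 */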
218 #define err_printk(dev, entry, format, arg...) do { \
219 error_count += 1; \
220 if (driver_filter(dev) && \
221 (show_all_errors || show_num_errors > 0)) { \
222 WARN(1, pr_fmt("%s %s: ") format, \
223 dev ? dev_driver_string(dev) : "NULL", \
224 dev ? dev_name(dev) : "NULL", ## arg); \
225 dump_entry_trace(entry); \
226 } \
227 if (!show_all_errors && show_num_errors > 0) \
228 show_num_errors -= 1; \
229 } while (0)
230
231 /*
232 * Hash related functions
233 *
234 * Every DMA-API request is saved into a struct dma_debug_entry. To
235 * have quick access to these structs they are stored into a hash.
236 */
237 static int hash_fn(struct dma_debug_entry *entry)
238 {
239 /*
240 * Hash function is based on the dma address.
241 * We use bits 13 to 26 here as the index into the hash
242 */
243 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
244 }
245
246 /*
247 * Request exclusive access to a hash bucket for a given dma_debug_entry.
248 */
249 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
250 unsigned long *flags)
251 __acquires(&dma_entry_hash[idx].lock)
252 {
253 int idx = hash_fn(entry);
254 unsigned long __flags;
255
256 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
257 *flags = __flags;
258 return &dma_entry_hash[idx];
259 }
260
261 /*
262 * Give up exclusive access to the hash bucket
263 */
264 static void put_hash_bucket(struct hash_bucket *bucket,
265 unsigned long flags)
266 __releases(&bucket->lock)
267 {
268 spin_unlock_irqrestore(&bucket->lock, flags);
269 }
270
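/* Match on device and exact device (DMA) address only */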
271 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
272 {
273 return ((a->dev_addr == b->dev_addr) &&
274 (a->dev == b->dev)) ? true : false;
275 }
276
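/*
 * Match when the referenced range [dev_addr, dev_addr + size) lies entirely
 * within the stored entry's range on the same device.
 */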
277 static bool containing_match(struct dma_debug_entry *a,
278 struct dma_debug_entry *b)
279 {
280 if (a->dev != b->dev)
281 return false;
282
283 if ((b->dev_addr <= a->dev_addr) &&
284 ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
285 return true;
286
287 return false;
288 }
289
290 /*
291 * Search a given entry in the hash bucket list
292 */
293 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
294 struct dma_debug_entry *ref,
295 match_fn match)
296 {
297 struct dma_debug_entry *entry, *ret = NULL;
298 int matches = 0, match_lvl, last_lvl = -1;
299
300 list_for_each_entry(entry, &bucket->list, list) {
301 if (!match(ref, entry))
302 continue;
303
304 /*
305 * Some drivers map the same physical address multiple
306 * times. Without a hardware IOMMU this results in the
307 * same device addresses being put into the dma-debug
308 * hash multiple times too. This can result in false
309 * positives being reported. Therefore we implement a
310 * best-fit algorithm here which returns the entry from
311 * the hash which fits best to the reference value
312 * instead of the first-fit.
313 */
314 matches += 1;
315 match_lvl = 0;
316 entry->size == ref->size ? ++match_lvl : 0;
317 entry->type == ref->type ? ++match_lvl : 0;
318 entry->direction == ref->direction ? ++match_lvl : 0;
319 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
320
321 if (match_lvl == 4) {
322 /* perfect-fit - return the result */
323 return entry;
324 } else if (match_lvl > last_lvl) {
325 /*
326 * We found an entry that fits better than the
327 * previous one or it is the 1st match.
328 */
329 last_lvl = match_lvl;
330 ret = entry;
331 }
332 }
333
334 /*
335 * If we have multiple matches but no perfect-fit, just return
336 * NULL.
337 */
338 ret = (matches == 1) ? ret : NULL;
339
340 return ret;
341 }
342
343 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
344 struct dma_debug_entry *ref)
345 {
346 return __hash_bucket_find(bucket, ref, exact_match);
347 }
348
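/*
 * Like bucket_find_exact(), but for partial syncs: walk backwards through
 * the hash buckets (dropping and re-taking the bucket lock) until an entry
 * whose range contains the reference address is found.
 */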
349 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
350 struct dma_debug_entry *ref,
351 unsigned long *flags)
352 {
353
354 struct dma_debug_entry *entry, index = *ref;
355 int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
356
357 for (int i = 0; i < limit; i++) {
358 entry = __hash_bucket_find(*bucket, ref, containing_match);
359
360 if (entry)
361 return entry;
362
363 /*
364 * Nothing found, go back a hash bucket
365 */
366 put_hash_bucket(*bucket, *flags);
367 index.dev_addr -= (1 << HASH_FN_SHIFT);
368 *bucket = get_hash_bucket(&index, flags);
369 }
370
371 return NULL;
372 }
373
374 /*
375 * Add an entry to a hash bucket
376 */
377 static void hash_bucket_add(struct hash_bucket *bucket,
378 struct dma_debug_entry *entry)
379 {
380 list_add_tail(&entry->list, &bucket->list);
381 }
382
383 /*
384 * Remove entry from a hash bucket list
385 */
386 static void hash_bucket_del(struct dma_debug_entry *entry)
387 {
388 list_del(&entry->list);
389 }
390
391 static unsigned long long phys_addr(struct dma_debug_entry *entry)
392 {
393 if (entry->type == dma_debug_resource)
394 return __pfn_to_phys(entry->pfn) + entry->offset;
395
396 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
397 }
398
399 /*
400 * For each mapping (initial cacheline in the case of
401 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
402 * scatterlist, or the cacheline specified in dma_map_single) insert
403 * into this tree using the cacheline as the key. At
404 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
405 * the entry already exists at insertion time add a tag as a reference
406 * count for the overlapping mappings. For now, the overlap tracking
407 * just ensures that 'unmaps' balance 'maps' before marking the
408 * cacheline idle, but we should also be flagging overlaps as an API
409 * violation.
410 *
411 * Memory usage is mostly constrained by the maximum number of available
412 * dma-debug entries in that we need a free dma_debug_entry before
413 * inserting into the tree. In the case of dma_map_page and
414 * dma_alloc_coherent there is only one dma_debug_entry and one
415 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
416 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
417 * entries into the tree.
418 *
419 * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
420 * up right back in the DMA debugging code, leading to a deadlock.
421 */
422 static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
423 static DEFINE_SPINLOCK(radix_lock);
424 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
425 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
426 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
427
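/* Convert a mapping's pfn/offset pair into a global cacheline index */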
428 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
429 {
430 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
431 (entry->offset >> L1_CACHE_SHIFT);
432 }
433
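/*
 * The overlap count of a cacheline is kept in the radix tree tags, giving
 * RADIX_TREE_MAX_TAGS bits per active cacheline.
 */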
434 static int active_cacheline_read_overlap(phys_addr_t cln)
435 {
436 int overlap = 0, i;
437
438 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
439 if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
440 overlap |= 1 << i;
441 return overlap;
442 }
443
444 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
445 {
446 int i;
447
448 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
449 return overlap;
450
451 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
452 if (overlap & 1 << i)
453 radix_tree_tag_set(&dma_active_cacheline, cln, i);
454 else
455 radix_tree_tag_clear(&dma_active_cacheline, cln, i);
456
457 return overlap;
458 }
459
460 static void active_cacheline_inc_overlap(phys_addr_t cln)
461 {
462 int overlap = active_cacheline_read_overlap(cln);
463
464 overlap = active_cacheline_set_overlap(cln, ++overlap);
465
466 /* If we overflowed the overlap counter then we're potentially
467 * leaking dma-mappings.
468 */
469 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
470 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
471 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
472 }
473
474 static int active_cacheline_dec_overlap(phys_addr_t cln)
475 {
476 int overlap = active_cacheline_read_overlap(cln);
477
478 return active_cacheline_set_overlap(cln, --overlap);
479 }
480
481 static int active_cacheline_insert(struct dma_debug_entry *entry)
482 {
483 phys_addr_t cln = to_cacheline_number(entry);
484 unsigned long flags;
485 int rc;
486
487 /* If the device is not writing memory then we don't have any
488 * concerns about the cpu consuming stale data. This mitigates
489 * legitimate usages of overlapping mappings.
490 */
491 if (entry->direction == DMA_TO_DEVICE)
492 return 0;
493
494 spin_lock_irqsave(&radix_lock, flags);
495 rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
496 if (rc == -EEXIST)
497 active_cacheline_inc_overlap(cln);
498 spin_unlock_irqrestore(&radix_lock, flags);
499
500 return rc;
501 }
502
503 static void active_cacheline_remove(struct dma_debug_entry *entry)
504 {
505 phys_addr_t cln = to_cacheline_number(entry);
506 unsigned long flags;
507
508 /* ...mirror the insert case */
509 if (entry->direction == DMA_TO_DEVICE)
510 return;
511
512 spin_lock_irqsave(&radix_lock, flags);
513 /* since we are counting overlaps the final put of the
514 * cacheline will occur when the overlap count is 0.
515 * active_cacheline_dec_overlap() returns -1 in that case
516 */
517 if (active_cacheline_dec_overlap(cln) < 0)
518 radix_tree_delete(&dma_active_cacheline, cln);
519 spin_unlock_irqrestore(&radix_lock, flags);
520 }
521
522 /*
523 * Dump mapping entries to the kernel log for debugging purposes
524 */
525 void debug_dma_dump_mappings(struct device *dev)
526 {
527 int idx;
528 phys_addr_t cln;
529
530 for (idx = 0; idx < HASH_SIZE; idx++) {
531 struct hash_bucket *bucket = &dma_entry_hash[idx];
532 struct dma_debug_entry *entry;
533 unsigned long flags;
534
535 spin_lock_irqsave(&bucket->lock, flags);
536 list_for_each_entry(entry, &bucket->list, list) {
537 if (!dev || dev == entry->dev) {
538 cln = to_cacheline_number(entry);
539 dev_info(entry->dev,
540 "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
541 type2name[entry->type], idx,
542 phys_addr(entry), entry->pfn,
543 entry->dev_addr, entry->size,
544 &cln, dir2name[entry->direction],
545 maperr2str[entry->map_err_type]);
546 }
547 }
548 spin_unlock_irqrestore(&bucket->lock, flags);
549
550 cond_resched();
551 }
552 }
553
554 /*
555 * Dump mapping entries to user space via debugfs
556 */
557 static int dump_show(struct seq_file *seq, void *v)
558 {
559 int idx;
560 phys_addr_t cln;
561
562 for (idx = 0; idx < HASH_SIZE; idx++) {
563 struct hash_bucket *bucket = &dma_entry_hash[idx];
564 struct dma_debug_entry *entry;
565 unsigned long flags;
566
567 spin_lock_irqsave(&bucket->lock, flags);
568 list_for_each_entry(entry, &bucket->list, list) {
569 cln = to_cacheline_number(entry);
570 seq_printf(seq,
571 "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
572 dev_driver_string(entry->dev),
573 dev_name(entry->dev),
574 type2name[entry->type], idx,
575 phys_addr(entry), entry->pfn,
576 entry->dev_addr, entry->size,
577 &cln, dir2name[entry->direction],
578 maperr2str[entry->map_err_type]);
579 }
580 spin_unlock_irqrestore(&bucket->lock, flags);
581 }
582 return 0;
583 }
584 DEFINE_SHOW_ATTRIBUTE(dump);
585
586 /*
587 * Wrapper function for adding an entry to the hash.
588 * This function takes care of locking itself.
589 */
590 static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
591 {
592 struct hash_bucket *bucket;
593 unsigned long flags;
594 int rc;
595
596 bucket = get_hash_bucket(entry, &flags);
597 hash_bucket_add(bucket, entry);
598 put_hash_bucket(bucket, flags);
599
600 rc = active_cacheline_insert(entry);
601 if (rc == -ENOMEM) {
602 pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
603 global_disable = true;
604 } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
605 err_printk(entry->dev, entry,
606 "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
607 }
608 }
609
610 static int dma_debug_create_entries(gfp_t gfp)
611 {
612 struct dma_debug_entry *entry;
613 int i;
614
615 entry = (void *)get_zeroed_page(gfp);
616 if (!entry)
617 return -ENOMEM;
618
619 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
620 list_add_tail(&entry[i].list, &free_entries);
621
622 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
623 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
624
625 return 0;
626 }
627
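/* Must be called with free_entries_lock held and free_entries non-empty */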
628 static struct dma_debug_entry *__dma_entry_alloc(void)
629 {
630 struct dma_debug_entry *entry;
631
632 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
633 list_del(&entry->list);
634 memset(entry, 0, sizeof(*entry));
635
636 num_free_entries -= 1;
637 if (num_free_entries < min_free_entries)
638 min_free_entries = num_free_entries;
639
640 return entry;
641 }
642
643 /*
644 * This should be called outside of free_entries_lock scope to avoid potential
645 * deadlocks with serial consoles that use DMA.
646 */
647 static void __dma_entry_alloc_check_leak(u32 nr_entries)
648 {
649 u32 tmp = nr_entries % nr_prealloc_entries;
650
651 /* Shout each time we tick over some multiple of the initial pool */
652 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
653 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
654 nr_entries,
655 (nr_entries / nr_prealloc_entries));
656 }
657 }
658
659 /* struct dma_entry allocator
660 *
661 * The next two functions implement the allocator for
662 * struct dma_debug_entries.
663 */
664 static struct dma_debug_entry *dma_entry_alloc(void)
665 {
666 bool alloc_check_leak = false;
667 struct dma_debug_entry *entry;
668 unsigned long flags;
669 u32 nr_entries;
670
671 spin_lock_irqsave(&free_entries_lock, flags);
672 if (num_free_entries == 0) {
673 if (dma_debug_create_entries(GFP_ATOMIC)) {
674 global_disable = true;
675 spin_unlock_irqrestore(&free_entries_lock, flags);
676 pr_err("debugging out of memory - disabling\n");
677 return NULL;
678 }
679 alloc_check_leak = true;
680 nr_entries = nr_total_entries;
681 }
682
683 entry = __dma_entry_alloc();
684
685 spin_unlock_irqrestore(&free_entries_lock, flags);
686
687 if (alloc_check_leak)
688 __dma_entry_alloc_check_leak(nr_entries);
689
690 #ifdef CONFIG_STACKTRACE
691 entry->stack_len = stack_trace_save(entry->stack_entries,
692 ARRAY_SIZE(entry->stack_entries),
693 1);
694 #endif
695 return entry;
696 }
697
698 static void dma_entry_free(struct dma_debug_entry *entry)
699 {
700 unsigned long flags;
701
702 active_cacheline_remove(entry);
703
704 /*
705 * add to beginning of the list - this way the entries are
706 * more likely cache hot when they are reallocated.
707 */
708 spin_lock_irqsave(&free_entries_lock, flags);
709 list_add(&entry->list, &free_entries);
710 num_free_entries += 1;
711 spin_unlock_irqrestore(&free_entries_lock, flags);
712 }
713
714 /*
715 * DMA-API debugging init code
716 *
717 * The init code does two things:
718 * 1. Initialize core data structures
719 * 2. Preallocate a given number of dma_debug_entry structs
720 */
721
722 static ssize_t filter_read(struct file *file, char __user *user_buf,
723 size_t count, loff_t *ppos)
724 {
725 char buf[NAME_MAX_LEN + 1];
726 unsigned long flags;
727 int len;
728
729 if (!current_driver_name[0])
730 return 0;
731
732 /*
733 * We can't copy to userspace directly because current_driver_name can
734 * only be read under the driver_name_lock with irqs disabled. So
735 * create a temporary copy first.
736 */
737 read_lock_irqsave(&driver_name_lock, flags);
738 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
739 read_unlock_irqrestore(&driver_name_lock, flags);
740
741 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
742 }
743
744 static ssize_t filter_write(struct file *file, const char __user *userbuf,
745 size_t count, loff_t *ppos)
746 {
747 char buf[NAME_MAX_LEN];
748 unsigned long flags;
749 size_t len;
750 int i;
751
752 /*
753 * We can't copy from userspace directly. Access to
754 * current_driver_name is protected with a write_lock with irqs
755 * disabled. Since copy_from_user can fault and may sleep we
756 * need to copy to temporary buffer first
757 */
758 len = min(count, (size_t)(NAME_MAX_LEN - 1));
759 if (copy_from_user(buf, userbuf, len))
760 return -EFAULT;
761
762 buf[len] = 0;
763
764 write_lock_irqsave(&driver_name_lock, flags);
765
766 /*
767 * Now handle the string we got from userspace very carefully.
768 * The rules are:
769 * - only use the first token we got
770 * - token delimiter is everything looking like a space
771 * character (' ', '\n', '\t' ...)
772 *
773 */
774 if (!isalnum(buf[0])) {
775 /*
776 * If the first character userspace gave us is not
777 * alphanumerical then assume the filter should be
778 * switched off.
779 */
780 if (current_driver_name[0])
781 pr_info("switching off dma-debug driver filter\n");
782 current_driver_name[0] = 0;
783 current_driver = NULL;
784 goto out_unlock;
785 }
786
787 /*
788 * Now parse out the first token and use it as the name for the
789 * driver to filter for.
790 */
791 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
792 current_driver_name[i] = buf[i];
793 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
794 break;
795 }
796 current_driver_name[i] = 0;
797 current_driver = NULL;
798
799 pr_info("enable driver filter for driver [%s]\n",
800 current_driver_name);
801
802 out_unlock:
803 write_unlock_irqrestore(&driver_name_lock, flags);
804
805 return count;
806 }
807
808 static const struct file_operations filter_fops = {
809 .read = filter_read,
810 .write = filter_write,
811 .llseek = default_llseek,
812 };
813
814 static int __init dma_debug_fs_init(void)
815 {
816 struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
817
818 debugfs_create_bool("disabled", 0444, dentry, &global_disable);
819 debugfs_create_u32("error_count", 0444, dentry, &error_count);
820 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
821 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
822 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
823 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
824 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
825 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
826 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
827
828 return 0;
829 }
830 core_initcall_sync(dma_debug_fs_init);
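/*
 * With debugfs mounted at the usual location, the files above show up under
 * /sys/kernel/debug/dma-api/. For example (driver name is illustrative only):
 *   echo mydriver > /sys/kernel/debug/dma-api/driver_filter
 */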
831
832 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
833 {
834 struct dma_debug_entry *entry;
835 unsigned long flags;
836 int count = 0, i;
837
838 for (i = 0; i < HASH_SIZE; ++i) {
839 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
840 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
841 if (entry->dev == dev) {
842 count += 1;
843 *out_entry = entry;
844 }
845 }
846 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
847 }
848
849 return count;
850 }
851
852 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
853 {
854 struct device *dev = data;
855 struct dma_debug_entry *entry;
856 int count;
857
858 if (dma_debug_disabled())
859 return 0;
860
861 switch (action) {
862 case BUS_NOTIFY_UNBOUND_DRIVER:
863 count = device_dma_allocations(dev, &entry);
864 if (count == 0)
865 break;
866 err_printk(dev, entry, "device driver has pending "
867 "DMA allocations while released from device "
868 "[count=%d]\n"
869 "One of leaked entries details: "
870 "[device address=0x%016llx] [size=%llu bytes] "
871 "[mapped with %s] [mapped as %s]\n",
872 count, entry->dev_addr, entry->size,
873 dir2name[entry->direction], type2name[entry->type]);
874 break;
875 default:
876 break;
877 }
878
879 return 0;
880 }
881
882 void dma_debug_add_bus(struct bus_type *bus)
883 {
884 struct notifier_block *nb;
885
886 if (dma_debug_disabled())
887 return;
888
889 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
890 if (nb == NULL) {
891 pr_err("dma_debug_add_bus: out of memory\n");
892 return;
893 }
894
895 nb->notifier_call = dma_debug_device_change;
896
897 bus_register_notifier(bus, nb);
898 }
899
900 static int dma_debug_init(void)
901 {
902 int i, nr_pages;
903
904 /* Do not use dma_debug_initialized here, since we really want to be
905 * called to set dma_debug_initialized
906 */
907 if (global_disable)
908 return 0;
909
910 for (i = 0; i < HASH_SIZE; ++i) {
911 INIT_LIST_HEAD(&dma_entry_hash[i].list);
912 spin_lock_init(&dma_entry_hash[i].lock);
913 }
914
915 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
916 for (i = 0; i < nr_pages; ++i)
917 dma_debug_create_entries(GFP_KERNEL);
918 if (num_free_entries >= nr_prealloc_entries) {
919 pr_info("preallocated %d debug entries\n", nr_total_entries);
920 } else if (num_free_entries > 0) {
921 pr_warn("%d debug entries requested but only %d allocated\n",
922 nr_prealloc_entries, nr_total_entries);
923 } else {
924 pr_err("debugging out of memory error - disabled\n");
925 global_disable = true;
926
927 return 0;
928 }
929 min_free_entries = num_free_entries;
930
931 dma_debug_initialized = true;
932
933 pr_info("debugging enabled by kernel config\n");
934 return 0;
935 }
936 core_initcall(dma_debug_init);
937
938 static __init int dma_debug_cmdline(char *str)
939 {
940 if (!str)
941 return -EINVAL;
942
943 if (strncmp(str, "off", 3) == 0) {
944 pr_info("debugging disabled on kernel command line\n");
945 global_disable = true;
946 }
947
948 return 1;
949 }
950
951 static __init int dma_debug_entries_cmdline(char *str)
952 {
953 if (!str)
954 return -EINVAL;
955 if (!get_option(&str, &nr_prealloc_entries))
956 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
957 return 1;
958 }
959
960 __setup("dma_debug=", dma_debug_cmdline);
961 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
962
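/*
 * Look up the mapping matching @ref and complain about mismatches in size,
 * type, CPU address, sg entry count and direction, as well as a missing
 * dma_mapping_error() check, before releasing the entry.
 */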
963 static void check_unmap(struct dma_debug_entry *ref)
964 {
965 struct dma_debug_entry *entry;
966 struct hash_bucket *bucket;
967 unsigned long flags;
968
969 bucket = get_hash_bucket(ref, &flags);
970 entry = bucket_find_exact(bucket, ref);
971
972 if (!entry) {
973 /* must drop lock before calling dma_mapping_error */
974 put_hash_bucket(bucket, flags);
975
976 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
977 err_printk(ref->dev, NULL,
978 "device driver tries to free an "
979 "invalid DMA memory address\n");
980 } else {
981 err_printk(ref->dev, NULL,
982 "device driver tries to free DMA "
983 "memory it has not allocated [device "
984 "address=0x%016llx] [size=%llu bytes]\n",
985 ref->dev_addr, ref->size);
986 }
987 return;
988 }
989
990 if (ref->size != entry->size) {
991 err_printk(ref->dev, entry, "device driver frees "
992 "DMA memory with different size "
993 "[device address=0x%016llx] [map size=%llu bytes] "
994 "[unmap size=%llu bytes]\n",
995 ref->dev_addr, entry->size, ref->size);
996 }
997
998 if (ref->type != entry->type) {
999 err_printk(ref->dev, entry, "device driver frees "
1000 "DMA memory with wrong function "
1001 "[device address=0x%016llx] [size=%llu bytes] "
1002 "[mapped as %s] [unmapped as %s]\n",
1003 ref->dev_addr, ref->size,
1004 type2name[entry->type], type2name[ref->type]);
1005 } else if ((entry->type == dma_debug_coherent) &&
1006 (phys_addr(ref) != phys_addr(entry))) {
1007 err_printk(ref->dev, entry, "device driver frees "
1008 "DMA memory with different CPU address "
1009 "[device address=0x%016llx] [size=%llu bytes] "
1010 "[cpu alloc address=0x%016llx] "
1011 "[cpu free address=0x%016llx]",
1012 ref->dev_addr, ref->size,
1013 phys_addr(entry),
1014 phys_addr(ref));
1015 }
1016
1017 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1018 ref->sg_call_ents != entry->sg_call_ents) {
1019 err_printk(ref->dev, entry, "device driver frees "
1020 "DMA sg list with different entry count "
1021 "[map count=%d] [unmap count=%d]\n",
1022 entry->sg_call_ents, ref->sg_call_ents);
1023 }
1024
1025 /*
1026 * This may not be a bug in reality - but most implementations of the
1027 * DMA API don't handle this properly, so check for it here
1028 */
1029 if (ref->direction != entry->direction) {
1030 err_printk(ref->dev, entry, "device driver frees "
1031 "DMA memory with different direction "
1032 "[device address=0x%016llx] [size=%llu bytes] "
1033 "[mapped with %s] [unmapped with %s]\n",
1034 ref->dev_addr, ref->size,
1035 dir2name[entry->direction],
1036 dir2name[ref->direction]);
1037 }
1038
1039 /*
1040 * Drivers should use dma_mapping_error() to check the returned
1041 * addresses of dma_map_single() and dma_map_page().
1042 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1043 */
1044 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1045 err_printk(ref->dev, entry,
1046 "device driver failed to check map error"
1047 "[device address=0x%016llx] [size=%llu bytes] "
1048 "[mapped as %s]",
1049 ref->dev_addr, ref->size,
1050 type2name[entry->type]);
1051 }
1052
1053 hash_bucket_del(entry);
1054 dma_entry_free(entry);
1055
1056 put_hash_bucket(bucket, flags);
1057 }
1058
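/*
 * Warn when a driver maps memory that is part of a task's kernel stack,
 * for both directly mapped and vmalloc'ed (CONFIG_VMAP_STACK) stacks.
 */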
1059 static void check_for_stack(struct device *dev,
1060 struct page *page, size_t offset)
1061 {
1062 void *addr;
1063 struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1064
1065 if (!stack_vm_area) {
1066 /* Stack is direct-mapped. */
1067 if (PageHighMem(page))
1068 return;
1069 addr = page_address(page) + offset;
1070 if (object_is_on_stack(addr))
1071 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1072 } else {
1073 /* Stack is vmalloced. */
1074 int i;
1075
1076 for (i = 0; i < stack_vm_area->nr_pages; i++) {
1077 if (page != stack_vm_area->pages[i])
1078 continue;
1079
1080 addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1081 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1082 break;
1083 }
1084 }
1085 }
1086
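/* Warn when the mapped range overlaps kernel text or rodata */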
1087 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1088 {
1089 if (memory_intersects(_stext, _etext, addr, len) ||
1090 memory_intersects(__start_rodata, __end_rodata, addr, len))
1091 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1092 }
1093
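/*
 * Validate a dma_sync_*() call against the recorded mapping: the range must
 * lie inside an active mapping and the sync direction must be compatible
 * with the direction used at map time.
 */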
1094 static void check_sync(struct device *dev,
1095 struct dma_debug_entry *ref,
1096 bool to_cpu)
1097 {
1098 struct dma_debug_entry *entry;
1099 struct hash_bucket *bucket;
1100 unsigned long flags;
1101
1102 bucket = get_hash_bucket(ref, &flags);
1103
1104 entry = bucket_find_contain(&bucket, ref, &flags);
1105
1106 if (!entry) {
1107 err_printk(dev, NULL, "device driver tries "
1108 "to sync DMA memory it has not allocated "
1109 "[device address=0x%016llx] [size=%llu bytes]\n",
1110 (unsigned long long)ref->dev_addr, ref->size);
1111 goto out;
1112 }
1113
1114 if (ref->size > entry->size) {
1115 err_printk(dev, entry, "device driver syncs"
1116 " DMA memory outside allocated range "
1117 "[device address=0x%016llx] "
1118 "[allocation size=%llu bytes] "
1119 "[sync offset+size=%llu]\n",
1120 entry->dev_addr, entry->size,
1121 ref->size);
1122 }
1123
1124 if (entry->direction == DMA_BIDIRECTIONAL)
1125 goto out;
1126
1127 if (ref->direction != entry->direction) {
1128 err_printk(dev, entry, "device driver syncs "
1129 "DMA memory with different direction "
1130 "[device address=0x%016llx] [size=%llu bytes] "
1131 "[mapped with %s] [synced with %s]\n",
1132 (unsigned long long)ref->dev_addr, entry->size,
1133 dir2name[entry->direction],
1134 dir2name[ref->direction]);
1135 }
1136
1137 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1138 !(ref->direction == DMA_TO_DEVICE))
1139 err_printk(dev, entry, "device driver syncs "
1140 "device read-only DMA memory for cpu "
1141 "[device address=0x%016llx] [size=%llu bytes] "
1142 "[mapped with %s] [synced with %s]\n",
1143 (unsigned long long)ref->dev_addr, entry->size,
1144 dir2name[entry->direction],
1145 dir2name[ref->direction]);
1146
1147 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1148 !(ref->direction == DMA_FROM_DEVICE))
1149 err_printk(dev, entry, "device driver syncs "
1150 "device write-only DMA memory to device "
1151 "[device address=0x%016llx] [size=%llu bytes] "
1152 "[mapped with %s] [synced with %s]\n",
1153 (unsigned long long)ref->dev_addr, entry->size,
1154 dir2name[entry->direction],
1155 dir2name[ref->direction]);
1156
1157 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1158 ref->sg_call_ents != entry->sg_call_ents) {
1159 err_printk(ref->dev, entry, "device driver syncs "
1160 "DMA sg list with different entry count "
1161 "[map count=%d] [sync count=%d]\n",
1162 entry->sg_call_ents, ref->sg_call_ents);
1163 }
1164
1165 out:
1166 put_hash_bucket(bucket, flags);
1167 }
1168
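/*
 * With CONFIG_DMA_API_DEBUG_SG, verify that a mapped segment respects the
 * device's maximum segment size and segment boundary mask.
 */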
1169 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1170 {
1171 #ifdef CONFIG_DMA_API_DEBUG_SG
1172 unsigned int max_seg = dma_get_max_seg_size(dev);
1173 u64 start, end, boundary = dma_get_seg_boundary(dev);
1174
1175 /*
1176 * Either the driver forgot to set dma_parms appropriately, or
1177 * whoever generated the list forgot to check them.
1178 */
1179 if (sg->length > max_seg)
1180 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1181 sg->length, max_seg);
1182 /*
1183 * In some cases this could potentially be the DMA API
1184 * implementation's fault, but it would usually imply that
1185 * the scatterlist was built inappropriately to begin with.
1186 */
1187 start = sg_dma_address(sg);
1188 end = start + sg_dma_len(sg) - 1;
1189 if ((start ^ end) & ~boundary)
1190 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1191 start, end, boundary);
1192 #endif
1193 }
1194
1195 void debug_dma_map_single(struct device *dev, const void *addr,
1196 unsigned long len)
1197 {
1198 if (unlikely(dma_debug_disabled()))
1199 return;
1200
1201 if (!virt_addr_valid(addr))
1202 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1203 addr, len);
1204
1205 if (is_vmalloc_addr(addr))
1206 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1207 addr, len);
1208 }
1209 EXPORT_SYMBOL(debug_dma_map_single);
1210
1211 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1212 size_t size, int direction, dma_addr_t dma_addr,
1213 unsigned long attrs)
1214 {
1215 struct dma_debug_entry *entry;
1216
1217 if (unlikely(dma_debug_disabled()))
1218 return;
1219
1220 if (dma_mapping_error(dev, dma_addr))
1221 return;
1222
1223 entry = dma_entry_alloc();
1224 if (!entry)
1225 return;
1226
1227 entry->dev = dev;
1228 entry->type = dma_debug_single;
1229 entry->pfn = page_to_pfn(page);
1230 entry->offset = offset;
1231 entry->dev_addr = dma_addr;
1232 entry->size = size;
1233 entry->direction = direction;
1234 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1235
1236 check_for_stack(dev, page, offset);
1237
1238 if (!PageHighMem(page)) {
1239 void *addr = page_address(page) + offset;
1240
1241 check_for_illegal_area(dev, addr, size);
1242 }
1243
1244 add_dma_entry(entry, attrs);
1245 }
1246
1247 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1248 {
1249 struct dma_debug_entry ref;
1250 struct dma_debug_entry *entry;
1251 struct hash_bucket *bucket;
1252 unsigned long flags;
1253
1254 if (unlikely(dma_debug_disabled()))
1255 return;
1256
1257 ref.dev = dev;
1258 ref.dev_addr = dma_addr;
1259 bucket = get_hash_bucket(&ref, &flags);
1260
1261 list_for_each_entry(entry, &bucket->list, list) {
1262 if (!exact_match(&ref, entry))
1263 continue;
1264
1265 /*
1266 * The same physical address can be mapped multiple
1267 * times. Without a hardware IOMMU this results in the
1268 * same device addresses being put into the dma-debug
1269 * hash multiple times too. This can result in false
1270 * positives being reported. Therefore we implement a
1271 * best-fit algorithm here which updates the first entry
1272 * from the hash which fits the reference value and is
1273 * not currently listed as being checked.
1274 */
1275 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1276 entry->map_err_type = MAP_ERR_CHECKED;
1277 break;
1278 }
1279 }
1280
1281 put_hash_bucket(bucket, flags);
1282 }
1283 EXPORT_SYMBOL(debug_dma_mapping_error);
1284
1285 void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
1286 size_t size, int direction)
1287 {
1288 struct dma_debug_entry ref = {
1289 .type = dma_debug_single,
1290 .dev = dev,
1291 .dev_addr = dma_addr,
1292 .size = size,
1293 .direction = direction,
1294 };
1295
1296 if (unlikely(dma_debug_disabled()))
1297 return;
1298 check_unmap(&ref);
1299 }
1300
1301 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1302 int nents, int mapped_ents, int direction,
1303 unsigned long attrs)
1304 {
1305 struct dma_debug_entry *entry;
1306 struct scatterlist *s;
1307 int i;
1308
1309 if (unlikely(dma_debug_disabled()))
1310 return;
1311
1312 for_each_sg(sg, s, nents, i) {
1313 check_for_stack(dev, sg_page(s), s->offset);
1314 if (!PageHighMem(sg_page(s)))
1315 check_for_illegal_area(dev, sg_virt(s), s->length);
1316 }
1317
1318 for_each_sg(sg, s, mapped_ents, i) {
1319 entry = dma_entry_alloc();
1320 if (!entry)
1321 return;
1322
1323 entry->type = dma_debug_sg;
1324 entry->dev = dev;
1325 entry->pfn = page_to_pfn(sg_page(s));
1326 entry->offset = s->offset;
1327 entry->size = sg_dma_len(s);
1328 entry->dev_addr = sg_dma_address(s);
1329 entry->direction = direction;
1330 entry->sg_call_ents = nents;
1331 entry->sg_mapped_ents = mapped_ents;
1332
1333 check_sg_segment(dev, s);
1334
1335 add_dma_entry(entry, attrs);
1336 }
1337 }
1338
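/*
 * Return the sg_mapped_ents value recorded for the mapping matching @ref,
 * or 0 if no such mapping is found.
 */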
1339 static int get_nr_mapped_entries(struct device *dev,
1340 struct dma_debug_entry *ref)
1341 {
1342 struct dma_debug_entry *entry;
1343 struct hash_bucket *bucket;
1344 unsigned long flags;
1345 int mapped_ents;
1346
1347 bucket = get_hash_bucket(ref, &flags);
1348 entry = bucket_find_exact(bucket, ref);
1349 mapped_ents = 0;
1350
1351 if (entry)
1352 mapped_ents = entry->sg_mapped_ents;
1353 put_hash_bucket(bucket, flags);
1354
1355 return mapped_ents;
1356 }
1357
1358 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1359 int nelems, int dir)
1360 {
1361 struct scatterlist *s;
1362 int mapped_ents = 0, i;
1363
1364 if (unlikely(dma_debug_disabled()))
1365 return;
1366
1367 for_each_sg(sglist, s, nelems, i) {
1368
1369 struct dma_debug_entry ref = {
1370 .type = dma_debug_sg,
1371 .dev = dev,
1372 .pfn = page_to_pfn(sg_page(s)),
1373 .offset = s->offset,
1374 .dev_addr = sg_dma_address(s),
1375 .size = sg_dma_len(s),
1376 .direction = dir,
1377 .sg_call_ents = nelems,
1378 };
1379
1380 if (mapped_ents && i >= mapped_ents)
1381 break;
1382
1383 if (!i)
1384 mapped_ents = get_nr_mapped_entries(dev, &ref);
1385
1386 check_unmap(&ref);
1387 }
1388 }
1389
1390 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1391 dma_addr_t dma_addr, void *virt,
1392 unsigned long attrs)
1393 {
1394 struct dma_debug_entry *entry;
1395
1396 if (unlikely(dma_debug_disabled()))
1397 return;
1398
1399 if (unlikely(virt == NULL))
1400 return;
1401
1402 /* handle vmalloc and linear addresses */
1403 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1404 return;
1405
1406 entry = dma_entry_alloc();
1407 if (!entry)
1408 return;
1409
1410 entry->type = dma_debug_coherent;
1411 entry->dev = dev;
1412 entry->offset = offset_in_page(virt);
1413 entry->size = size;
1414 entry->dev_addr = dma_addr;
1415 entry->direction = DMA_BIDIRECTIONAL;
1416
1417 if (is_vmalloc_addr(virt))
1418 entry->pfn = vmalloc_to_pfn(virt);
1419 else
1420 entry->pfn = page_to_pfn(virt_to_page(virt));
1421
1422 add_dma_entry(entry, attrs);
1423 }
1424
1425 void debug_dma_free_coherent(struct device *dev, size_t size,
1426 void *virt, dma_addr_t dma_addr)
1427 {
1428 struct dma_debug_entry ref = {
1429 .type = dma_debug_coherent,
1430 .dev = dev,
1431 .offset = offset_in_page(virt),
1432 .dev_addr = dma_addr,
1433 .size = size,
1434 .direction = DMA_BIDIRECTIONAL,
1435 };
1436
1437 /* handle vmalloc and linear addresses */
1438 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1439 return;
1440
1441 if (is_vmalloc_addr(virt))
1442 ref.pfn = vmalloc_to_pfn(virt);
1443 else
1444 ref.pfn = page_to_pfn(virt_to_page(virt));
1445
1446 if (unlikely(dma_debug_disabled()))
1447 return;
1448
1449 check_unmap(&ref);
1450 }
1451
1452 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1453 int direction, dma_addr_t dma_addr,
1454 unsigned long attrs)
1455 {
1456 struct dma_debug_entry *entry;
1457
1458 if (unlikely(dma_debug_disabled()))
1459 return;
1460
1461 entry = dma_entry_alloc();
1462 if (!entry)
1463 return;
1464
1465 entry->type = dma_debug_resource;
1466 entry->dev = dev;
1467 entry->pfn = PHYS_PFN(addr);
1468 entry->offset = offset_in_page(addr);
1469 entry->size = size;
1470 entry->dev_addr = dma_addr;
1471 entry->direction = direction;
1472 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1473
1474 add_dma_entry(entry, attrs);
1475 }
1476
1477 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1478 size_t size, int direction)
1479 {
1480 struct dma_debug_entry ref = {
1481 .type = dma_debug_resource,
1482 .dev = dev,
1483 .dev_addr = dma_addr,
1484 .size = size,
1485 .direction = direction,
1486 };
1487
1488 if (unlikely(dma_debug_disabled()))
1489 return;
1490
1491 check_unmap(&ref);
1492 }
1493
1494 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1495 size_t size, int direction)
1496 {
1497 struct dma_debug_entry ref;
1498
1499 if (unlikely(dma_debug_disabled()))
1500 return;
1501
1502 ref.type = dma_debug_single;
1503 ref.dev = dev;
1504 ref.dev_addr = dma_handle;
1505 ref.size = size;
1506 ref.direction = direction;
1507 ref.sg_call_ents = 0;
1508
1509 check_sync(dev, &ref, true);
1510 }
1511
1512 void debug_dma_sync_single_for_device(struct device *dev,
1513 dma_addr_t dma_handle, size_t size,
1514 int direction)
1515 {
1516 struct dma_debug_entry ref;
1517
1518 if (unlikely(dma_debug_disabled()))
1519 return;
1520
1521 ref.type = dma_debug_single;
1522 ref.dev = dev;
1523 ref.dev_addr = dma_handle;
1524 ref.size = size;
1525 ref.direction = direction;
1526 ref.sg_call_ents = 0;
1527
1528 check_sync(dev, &ref, false);
1529 }
1530
1531 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1532 int nelems, int direction)
1533 {
1534 struct scatterlist *s;
1535 int mapped_ents = 0, i;
1536
1537 if (unlikely(dma_debug_disabled()))
1538 return;
1539
1540 for_each_sg(sg, s, nelems, i) {
1541
1542 struct dma_debug_entry ref = {
1543 .type = dma_debug_sg,
1544 .dev = dev,
1545 .pfn = page_to_pfn(sg_page(s)),
1546 .offset = s->offset,
1547 .dev_addr = sg_dma_address(s),
1548 .size = sg_dma_len(s),
1549 .direction = direction,
1550 .sg_call_ents = nelems,
1551 };
1552
1553 if (!i)
1554 mapped_ents = get_nr_mapped_entries(dev, &ref);
1555
1556 if (i >= mapped_ents)
1557 break;
1558
1559 check_sync(dev, &ref, true);
1560 }
1561 }
1562
1563 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1564 int nelems, int direction)
1565 {
1566 struct scatterlist *s;
1567 int mapped_ents = 0, i;
1568
1569 if (unlikely(dma_debug_disabled()))
1570 return;
1571
1572 for_each_sg(sg, s, nelems, i) {
1573
1574 struct dma_debug_entry ref = {
1575 .type = dma_debug_sg,
1576 .dev = dev,
1577 .pfn = page_to_pfn(sg_page(s)),
1578 .offset = s->offset,
1579 .dev_addr = sg_dma_address(s),
1580 .size = sg_dma_len(s),
1581 .direction = direction,
1582 .sg_call_ents = nelems,
1583 };
1584 if (!i)
1585 mapped_ents = get_nr_mapped_entries(dev, &ref);
1586
1587 if (i >= mapped_ents)
1588 break;
1589
1590 check_sync(dev, &ref, false);
1591 }
1592 }
1593
1594 static int __init dma_debug_driver_setup(char *str)
1595 {
1596 int i;
1597
1598 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1599 current_driver_name[i] = *str;
1600 if (*str == 0)
1601 break;
1602 }
1603
1604 if (current_driver_name[0])
1605 pr_info("enable driver filter for driver [%s]\n",
1606 current_driver_name);
1607
1608
1609 return 1;
1610 }
1611 __setup("dma_debug_driver=", dma_debug_driver_setup);
1612