xref: /openbmc/linux/kernel/dma/debug.c (revision abed054f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008 Advanced Micro Devices, Inc.
4  *
5  * Author: Joerg Roedel <joerg.roedel@amd.com>
6  */
7 
8 #define pr_fmt(fmt)	"DMA-API: " fmt
9 
10 #include <linux/sched/task_stack.h>
11 #include <linux/scatterlist.h>
12 #include <linux/dma-map-ops.h>
13 #include <linux/sched/task.h>
14 #include <linux/stacktrace.h>
15 #include <linux/spinlock.h>
16 #include <linux/vmalloc.h>
17 #include <linux/debugfs.h>
18 #include <linux/uaccess.h>
19 #include <linux/export.h>
20 #include <linux/device.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/ctype.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <asm/sections.h>
27 #include "debug.h"
28 
29 #define HASH_SIZE       16384ULL
30 #define HASH_FN_SHIFT   13
31 #define HASH_FN_MASK    (HASH_SIZE - 1)
32 
33 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
34 /* If the pool runs out, add this many new entries at once */
35 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
36 
37 enum {
38 	dma_debug_single,
39 	dma_debug_sg,
40 	dma_debug_coherent,
41 	dma_debug_resource,
42 };
43 
44 enum map_err_types {
45 	MAP_ERR_CHECK_NOT_APPLICABLE,
46 	MAP_ERR_NOT_CHECKED,
47 	MAP_ERR_CHECKED,
48 };
49 
50 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
51 
52 /**
53  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
54  * @list: node on pre-allocated free_entries list
55  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
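 * @dev_addr: dma address of the mapping, as handed back to the driver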
56  * @size: length of the mapping
57  * @type: single, page, sg, coherent
58  * @direction: enum dma_data_direction
59  * @sg_call_ents: 'nents' from dma_map_sg
60  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
61  * @pfn: page frame of the start address
62  * @offset: offset of mapping relative to pfn
63  * @map_err_type: track whether dma_mapping_error() was checked
64  * @stacktrace: support backtraces when a violation is detected
65  */
66 struct dma_debug_entry {
67 	struct list_head list;
68 	struct device    *dev;
69 	u64              dev_addr;
70 	u64              size;
71 	int              type;
72 	int              direction;
73 	int		 sg_call_ents;
74 	int		 sg_mapped_ents;
75 	unsigned long	 pfn;
76 	size_t		 offset;
77 	enum map_err_types  map_err_type;
78 #ifdef CONFIG_STACKTRACE
79 	unsigned int	stack_len;
80 	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
81 #endif
82 } ____cacheline_aligned_in_smp;
83 
84 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
85 
86 struct hash_bucket {
87 	struct list_head list;
88 	spinlock_t lock;
89 };
90 
91 /* Hash list to save the allocated dma addresses */
92 static struct hash_bucket dma_entry_hash[HASH_SIZE];
93 /* List of pre-allocated dma_debug_entry's */
94 static LIST_HEAD(free_entries);
95 /* Lock for the list above */
96 static DEFINE_SPINLOCK(free_entries_lock);
97 
98 /* Global disable flag - will be set in case of an error */
99 static bool global_disable __read_mostly;
100 
101 /* Early initialization disable flag, set at the end of dma_debug_init */
102 static bool dma_debug_initialized __read_mostly;
103 
104 static inline bool dma_debug_disabled(void)
105 {
106 	return global_disable || !dma_debug_initialized;
107 }
108 
109 /* Global error count */
110 static u32 error_count;
111 
112 /* Global error show enable */
113 static u32 show_all_errors __read_mostly;
114 /* Number of errors to show */
115 static u32 show_num_errors = 1;
116 
117 static u32 num_free_entries;
118 static u32 min_free_entries;
119 static u32 nr_total_entries;
120 
121 /* number of preallocated entries requested by kernel cmdline */
122 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
123 
124 /* per-driver filter related state */
125 
126 #define NAME_MAX_LEN	64
127 
128 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
129 static struct device_driver *current_driver                    __read_mostly;
130 
131 static DEFINE_RWLOCK(driver_name_lock);
132 
133 static const char *const maperr2str[] = {
134 	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
135 	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
136 	[MAP_ERR_CHECKED] = "dma map error checked",
137 };
138 
139 static const char *type2name[] = {
140 	[dma_debug_single] = "single",
141 	[dma_debug_sg] = "scatter-gather",
142 	[dma_debug_coherent] = "coherent",
143 	[dma_debug_resource] = "resource",
144 };
145 
146 static const char *dir2name[] = {
147 	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
148 	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
149 	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
150 	[DMA_NONE]		= "DMA_NONE",
151 };
152 
153 /*
154  * The access to some variables in this macro is racy. We can't use atomic_t
155  * here because all these variables are exported to debugfs. Some of them are
156  * even writeable. This is also the reason why a lock won't help much. But
157  * anyway, the races are no big deal. Here is why:
158  *
159  *   error_count: the addition is racy, but the worst thing that can happen is
160  *                that we don't count some errors
161  *   show_num_errors: the subtraction is racy. Also no big deal because in
162  *                    worst case this will result in one warning more in the
163  *                    system log than the user configured. This variable is
164  *                    writeable via debugfs.
165  */
166 static inline void dump_entry_trace(struct dma_debug_entry *entry)
167 {
168 #ifdef CONFIG_STACKTRACE
169 	if (entry) {
170 		pr_warn("Mapped at:\n");
171 		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
172 	}
173 #endif
174 }
175 
176 static bool driver_filter(struct device *dev)
177 {
178 	struct device_driver *drv;
179 	unsigned long flags;
180 	bool ret;
181 
182 	/* driver filter off */
183 	if (likely(!current_driver_name[0]))
184 		return true;
185 
186 	/* driver filter on and initialized */
187 	if (current_driver && dev && dev->driver == current_driver)
188 		return true;
189 
190 	/* driver filter on, but we can't filter on a NULL device... */
191 	if (!dev)
192 		return false;
193 
194 	if (current_driver || !current_driver_name[0])
195 		return false;
196 
197 	/* driver filter on but not yet initialized */
198 	drv = dev->driver;
199 	if (!drv)
200 		return false;
201 
202 	/* lock to protect against change of current_driver_name */
203 	read_lock_irqsave(&driver_name_lock, flags);
204 
205 	ret = false;
206 	if (drv->name &&
207 	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
208 		current_driver = drv;
209 		ret = true;
210 	}
211 
212 	read_unlock_irqrestore(&driver_name_lock, flags);
213 
214 	return ret;
215 }
216 
217 #define err_printk(dev, entry, format, arg...) do {			\
218 		error_count += 1;					\
219 		if (driver_filter(dev) &&				\
220 		    (show_all_errors || show_num_errors > 0)) {		\
221 			WARN(1, pr_fmt("%s %s: ") format,		\
222 			     dev ? dev_driver_string(dev) : "NULL",	\
223 			     dev ? dev_name(dev) : "NULL", ## arg);	\
224 			dump_entry_trace(entry);			\
225 		}							\
226 		if (!show_all_errors && show_num_errors > 0)		\
227 			show_num_errors -= 1;				\
228 	} while (0)
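/*
 * An err_printk() report in the log looks roughly like this (driver and
 * device names are illustrative):
 *
 *   DMA-API: e1000e 0000:00:19.0: device driver frees DMA memory with
 *   different size [device address=0x00000000abcd0000] [map size=4096 bytes]
 *   [unmap size=2048 bytes]
 *
 * followed by the "Mapped at:" backtrace from dump_entry_trace() when
 * CONFIG_STACKTRACE is enabled.
 */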
229 
230 /*
231  * Hash related functions
232  *
233  * Every DMA-API request is saved into a struct dma_debug_entry. To
234  * have quick access to these structs they are stored into a hash.
235  */
236 static int hash_fn(struct dma_debug_entry *entry)
237 {
238 	/*
239 	 * Hash function is based on the dma address.
240 	 * We use bits 20-27 here as the index into the hash
241 	 * We use bits 13-26 here as the index into the hash
242 	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
243 }
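/*
 * For example, with HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3fff a
 * mapping at dev_addr 0x01230000 hashes to bucket
 * (0x01230000 >> 13) & 0x3fff == 0x918; all addresses inside the same
 * naturally aligned 8 KiB window share that bucket.
 */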
244 
245 /*
246  * Request exclusive access to a hash bucket for a given dma_debug_entry.
247  */
248 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
249 					   unsigned long *flags)
250 	__acquires(&dma_entry_hash[idx].lock)
251 {
252 	int idx = hash_fn(entry);
253 	unsigned long __flags;
254 
255 	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
256 	*flags = __flags;
257 	return &dma_entry_hash[idx];
258 }
259 
260 /*
261  * Give up exclusive access to the hash bucket
262  */
263 static void put_hash_bucket(struct hash_bucket *bucket,
264 			    unsigned long flags)
265 	__releases(&bucket->lock)
266 {
267 	spin_unlock_irqrestore(&bucket->lock, flags);
268 }
269 
270 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
271 {
272 	return ((a->dev_addr == b->dev_addr) &&
273 		(a->dev == b->dev)) ? true : false;
274 }
275 
276 static bool containing_match(struct dma_debug_entry *a,
277 			     struct dma_debug_entry *b)
278 {
279 	if (a->dev != b->dev)
280 		return false;
281 
282 	if ((b->dev_addr <= a->dev_addr) &&
283 	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
284 		return true;
285 
286 	return false;
287 }
288 
289 /*
290  * Search a given entry in the hash bucket list
291  */
292 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
293 						  struct dma_debug_entry *ref,
294 						  match_fn match)
295 {
296 	struct dma_debug_entry *entry, *ret = NULL;
297 	int matches = 0, match_lvl, last_lvl = -1;
298 
299 	list_for_each_entry(entry, &bucket->list, list) {
300 		if (!match(ref, entry))
301 			continue;
302 
303 		/*
304 		 * Some drivers map the same physical address multiple
305 		 * times. Without a hardware IOMMU this results in the
306 		 * same device addresses being put into the dma-debug
307 		 * hash multiple times too. This can result in false
308 		 * positives being reported. Therefore we implement a
309 		 * best-fit algorithm here which returns the entry from
310 		 * the hash which fits best to the reference value
311 		 * instead of the first-fit.
312 		 */
313 		matches += 1;
314 		match_lvl = 0;
315 		entry->size         == ref->size         ? ++match_lvl : 0;
316 		entry->type         == ref->type         ? ++match_lvl : 0;
317 		entry->direction    == ref->direction    ? ++match_lvl : 0;
318 		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
319 
320 		if (match_lvl == 4) {
321 			/* perfect-fit - return the result */
322 			return entry;
323 		} else if (match_lvl > last_lvl) {
324 			/*
325 			 * We found an entry that fits better than the
326 			 * previous one or it is the 1st match.
327 			 */
328 			last_lvl = match_lvl;
329 			ret      = entry;
330 		}
331 	}
332 
333 	/*
334 	 * If we have multiple matches but no perfect-fit, just return
335 	 * NULL.
336 	 */
337 	ret = (matches == 1) ? ret : NULL;
338 
339 	return ret;
340 }
341 
342 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
343 						 struct dma_debug_entry *ref)
344 {
345 	return __hash_bucket_find(bucket, ref, exact_match);
346 }
347 
348 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
349 						   struct dma_debug_entry *ref,
350 						   unsigned long *flags)
351 {
352 
353 	unsigned int max_range = dma_get_max_seg_size(ref->dev);
354 	struct dma_debug_entry *entry, index = *ref;
355 	unsigned int range = 0;
356 
357 	while (range <= max_range) {
358 		entry = __hash_bucket_find(*bucket, ref, containing_match);
359 
360 		if (entry)
361 			return entry;
362 
363 		/*
364 		 * Nothing found, go back a hash bucket
365 		 */
366 		put_hash_bucket(*bucket, *flags);
367 		range          += (1 << HASH_FN_SHIFT);
368 		index.dev_addr -= (1 << HASH_FN_SHIFT);
369 		*bucket = get_hash_bucket(&index, flags);
370 	}
371 
372 	return NULL;
373 }
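/*
 * Note that a partial sync may reference an address in the middle of a
 * large mapping, which can hash to a different bucket than the mapping's
 * start address.  bucket_find_contain() therefore walks backwards one
 * bucket stride (1 << HASH_FN_SHIFT, i.e. 8 KiB of dma address space) per
 * iteration, up to the device's maximum segment size, until it finds an
 * entry that contains the reference.
 */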
374 
375 /*
376  * Add an entry to a hash bucket
377  */
378 static void hash_bucket_add(struct hash_bucket *bucket,
379 			    struct dma_debug_entry *entry)
380 {
381 	list_add_tail(&entry->list, &bucket->list);
382 }
383 
384 /*
385  * Remove entry from a hash bucket list
386  */
387 static void hash_bucket_del(struct dma_debug_entry *entry)
388 {
389 	list_del(&entry->list);
390 }
391 
392 static unsigned long long phys_addr(struct dma_debug_entry *entry)
393 {
394 	if (entry->type == dma_debug_resource)
395 		return __pfn_to_phys(entry->pfn) + entry->offset;
396 
397 	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
398 }
399 
400 /*
401  * Dump mapping entries for debugging purposes
402  */
403 void debug_dma_dump_mappings(struct device *dev)
404 {
405 	int idx;
406 
407 	for (idx = 0; idx < HASH_SIZE; idx++) {
408 		struct hash_bucket *bucket = &dma_entry_hash[idx];
409 		struct dma_debug_entry *entry;
410 		unsigned long flags;
411 
412 		spin_lock_irqsave(&bucket->lock, flags);
413 
414 		list_for_each_entry(entry, &bucket->list, list) {
415 			if (!dev || dev == entry->dev) {
416 				dev_info(entry->dev,
417 					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
418 					 type2name[entry->type], idx,
419 					 phys_addr(entry), entry->pfn,
420 					 entry->dev_addr, entry->size,
421 					 dir2name[entry->direction],
422 					 maperr2str[entry->map_err_type]);
423 			}
424 		}
425 
426 		spin_unlock_irqrestore(&bucket->lock, flags);
427 		cond_resched();
428 	}
429 }
430 
431 /*
432  * For each mapping (initial cacheline in the case of
433  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
434  * scatterlist, or the cacheline specified in dma_map_single) insert
435  * into this tree using the cacheline as the key. At
436  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
437  * the entry already exists at insertion time add a tag as a reference
438  * count for the overlapping mappings.  For now, the overlap tracking
439  * just ensures that 'unmaps' balance 'maps' before marking the
440  * cacheline idle, but we should also be flagging overlaps as an API
441  * violation.
442  *
443  * Memory usage is mostly constrained by the maximum number of available
444  * dma-debug entries in that we need a free dma_debug_entry before
445  * inserting into the tree.  In the case of dma_map_page and
446  * dma_alloc_coherent there is only one dma_debug_entry and one
447  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
448  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
449  * entries into the tree.
450  */
451 static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
452 static DEFINE_SPINLOCK(radix_lock);
453 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
454 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
455 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
456 
457 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
458 {
459 	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
460 		(entry->offset >> L1_CACHE_SHIFT);
461 }
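/*
 * Illustrative arithmetic, assuming 4 KiB pages and 64-byte cachelines
 * (PAGE_SHIFT == 12, L1_CACHE_SHIFT == 6): pfn 0x1000 with offset 0x80
 * yields cacheline number (0x1000 << 6) + (0x80 >> 6) == 0x40002, and
 * each page covers CACHELINES_PER_PAGE == 64 consecutive numbers.
 */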
462 
463 static int active_cacheline_read_overlap(phys_addr_t cln)
464 {
465 	int overlap = 0, i;
466 
467 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
468 		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
469 			overlap |= 1 << i;
470 	return overlap;
471 }
472 
473 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
474 {
475 	int i;
476 
477 	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
478 		return overlap;
479 
480 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
481 		if (overlap & 1 << i)
482 			radix_tree_tag_set(&dma_active_cacheline, cln, i);
483 		else
484 			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
485 
486 	return overlap;
487 }
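/*
 * The radix tree slot for a cacheline holds the dma_debug_entry pointer;
 * the overlap count is encoded bit by bit in the slot's tags instead.
 * With RADIX_TREE_MAX_TAGS == 3 tags, counts from 0 up to
 * ACTIVE_CACHELINE_MAX_OVERLAP == 7 can be represented; e.g. a count of 5
 * sets tags 2 and 0 (binary 101).
 */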
488 
489 static void active_cacheline_inc_overlap(phys_addr_t cln)
490 {
491 	int overlap = active_cacheline_read_overlap(cln);
492 
493 	overlap = active_cacheline_set_overlap(cln, ++overlap);
494 
495 	/* If we overflowed the overlap counter then we're potentially
496 	 * leaking dma-mappings.
497 	 */
498 	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
499 		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
500 		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
501 }
502 
503 static int active_cacheline_dec_overlap(phys_addr_t cln)
504 {
505 	int overlap = active_cacheline_read_overlap(cln);
506 
507 	return active_cacheline_set_overlap(cln, --overlap);
508 }
509 
510 static int active_cacheline_insert(struct dma_debug_entry *entry)
511 {
512 	phys_addr_t cln = to_cacheline_number(entry);
513 	unsigned long flags;
514 	int rc;
515 
516 	/* If the device is not writing memory then we don't have any
517 	 * concerns about the cpu consuming stale data.  This mitigates
518 	 * legitimate usages of overlapping mappings.
519 	 */
520 	if (entry->direction == DMA_TO_DEVICE)
521 		return 0;
522 
523 	spin_lock_irqsave(&radix_lock, flags);
524 	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
525 	if (rc == -EEXIST)
526 		active_cacheline_inc_overlap(cln);
527 	spin_unlock_irqrestore(&radix_lock, flags);
528 
529 	return rc;
530 }
531 
532 static void active_cacheline_remove(struct dma_debug_entry *entry)
533 {
534 	phys_addr_t cln = to_cacheline_number(entry);
535 	unsigned long flags;
536 
537 	/* ...mirror the insert case */
538 	if (entry->direction == DMA_TO_DEVICE)
539 		return;
540 
541 	spin_lock_irqsave(&radix_lock, flags);
542 	/* since we are counting overlaps the final put of the
543 	 * cacheline will occur when the overlap count is 0.
544 	 * active_cacheline_dec_overlap() returns -1 in that case
545 	 */
546 	if (active_cacheline_dec_overlap(cln) < 0)
547 		radix_tree_delete(&dma_active_cacheline, cln);
548 	spin_unlock_irqrestore(&radix_lock, flags);
549 }
550 
551 /*
552  * Wrapper function for adding an entry to the hash.
553  * This function takes care of locking itself.
554  */
555 static void add_dma_entry(struct dma_debug_entry *entry)
556 {
557 	struct hash_bucket *bucket;
558 	unsigned long flags;
559 	int rc;
560 
561 	bucket = get_hash_bucket(entry, &flags);
562 	hash_bucket_add(bucket, entry);
563 	put_hash_bucket(bucket, flags);
564 
565 	rc = active_cacheline_insert(entry);
566 	if (rc == -ENOMEM) {
567 		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
568 		global_disable = true;
569 	} else if (rc == -EEXIST) {
570 		pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
571 	}
572 }
573 
574 static int dma_debug_create_entries(gfp_t gfp)
575 {
576 	struct dma_debug_entry *entry;
577 	int i;
578 
579 	entry = (void *)get_zeroed_page(gfp);
580 	if (!entry)
581 		return -ENOMEM;
582 
583 	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
584 		list_add_tail(&entry[i].list, &free_entries);
585 
586 	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
587 	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
588 
589 	return 0;
590 }
591 
592 static struct dma_debug_entry *__dma_entry_alloc(void)
593 {
594 	struct dma_debug_entry *entry;
595 
596 	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
597 	list_del(&entry->list);
598 	memset(entry, 0, sizeof(*entry));
599 
600 	num_free_entries -= 1;
601 	if (num_free_entries < min_free_entries)
602 		min_free_entries = num_free_entries;
603 
604 	return entry;
605 }
606 
607 static void __dma_entry_alloc_check_leak(void)
608 {
609 	u32 tmp = nr_total_entries % nr_prealloc_entries;
610 
611 	/* Shout each time we tick over some multiple of the initial pool */
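	/*
	 * The "%u00%%" below prints nr_total_entries / nr_prealloc_entries
	 * followed by a literal "00%", i.e. the current pool size as a
	 * multiple of the initial allocation expressed as a percentage.
	 */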
612 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
613 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
614 			nr_total_entries,
615 			(nr_total_entries / nr_prealloc_entries));
616 	}
617 }
618 
619 /* struct dma_entry allocator
620  *
621  * The next two functions implement the allocator for
622  * struct dma_debug_entries.
623  */
624 static struct dma_debug_entry *dma_entry_alloc(void)
625 {
626 	struct dma_debug_entry *entry;
627 	unsigned long flags;
628 
629 	spin_lock_irqsave(&free_entries_lock, flags);
630 	if (num_free_entries == 0) {
631 		if (dma_debug_create_entries(GFP_ATOMIC)) {
632 			global_disable = true;
633 			spin_unlock_irqrestore(&free_entries_lock, flags);
634 			pr_err("debugging out of memory - disabling\n");
635 			return NULL;
636 		}
637 		__dma_entry_alloc_check_leak();
638 	}
639 
640 	entry = __dma_entry_alloc();
641 
642 	spin_unlock_irqrestore(&free_entries_lock, flags);
643 
644 #ifdef CONFIG_STACKTRACE
645 	entry->stack_len = stack_trace_save(entry->stack_entries,
646 					    ARRAY_SIZE(entry->stack_entries),
647 					    1);
648 #endif
649 	return entry;
650 }
651 
652 static void dma_entry_free(struct dma_debug_entry *entry)
653 {
654 	unsigned long flags;
655 
656 	active_cacheline_remove(entry);
657 
658 	/*
659 	 * add to beginning of the list - this way the entries are
660 	 * more likely cache hot when they are reallocated.
661 	 */
662 	spin_lock_irqsave(&free_entries_lock, flags);
663 	list_add(&entry->list, &free_entries);
664 	num_free_entries += 1;
665 	spin_unlock_irqrestore(&free_entries_lock, flags);
666 }
667 
668 /*
669  * DMA-API debugging init code
670  *
671  * The init code does two things:
672  *   1. Initialize core data structures
673  *   2. Preallocate a given number of dma_debug_entry structs
674  */
675 
676 static ssize_t filter_read(struct file *file, char __user *user_buf,
677 			   size_t count, loff_t *ppos)
678 {
679 	char buf[NAME_MAX_LEN + 1];
680 	unsigned long flags;
681 	int len;
682 
683 	if (!current_driver_name[0])
684 		return 0;
685 
686 	/*
687 	 * We can't copy to userspace directly because current_driver_name can
688 	 * only be read under the driver_name_lock with irqs disabled. So
689 	 * create a temporary copy first.
690 	 */
691 	read_lock_irqsave(&driver_name_lock, flags);
692 	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
693 	read_unlock_irqrestore(&driver_name_lock, flags);
694 
695 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
696 }
697 
698 static ssize_t filter_write(struct file *file, const char __user *userbuf,
699 			    size_t count, loff_t *ppos)
700 {
701 	char buf[NAME_MAX_LEN];
702 	unsigned long flags;
703 	size_t len;
704 	int i;
705 
706 	/*
707 	 * We can't copy from userspace directly. Access to
708 	 * current_driver_name is protected with a write_lock with irqs
709 	 * disabled. Since copy_from_user can fault and may sleep we
710 	 * need to copy to temporary buffer first
711 	 */
712 	len = min(count, (size_t)(NAME_MAX_LEN - 1));
713 	if (copy_from_user(buf, userbuf, len))
714 		return -EFAULT;
715 
716 	buf[len] = 0;
717 
718 	write_lock_irqsave(&driver_name_lock, flags);
719 
720 	/*
721 	 * Now handle the string we got from userspace very carefully.
722 	 * The rules are:
723 	 *         - only use the first token we got
724 	 *         - token delimiter is everything looking like a space
725 	 *           character (' ', '\n', '\t' ...)
726 	 *
727 	 */
728 	if (!isalnum(buf[0])) {
729 		/*
730 		 * If the first character userspace gave us is not
731 		 * alphanumerical then assume the filter should be
732 		 * switched off.
733 		 */
734 		if (current_driver_name[0])
735 			pr_info("switching off dma-debug driver filter\n");
736 		current_driver_name[0] = 0;
737 		current_driver = NULL;
738 		goto out_unlock;
739 	}
740 
741 	/*
742 	 * Now parse out the first token and use it as the name for the
743 	 * driver to filter for.
744 	 */
745 	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
746 		current_driver_name[i] = buf[i];
747 		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
748 			break;
749 	}
750 	current_driver_name[i] = 0;
751 	current_driver = NULL;
752 
753 	pr_info("enable driver filter for driver [%s]\n",
754 		current_driver_name);
755 
756 out_unlock:
757 	write_unlock_irqrestore(&driver_name_lock, flags);
758 
759 	return count;
760 }
761 
762 static const struct file_operations filter_fops = {
763 	.read  = filter_read,
764 	.write = filter_write,
765 	.llseek = default_llseek,
766 };
767 
768 static int dump_show(struct seq_file *seq, void *v)
769 {
770 	int idx;
771 
772 	for (idx = 0; idx < HASH_SIZE; idx++) {
773 		struct hash_bucket *bucket = &dma_entry_hash[idx];
774 		struct dma_debug_entry *entry;
775 		unsigned long flags;
776 
777 		spin_lock_irqsave(&bucket->lock, flags);
778 		list_for_each_entry(entry, &bucket->list, list) {
779 			seq_printf(seq,
780 				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
781 				   dev_name(entry->dev),
782 				   dev_driver_string(entry->dev),
783 				   type2name[entry->type], idx,
784 				   phys_addr(entry), entry->pfn,
785 				   entry->dev_addr, entry->size,
786 				   dir2name[entry->direction],
787 				   maperr2str[entry->map_err_type]);
788 		}
789 		spin_unlock_irqrestore(&bucket->lock, flags);
790 	}
791 	return 0;
792 }
793 DEFINE_SHOW_ATTRIBUTE(dump);
794 
795 static int __init dma_debug_fs_init(void)
796 {
797 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
798 
799 	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
800 	debugfs_create_u32("error_count", 0444, dentry, &error_count);
801 	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
802 	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
803 	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
804 	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
805 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
806 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
807 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
808 
809 	return 0;
810 }
811 core_initcall_sync(dma_debug_fs_init);
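/*
 * These files live under <debugfs>/dma-api/.  For example (paths and the
 * driver name are illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *   cat /sys/kernel/debug/dma-api/dump
 *
 * limits error reports to a single driver and dumps all active mappings.
 */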
812 
813 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
814 {
815 	struct dma_debug_entry *entry;
816 	unsigned long flags;
817 	int count = 0, i;
818 
819 	for (i = 0; i < HASH_SIZE; ++i) {
820 		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
821 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
822 			if (entry->dev == dev) {
823 				count += 1;
824 				*out_entry = entry;
825 			}
826 		}
827 		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
828 	}
829 
830 	return count;
831 }
832 
833 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
834 {
835 	struct device *dev = data;
836 	struct dma_debug_entry *entry;
837 	int count;
838 
839 	if (dma_debug_disabled())
840 		return 0;
841 
842 	switch (action) {
843 	case BUS_NOTIFY_UNBOUND_DRIVER:
844 		count = device_dma_allocations(dev, &entry);
845 		if (count == 0)
846 			break;
847 		err_printk(dev, entry, "device driver has pending "
848 				"DMA allocations while released from device "
849 				"[count=%d]\n"
850 				"One of leaked entries details: "
851 				"[device address=0x%016llx] [size=%llu bytes] "
852 				"[mapped with %s] [mapped as %s]\n",
853 			count, entry->dev_addr, entry->size,
854 			dir2name[entry->direction], type2name[entry->type]);
855 		break;
856 	default:
857 		break;
858 	}
859 
860 	return 0;
861 }
862 
863 void dma_debug_add_bus(struct bus_type *bus)
864 {
865 	struct notifier_block *nb;
866 
867 	if (dma_debug_disabled())
868 		return;
869 
870 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
871 	if (nb == NULL) {
872 		pr_err("dma_debug_add_bus: out of memory\n");
873 		return;
874 	}
875 
876 	nb->notifier_call = dma_debug_device_change;
877 
878 	bus_register_notifier(bus, nb);
879 }
880 
881 static int dma_debug_init(void)
882 {
883 	int i, nr_pages;
884 
885 	/* Do not use dma_debug_initialized here, since we really want to be
886 	 * called to set dma_debug_initialized
887 	 */
888 	if (global_disable)
889 		return 0;
890 
891 	for (i = 0; i < HASH_SIZE; ++i) {
892 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
893 		spin_lock_init(&dma_entry_hash[i].lock);
894 	}
895 
896 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
897 	for (i = 0; i < nr_pages; ++i)
898 		dma_debug_create_entries(GFP_KERNEL);
899 	if (num_free_entries >= nr_prealloc_entries) {
900 		pr_info("preallocated %d debug entries\n", nr_total_entries);
901 	} else if (num_free_entries > 0) {
902 		pr_warn("%d debug entries requested but only %d allocated\n",
903 			nr_prealloc_entries, nr_total_entries);
904 	} else {
905 		pr_err("debugging out of memory error - disabled\n");
906 		global_disable = true;
907 
908 		return 0;
909 	}
910 	min_free_entries = num_free_entries;
911 
912 	dma_debug_initialized = true;
913 
914 	pr_info("debugging enabled by kernel config\n");
915 	return 0;
916 }
917 core_initcall(dma_debug_init);
918 
919 static __init int dma_debug_cmdline(char *str)
920 {
921 	if (!str)
922 		return -EINVAL;
923 
924 	if (strncmp(str, "off", 3) == 0) {
925 		pr_info("debugging disabled on kernel command line\n");
926 		global_disable = true;
927 	}
928 
929 	return 0;
930 }
931 
932 static __init int dma_debug_entries_cmdline(char *str)
933 {
934 	if (!str)
935 		return -EINVAL;
936 	if (!get_option(&str, &nr_prealloc_entries))
937 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
938 	return 0;
939 }
940 
941 __setup("dma_debug=", dma_debug_cmdline);
942 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
943 
944 static void check_unmap(struct dma_debug_entry *ref)
945 {
946 	struct dma_debug_entry *entry;
947 	struct hash_bucket *bucket;
948 	unsigned long flags;
949 
950 	bucket = get_hash_bucket(ref, &flags);
951 	entry = bucket_find_exact(bucket, ref);
952 
953 	if (!entry) {
954 		/* must drop lock before calling dma_mapping_error */
955 		put_hash_bucket(bucket, flags);
956 
957 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
958 			err_printk(ref->dev, NULL,
959 				   "device driver tries to free an "
960 				   "invalid DMA memory address\n");
961 		} else {
962 			err_printk(ref->dev, NULL,
963 				   "device driver tries to free DMA "
964 				   "memory it has not allocated [device "
965 				   "address=0x%016llx] [size=%llu bytes]\n",
966 				   ref->dev_addr, ref->size);
967 		}
968 		return;
969 	}
970 
971 	if (ref->size != entry->size) {
972 		err_printk(ref->dev, entry, "device driver frees "
973 			   "DMA memory with different size "
974 			   "[device address=0x%016llx] [map size=%llu bytes] "
975 			   "[unmap size=%llu bytes]\n",
976 			   ref->dev_addr, entry->size, ref->size);
977 	}
978 
979 	if (ref->type != entry->type) {
980 		err_printk(ref->dev, entry, "device driver frees "
981 			   "DMA memory with wrong function "
982 			   "[device address=0x%016llx] [size=%llu bytes] "
983 			   "[mapped as %s] [unmapped as %s]\n",
984 			   ref->dev_addr, ref->size,
985 			   type2name[entry->type], type2name[ref->type]);
986 	} else if ((entry->type == dma_debug_coherent) &&
987 		   (phys_addr(ref) != phys_addr(entry))) {
988 		err_printk(ref->dev, entry, "device driver frees "
989 			   "DMA memory with different CPU address "
990 			   "[device address=0x%016llx] [size=%llu bytes] "
991 			   "[cpu alloc address=0x%016llx] "
992 			   "[cpu free address=0x%016llx]",
993 			   ref->dev_addr, ref->size,
994 			   phys_addr(entry),
995 			   phys_addr(ref));
996 	}
997 
998 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
999 	    ref->sg_call_ents != entry->sg_call_ents) {
1000 		err_printk(ref->dev, entry, "device driver frees "
1001 			   "DMA sg list with different entry count "
1002 			   "[map count=%d] [unmap count=%d]\n",
1003 			   entry->sg_call_ents, ref->sg_call_ents);
1004 	}
1005 
1006 	/*
1007 	 * This may not be a bug in reality - but most implementations of the
1008 	 * DMA API don't handle this properly, so check for it here
1009 	 */
1010 	if (ref->direction != entry->direction) {
1011 		err_printk(ref->dev, entry, "device driver frees "
1012 			   "DMA memory with different direction "
1013 			   "[device address=0x%016llx] [size=%llu bytes] "
1014 			   "[mapped with %s] [unmapped with %s]\n",
1015 			   ref->dev_addr, ref->size,
1016 			   dir2name[entry->direction],
1017 			   dir2name[ref->direction]);
1018 	}
1019 
1020 	/*
1021 	 * Drivers should use dma_mapping_error() to check the returned
1022 	 * addresses of dma_map_single() and dma_map_page().
1023 	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1024 	 */
1025 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1026 		err_printk(ref->dev, entry,
1027 			   "device driver failed to check map error "
1028 			   "[device address=0x%016llx] [size=%llu bytes] "
1029 			   "[mapped as %s]",
1030 			   ref->dev_addr, ref->size,
1031 			   type2name[entry->type]);
1032 	}
1033 
1034 	hash_bucket_del(entry);
1035 	dma_entry_free(entry);
1036 
1037 	put_hash_bucket(bucket, flags);
1038 }
1039 
1040 static void check_for_stack(struct device *dev,
1041 			    struct page *page, size_t offset)
1042 {
1043 	void *addr;
1044 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1045 
1046 	if (!stack_vm_area) {
1047 		/* Stack is direct-mapped. */
1048 		if (PageHighMem(page))
1049 			return;
1050 		addr = page_address(page) + offset;
1051 		if (object_is_on_stack(addr))
1052 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1053 	} else {
1054 		/* Stack is vmalloced. */
1055 		int i;
1056 
1057 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
1058 			if (page != stack_vm_area->pages[i])
1059 				continue;
1060 
1061 			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1062 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1063 			break;
1064 		}
1065 	}
1066 }
1067 
1068 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1069 {
1070 	if (memory_intersects(_stext, _etext, addr, len) ||
1071 	    memory_intersects(__start_rodata, __end_rodata, addr, len))
1072 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1073 }
1074 
1075 static void check_sync(struct device *dev,
1076 		       struct dma_debug_entry *ref,
1077 		       bool to_cpu)
1078 {
1079 	struct dma_debug_entry *entry;
1080 	struct hash_bucket *bucket;
1081 	unsigned long flags;
1082 
1083 	bucket = get_hash_bucket(ref, &flags);
1084 
1085 	entry = bucket_find_contain(&bucket, ref, &flags);
1086 
1087 	if (!entry) {
1088 		err_printk(dev, NULL, "device driver tries "
1089 				"to sync DMA memory it has not allocated "
1090 				"[device address=0x%016llx] [size=%llu bytes]\n",
1091 				(unsigned long long)ref->dev_addr, ref->size);
1092 		goto out;
1093 	}
1094 
1095 	if (ref->size > entry->size) {
1096 		err_printk(dev, entry, "device driver syncs"
1097 				" DMA memory outside allocated range "
1098 				"[device address=0x%016llx] "
1099 				"[allocation size=%llu bytes] "
1100 				"[sync offset+size=%llu]\n",
1101 				entry->dev_addr, entry->size,
1102 				ref->size);
1103 	}
1104 
1105 	if (entry->direction == DMA_BIDIRECTIONAL)
1106 		goto out;
1107 
1108 	if (ref->direction != entry->direction) {
1109 		err_printk(dev, entry, "device driver syncs "
1110 				"DMA memory with different direction "
1111 				"[device address=0x%016llx] [size=%llu bytes] "
1112 				"[mapped with %s] [synced with %s]\n",
1113 				(unsigned long long)ref->dev_addr, entry->size,
1114 				dir2name[entry->direction],
1115 				dir2name[ref->direction]);
1116 	}
1117 
1118 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1119 		      !(ref->direction == DMA_TO_DEVICE))
1120 		err_printk(dev, entry, "device driver syncs "
1121 				"device read-only DMA memory for cpu "
1122 				"[device address=0x%016llx] [size=%llu bytes] "
1123 				"[mapped with %s] [synced with %s]\n",
1124 				(unsigned long long)ref->dev_addr, entry->size,
1125 				dir2name[entry->direction],
1126 				dir2name[ref->direction]);
1127 
1128 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1129 		       !(ref->direction == DMA_FROM_DEVICE))
1130 		err_printk(dev, entry, "device driver syncs "
1131 				"device write-only DMA memory to device "
1132 				"[device address=0x%016llx] [size=%llu bytes] "
1133 				"[mapped with %s] [synced with %s]\n",
1134 				(unsigned long long)ref->dev_addr, entry->size,
1135 				dir2name[entry->direction],
1136 				dir2name[ref->direction]);
1137 
1138 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1139 	    ref->sg_call_ents != entry->sg_call_ents) {
1140 		err_printk(ref->dev, entry, "device driver syncs "
1141 			   "DMA sg list with different entry count "
1142 			   "[map count=%d] [sync count=%d]\n",
1143 			   entry->sg_call_ents, ref->sg_call_ents);
1144 	}
1145 
1146 out:
1147 	put_hash_bucket(bucket, flags);
1148 }
1149 
1150 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1151 {
1152 #ifdef CONFIG_DMA_API_DEBUG_SG
1153 	unsigned int max_seg = dma_get_max_seg_size(dev);
1154 	u64 start, end, boundary = dma_get_seg_boundary(dev);
1155 
1156 	/*
1157 	 * Either the driver forgot to set dma_parms appropriately, or
1158 	 * whoever generated the list forgot to check them.
1159 	 */
1160 	if (sg->length > max_seg)
1161 		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1162 			   sg->length, max_seg);
1163 	/*
1164 	 * In some cases this could potentially be the DMA API
1165 	 * implementation's fault, but it would usually imply that
1166 	 * the scatterlist was built inappropriately to begin with.
1167 	 */
1168 	start = sg_dma_address(sg);
1169 	end = start + sg_dma_len(sg) - 1;
1170 	if ((start ^ end) & ~boundary)
1171 		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1172 			   start, end, boundary);
1173 #endif
1174 }
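/*
 * Worked example for the boundary check above: with a segment boundary
 * mask of 0xffff (64 KiB), a segment with start == 0xfc00 and
 * sg_dma_len() == 0x1000 has end == 0x10bff, so (start ^ end) & ~0xffff
 * is 0x10000 (non-zero) and the segment is reported as crossing a 64 KiB
 * boundary.
 */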
1175 
1176 void debug_dma_map_single(struct device *dev, const void *addr,
1177 			    unsigned long len)
1178 {
1179 	if (unlikely(dma_debug_disabled()))
1180 		return;
1181 
1182 	if (!virt_addr_valid(addr))
1183 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1184 			   addr, len);
1185 
1186 	if (is_vmalloc_addr(addr))
1187 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1188 			   addr, len);
1189 }
1190 EXPORT_SYMBOL(debug_dma_map_single);
1191 
1192 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1193 			size_t size, int direction, dma_addr_t dma_addr)
1194 {
1195 	struct dma_debug_entry *entry;
1196 
1197 	if (unlikely(dma_debug_disabled()))
1198 		return;
1199 
1200 	if (dma_mapping_error(dev, dma_addr))
1201 		return;
1202 
1203 	entry = dma_entry_alloc();
1204 	if (!entry)
1205 		return;
1206 
1207 	entry->dev       = dev;
1208 	entry->type      = dma_debug_single;
1209 	entry->pfn	 = page_to_pfn(page);
1210 	entry->offset	 = offset;
1211 	entry->dev_addr  = dma_addr;
1212 	entry->size      = size;
1213 	entry->direction = direction;
1214 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
1215 
1216 	check_for_stack(dev, page, offset);
1217 
1218 	if (!PageHighMem(page)) {
1219 		void *addr = page_address(page) + offset;
1220 
1221 		check_for_illegal_area(dev, addr, size);
1222 	}
1223 
1224 	add_dma_entry(entry);
1225 }
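/*
 * Rough call flow (sketch): dma_map_page() records a mapping here via
 * debug_dma_map_page(); a later dma_mapping_error() check flips the entry
 * to MAP_ERR_CHECKED in debug_dma_mapping_error(); and the matching
 * dma_unmap_page() removes it through debug_dma_unmap_page() ->
 * check_unmap(), which reports size/direction/type mismatches.
 */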
1226 
1227 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1228 {
1229 	struct dma_debug_entry ref;
1230 	struct dma_debug_entry *entry;
1231 	struct hash_bucket *bucket;
1232 	unsigned long flags;
1233 
1234 	if (unlikely(dma_debug_disabled()))
1235 		return;
1236 
1237 	ref.dev = dev;
1238 	ref.dev_addr = dma_addr;
1239 	bucket = get_hash_bucket(&ref, &flags);
1240 
1241 	list_for_each_entry(entry, &bucket->list, list) {
1242 		if (!exact_match(&ref, entry))
1243 			continue;
1244 
1245 		/*
1246 		 * The same physical address can be mapped multiple
1247 		 * times. Without a hardware IOMMU this results in the
1248 		 * same device addresses being put into the dma-debug
1249 		 * hash multiple times too. This can result in false
1250 		 * positives being reported. Therefore we implement a
1251 		 * best-fit algorithm here which updates the first entry
1252 		 * from the hash which fits the reference value and is
1253 		 * not currently listed as being checked.
1254 		 */
1255 		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1256 			entry->map_err_type = MAP_ERR_CHECKED;
1257 			break;
1258 		}
1259 	}
1260 
1261 	put_hash_bucket(bucket, flags);
1262 }
1263 EXPORT_SYMBOL(debug_dma_mapping_error);
1264 
1265 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1266 			  size_t size, int direction)
1267 {
1268 	struct dma_debug_entry ref = {
1269 		.type           = dma_debug_single,
1270 		.dev            = dev,
1271 		.dev_addr       = addr,
1272 		.size           = size,
1273 		.direction      = direction,
1274 	};
1275 
1276 	if (unlikely(dma_debug_disabled()))
1277 		return;
1278 	check_unmap(&ref);
1279 }
1280 
1281 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1282 		      int nents, int mapped_ents, int direction)
1283 {
1284 	struct dma_debug_entry *entry;
1285 	struct scatterlist *s;
1286 	int i;
1287 
1288 	if (unlikely(dma_debug_disabled()))
1289 		return;
1290 
1291 	for_each_sg(sg, s, mapped_ents, i) {
1292 		entry = dma_entry_alloc();
1293 		if (!entry)
1294 			return;
1295 
1296 		entry->type           = dma_debug_sg;
1297 		entry->dev            = dev;
1298 		entry->pfn	      = page_to_pfn(sg_page(s));
1299 		entry->offset	      = s->offset;
1300 		entry->size           = sg_dma_len(s);
1301 		entry->dev_addr       = sg_dma_address(s);
1302 		entry->direction      = direction;
1303 		entry->sg_call_ents   = nents;
1304 		entry->sg_mapped_ents = mapped_ents;
1305 
1306 		check_for_stack(dev, sg_page(s), s->offset);
1307 
1308 		if (!PageHighMem(sg_page(s))) {
1309 			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1310 		}
1311 
1312 		check_sg_segment(dev, s);
1313 
1314 		add_dma_entry(entry);
1315 	}
1316 }
1317 
1318 static int get_nr_mapped_entries(struct device *dev,
1319 				 struct dma_debug_entry *ref)
1320 {
1321 	struct dma_debug_entry *entry;
1322 	struct hash_bucket *bucket;
1323 	unsigned long flags;
1324 	int mapped_ents;
1325 
1326 	bucket       = get_hash_bucket(ref, &flags);
1327 	entry        = bucket_find_exact(bucket, ref);
1328 	mapped_ents  = 0;
1329 
1330 	if (entry)
1331 		mapped_ents = entry->sg_mapped_ents;
1332 	put_hash_bucket(bucket, flags);
1333 
1334 	return mapped_ents;
1335 }
1336 
1337 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1338 			int nelems, int dir)
1339 {
1340 	struct scatterlist *s;
1341 	int mapped_ents = 0, i;
1342 
1343 	if (unlikely(dma_debug_disabled()))
1344 		return;
1345 
1346 	for_each_sg(sglist, s, nelems, i) {
1347 
1348 		struct dma_debug_entry ref = {
1349 			.type           = dma_debug_sg,
1350 			.dev            = dev,
1351 			.pfn		= page_to_pfn(sg_page(s)),
1352 			.offset		= s->offset,
1353 			.dev_addr       = sg_dma_address(s),
1354 			.size           = sg_dma_len(s),
1355 			.direction      = dir,
1356 			.sg_call_ents   = nelems,
1357 		};
1358 
1359 		if (mapped_ents && i >= mapped_ents)
1360 			break;
1361 
1362 		if (!i)
1363 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1364 
1365 		check_unmap(&ref);
1366 	}
1367 }
1368 
1369 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1370 			      dma_addr_t dma_addr, void *virt)
1371 {
1372 	struct dma_debug_entry *entry;
1373 
1374 	if (unlikely(dma_debug_disabled()))
1375 		return;
1376 
1377 	if (unlikely(virt == NULL))
1378 		return;
1379 
1380 	/* handle vmalloc and linear addresses */
1381 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1382 		return;
1383 
1384 	entry = dma_entry_alloc();
1385 	if (!entry)
1386 		return;
1387 
1388 	entry->type      = dma_debug_coherent;
1389 	entry->dev       = dev;
1390 	entry->offset	 = offset_in_page(virt);
1391 	entry->size      = size;
1392 	entry->dev_addr  = dma_addr;
1393 	entry->direction = DMA_BIDIRECTIONAL;
1394 
1395 	if (is_vmalloc_addr(virt))
1396 		entry->pfn = vmalloc_to_pfn(virt);
1397 	else
1398 		entry->pfn = page_to_pfn(virt_to_page(virt));
1399 
1400 	add_dma_entry(entry);
1401 }
1402 
1403 void debug_dma_free_coherent(struct device *dev, size_t size,
1404 			 void *virt, dma_addr_t addr)
1405 {
1406 	struct dma_debug_entry ref = {
1407 		.type           = dma_debug_coherent,
1408 		.dev            = dev,
1409 		.offset		= offset_in_page(virt),
1410 		.dev_addr       = addr,
1411 		.size           = size,
1412 		.direction      = DMA_BIDIRECTIONAL,
1413 	};
1414 
1415 	/* handle vmalloc and linear addresses */
1416 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1417 		return;
1418 
1419 	if (is_vmalloc_addr(virt))
1420 		ref.pfn = vmalloc_to_pfn(virt);
1421 	else
1422 		ref.pfn = page_to_pfn(virt_to_page(virt));
1423 
1424 	if (unlikely(dma_debug_disabled()))
1425 		return;
1426 
1427 	check_unmap(&ref);
1428 }
1429 
1430 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1431 			    int direction, dma_addr_t dma_addr)
1432 {
1433 	struct dma_debug_entry *entry;
1434 
1435 	if (unlikely(dma_debug_disabled()))
1436 		return;
1437 
1438 	entry = dma_entry_alloc();
1439 	if (!entry)
1440 		return;
1441 
1442 	entry->type		= dma_debug_resource;
1443 	entry->dev		= dev;
1444 	entry->pfn		= PHYS_PFN(addr);
1445 	entry->offset		= offset_in_page(addr);
1446 	entry->size		= size;
1447 	entry->dev_addr		= dma_addr;
1448 	entry->direction	= direction;
1449 	entry->map_err_type	= MAP_ERR_NOT_CHECKED;
1450 
1451 	add_dma_entry(entry);
1452 }
1453 
1454 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1455 			      size_t size, int direction)
1456 {
1457 	struct dma_debug_entry ref = {
1458 		.type           = dma_debug_resource,
1459 		.dev            = dev,
1460 		.dev_addr       = dma_addr,
1461 		.size           = size,
1462 		.direction      = direction,
1463 	};
1464 
1465 	if (unlikely(dma_debug_disabled()))
1466 		return;
1467 
1468 	check_unmap(&ref);
1469 }
1470 
1471 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1472 				   size_t size, int direction)
1473 {
1474 	struct dma_debug_entry ref;
1475 
1476 	if (unlikely(dma_debug_disabled()))
1477 		return;
1478 
1479 	ref.type         = dma_debug_single;
1480 	ref.dev          = dev;
1481 	ref.dev_addr     = dma_handle;
1482 	ref.size         = size;
1483 	ref.direction    = direction;
1484 	ref.sg_call_ents = 0;
1485 
1486 	check_sync(dev, &ref, true);
1487 }
1488 
1489 void debug_dma_sync_single_for_device(struct device *dev,
1490 				      dma_addr_t dma_handle, size_t size,
1491 				      int direction)
1492 {
1493 	struct dma_debug_entry ref;
1494 
1495 	if (unlikely(dma_debug_disabled()))
1496 		return;
1497 
1498 	ref.type         = dma_debug_single;
1499 	ref.dev          = dev;
1500 	ref.dev_addr     = dma_handle;
1501 	ref.size         = size;
1502 	ref.direction    = direction;
1503 	ref.sg_call_ents = 0;
1504 
1505 	check_sync(dev, &ref, false);
1506 }
1507 
1508 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1509 			       int nelems, int direction)
1510 {
1511 	struct scatterlist *s;
1512 	int mapped_ents = 0, i;
1513 
1514 	if (unlikely(dma_debug_disabled()))
1515 		return;
1516 
1517 	for_each_sg(sg, s, nelems, i) {
1518 
1519 		struct dma_debug_entry ref = {
1520 			.type           = dma_debug_sg,
1521 			.dev            = dev,
1522 			.pfn		= page_to_pfn(sg_page(s)),
1523 			.offset		= s->offset,
1524 			.dev_addr       = sg_dma_address(s),
1525 			.size           = sg_dma_len(s),
1526 			.direction      = direction,
1527 			.sg_call_ents   = nelems,
1528 		};
1529 
1530 		if (!i)
1531 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1532 
1533 		if (i >= mapped_ents)
1534 			break;
1535 
1536 		check_sync(dev, &ref, true);
1537 	}
1538 }
1539 
1540 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1541 				  int nelems, int direction)
1542 {
1543 	struct scatterlist *s;
1544 	int mapped_ents = 0, i;
1545 
1546 	if (unlikely(dma_debug_disabled()))
1547 		return;
1548 
1549 	for_each_sg(sg, s, nelems, i) {
1550 
1551 		struct dma_debug_entry ref = {
1552 			.type           = dma_debug_sg,
1553 			.dev            = dev,
1554 			.pfn		= page_to_pfn(sg_page(s)),
1555 			.offset		= s->offset,
1556 			.dev_addr       = sg_dma_address(s),
1557 			.size           = sg_dma_len(s),
1558 			.direction      = direction,
1559 			.sg_call_ents   = nelems,
1560 		};
1561 		if (!i)
1562 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1563 
1564 		if (i >= mapped_ents)
1565 			break;
1566 
1567 		check_sync(dev, &ref, false);
1568 	}
1569 }
1570 
1571 static int __init dma_debug_driver_setup(char *str)
1572 {
1573 	int i;
1574 
1575 	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1576 		current_driver_name[i] = *str;
1577 		if (*str == 0)
1578 			break;
1579 	}
1580 
1581 	if (current_driver_name[0])
1582 		pr_info("enable driver filter for driver [%s]\n",
1583 			current_driver_name);
1584 
1585 
1586 	return 1;
1587 }
1588 __setup("dma_debug_driver=", dma_debug_driver_setup);
1589