xref: /openbmc/linux/kernel/dma/debug.c (revision 7ff836f0)
1 /*
2  * Copyright (C) 2008 Advanced Micro Devices, Inc.
3  *
4  * Author: Joerg Roedel <joerg.roedel@amd.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18  */
19 
20 #define pr_fmt(fmt)	"DMA-API: " fmt
21 
22 #include <linux/sched/task_stack.h>
23 #include <linux/scatterlist.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/sched/task.h>
26 #include <linux/stacktrace.h>
27 #include <linux/dma-debug.h>
28 #include <linux/spinlock.h>
29 #include <linux/vmalloc.h>
30 #include <linux/debugfs.h>
31 #include <linux/uaccess.h>
32 #include <linux/export.h>
33 #include <linux/device.h>
34 #include <linux/types.h>
35 #include <linux/sched.h>
36 #include <linux/ctype.h>
37 #include <linux/list.h>
38 #include <linux/slab.h>
39 
40 #include <asm/sections.h>
41 
42 #define HASH_SIZE       1024ULL
43 #define HASH_FN_SHIFT   13
44 #define HASH_FN_MASK    (HASH_SIZE - 1)
45 
46 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
47 /* If the pool runs out, add this many new entries at once */
48 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
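
/*
 * Worked example (illustrative only - sizeof(struct dma_debug_entry)
 * depends on the kernel config): with 4 KiB pages and an entry size of
 * roughly 128 bytes when CONFIG_STACKTRACE is enabled, each refill adds
 * about 4096 / 128 = 32 entries to the free list.
 */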
49 
50 enum {
51 	dma_debug_single,
52 	dma_debug_sg,
53 	dma_debug_coherent,
54 	dma_debug_resource,
55 };
56 
57 enum map_err_types {
58 	MAP_ERR_CHECK_NOT_APPLICABLE,
59 	MAP_ERR_NOT_CHECKED,
60 	MAP_ERR_CHECKED,
61 };
62 
63 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
64 
65 /**
66  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
67  * @list: node on pre-allocated free_entries list
68  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
69  * @type: single, sg, coherent, resource
70  * @pfn: page frame of the start address
71  * @offset: offset of mapping relative to pfn
 * @dev_addr: dma address of the mapping
72  * @size: length of the mapping
73  * @direction: enum dma_data_direction
74  * @sg_call_ents: 'nents' from dma_map_sg
75  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
76  * @map_err_type: track whether dma_mapping_error() was checked
77  * @stacktrace: support backtraces when a violation is detected
78  */
79 struct dma_debug_entry {
80 	struct list_head list;
81 	struct device    *dev;
82 	int              type;
83 	unsigned long	 pfn;
84 	size_t		 offset;
85 	u64              dev_addr;
86 	u64              size;
87 	int              direction;
88 	int		 sg_call_ents;
89 	int		 sg_mapped_ents;
90 	enum map_err_types  map_err_type;
91 #ifdef CONFIG_STACKTRACE
92 	unsigned int	stack_len;
93 	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
94 #endif
95 };
96 
97 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
98 
99 struct hash_bucket {
100 	struct list_head list;
101 	spinlock_t lock;
102 } ____cacheline_aligned_in_smp;
103 
104 /* Hash list to save the allocated dma addresses */
105 static struct hash_bucket dma_entry_hash[HASH_SIZE];
106 /* List of pre-allocated dma_debug_entry's */
107 static LIST_HEAD(free_entries);
108 /* Lock for the list above */
109 static DEFINE_SPINLOCK(free_entries_lock);
110 
111 /* Global disable flag - will be set in case of an error */
112 static bool global_disable __read_mostly;
113 
114 /* Early initialization disable flag, set at the end of dma_debug_init */
115 static bool dma_debug_initialized __read_mostly;
116 
117 static inline bool dma_debug_disabled(void)
118 {
119 	return global_disable || !dma_debug_initialized;
120 }
121 
122 /* Global error count */
123 static u32 error_count;
124 
125 /* Global error show enable */
126 static u32 show_all_errors __read_mostly;
127 /* Number of errors to show */
128 static u32 show_num_errors = 1;
129 
130 static u32 num_free_entries;
131 static u32 min_free_entries;
132 static u32 nr_total_entries;
133 
134 /* number of preallocated entries requested by kernel cmdline */
135 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
136 
137 /* per-driver filter related state */
138 
139 #define NAME_MAX_LEN	64
140 
141 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
142 static struct device_driver *current_driver                    __read_mostly;
143 
144 static DEFINE_RWLOCK(driver_name_lock);
145 
146 static const char *const maperr2str[] = {
147 	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
148 	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
149 	[MAP_ERR_CHECKED] = "dma map error checked",
150 };
151 
152 static const char *type2name[] = {
153 	[dma_debug_single]   = "single",   [dma_debug_sg]       = "scatter-gather",
154 	[dma_debug_coherent] = "coherent", [dma_debug_resource] = "resource" };
155 
156 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
157 				   "DMA_FROM_DEVICE", "DMA_NONE" };
158 
159 /*
160  * The access to some variables in this macro is racy. We can't use atomic_t
161  * here because all these variables are exported to debugfs. Some of them are
162  * even writable. This is also the reason why a lock won't help much. But
163  * anyway, the races are no big deal. Here is why:
164  *
165  *   error_count: the addition is racy, but the worst thing that can happen is
166  *                that we don't count some errors
167  *   show_num_errors: the subtraction is racy. Also no big deal because in
168  *                    worst case this will result in one warning more in the
169  *                    system log than the user configured. This variable is
170  *                    writeable via debugfs.
171  */
172 static inline void dump_entry_trace(struct dma_debug_entry *entry)
173 {
174 #ifdef CONFIG_STACKTRACE
175 	if (entry) {
176 		pr_warning("Mapped at:\n");
177 		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
178 	}
179 #endif
180 }
181 
182 static bool driver_filter(struct device *dev)
183 {
184 	struct device_driver *drv;
185 	unsigned long flags;
186 	bool ret;
187 
188 	/* driver filter off */
189 	if (likely(!current_driver_name[0]))
190 		return true;
191 
192 	/* driver filter on and initialized */
193 	if (current_driver && dev && dev->driver == current_driver)
194 		return true;
195 
196 	/* driver filter on, but we can't filter on a NULL device... */
197 	if (!dev)
198 		return false;
199 
200 	if (current_driver || !current_driver_name[0])
201 		return false;
202 
203 	/* driver filter on but not yet initialized */
204 	drv = dev->driver;
205 	if (!drv)
206 		return false;
207 
208 	/* lock to protect against change of current_driver_name */
209 	read_lock_irqsave(&driver_name_lock, flags);
210 
211 	ret = false;
212 	if (drv->name &&
213 	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
214 		current_driver = drv;
215 		ret = true;
216 	}
217 
218 	read_unlock_irqrestore(&driver_name_lock, flags);
219 
220 	return ret;
221 }
222 
223 #define err_printk(dev, entry, format, arg...) do {			\
224 		error_count += 1;					\
225 		if (driver_filter(dev) &&				\
226 		    (show_all_errors || show_num_errors > 0)) {		\
227 			WARN(1, pr_fmt("%s %s: ") format,		\
228 			     dev ? dev_driver_string(dev) : "NULL",	\
229 			     dev ? dev_name(dev) : "NULL", ## arg);	\
230 			dump_entry_trace(entry);			\
231 		}							\
232 		if (!show_all_errors && show_num_errors > 0)		\
233 			show_num_errors -= 1;				\
234 	} while (0)
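
/*
 * A representative (hypothetical) invocation:
 *
 *	err_printk(dev, entry, "bad mapping [addr=0x%016llx]\n", addr);
 *
 * With the "DMA-API: " pr_fmt prefix this WARNs with something like
 * "DMA-API: <driver> <device>: bad mapping [addr=...]" and, if a
 * backtrace was recorded at map time, dumps it via dump_entry_trace().
 */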
235 
236 /*
237  * Hash related functions
238  *
239  * Every DMA-API request is saved into a struct dma_debug_entry. For
240  * quick access these structs are stored in a hash table.
241  */
242 static int hash_fn(struct dma_debug_entry *entry)
243 {
244 	/*
245 	 * Hash function is based on the dma address.
246 	 * We use bits 13-22 here as the index into the hash
247 	 */
248 	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
249 }
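
/*
 * Worked example (values illustrative): for dev_addr 0x12345678,
 * 0x12345678 >> 13 = 0x91a2 and 0x91a2 & 0x3ff = 0x1a2, so the entry
 * lands in bucket 418 of the 1024-bucket table.
 */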
250 
251 /*
252  * Request exclusive access to a hash bucket for a given dma_debug_entry.
253  */
254 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
255 					   unsigned long *flags)
256 	__acquires(&dma_entry_hash[idx].lock)
257 {
258 	int idx = hash_fn(entry);
259 	unsigned long __flags;
260 
261 	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
262 	*flags = __flags;
263 	return &dma_entry_hash[idx];
264 }
265 
266 /*
267  * Give up exclusive access to the hash bucket
268  */
269 static void put_hash_bucket(struct hash_bucket *bucket,
270 			    unsigned long *flags)
271 	__releases(&bucket->lock)
272 {
273 	unsigned long __flags = *flags;
274 
275 	spin_unlock_irqrestore(&bucket->lock, __flags);
276 }
277 
278 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
279 {
280 	return ((a->dev_addr == b->dev_addr) &&
281 		(a->dev == b->dev)) ? true : false;
282 }
283 
284 static bool containing_match(struct dma_debug_entry *a,
285 			     struct dma_debug_entry *b)
286 {
287 	if (a->dev != b->dev)
288 		return false;
289 
290 	if ((b->dev_addr <= a->dev_addr) &&
291 	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
292 		return true;
293 
294 	return false;
295 }
296 
297 /*
298  * Search for a given entry in the hash bucket list
299  */
300 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
301 						  struct dma_debug_entry *ref,
302 						  match_fn match)
303 {
304 	struct dma_debug_entry *entry, *ret = NULL;
305 	int matches = 0, match_lvl, last_lvl = -1;
306 
307 	list_for_each_entry(entry, &bucket->list, list) {
308 		if (!match(ref, entry))
309 			continue;
310 
311 		/*
312 		 * Some drivers map the same physical address multiple
313 		 * times. Without a hardware IOMMU this results in the
314 		 * same device addresses being put into the dma-debug
315 		 * hash multiple times too. This can result in false
316 		 * positives being reported. Therefore we implement a
317 		 * best-fit algorithm here which returns the entry from
318 		 * the hash which fits best to the reference value
319 		 * instead of the first-fit.
320 		 */
321 		matches += 1;
322 		match_lvl = 0;
323 		entry->size         == ref->size         ? ++match_lvl : 0;
324 		entry->type         == ref->type         ? ++match_lvl : 0;
325 		entry->direction    == ref->direction    ? ++match_lvl : 0;
326 		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
327 
328 		if (match_lvl == 4) {
329 			/* perfect-fit - return the result */
330 			return entry;
331 		} else if (match_lvl > last_lvl) {
332 			/*
333 			 * We found an entry that fits better than the
334 			 * previous one, or it is the first match.
335 			 */
336 			last_lvl = match_lvl;
337 			ret      = entry;
338 		}
339 	}
340 
341 	/*
342 	 * If we have multiple matches but no perfect-fit, just return
343 	 * NULL.
344 	 */
345 	ret = (matches == 1) ? ret : NULL;
346 
347 	return ret;
348 }
349 
350 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
351 						 struct dma_debug_entry *ref)
352 {
353 	return __hash_bucket_find(bucket, ref, exact_match);
354 }
355 
356 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
357 						   struct dma_debug_entry *ref,
358 						   unsigned long *flags)
359 {
360 
361 	unsigned int max_range = dma_get_max_seg_size(ref->dev);
362 	struct dma_debug_entry *entry, index = *ref;
363 	unsigned int range = 0;
364 
365 	while (range <= max_range) {
366 		entry = __hash_bucket_find(*bucket, ref, containing_match);
367 
368 		if (entry)
369 			return entry;
370 
371 		/*
372 		 * Nothing found, go back a hash bucket
373 		 */
374 		put_hash_bucket(*bucket, flags);
375 		range          += (1 << HASH_FN_SHIFT);
376 		index.dev_addr -= (1 << HASH_FN_SHIFT);
377 		*bucket = get_hash_bucket(&index, flags);
378 	}
379 
380 	return NULL;
381 }
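
/*
 * Note on the walk above: the entry containing ref->dev_addr may have a
 * lower start address and therefore hash to an earlier bucket. Each
 * iteration steps the search address back by one hash stride
 * (1 << HASH_FN_SHIFT, i.e. 8 KiB) until dma_get_max_seg_size() bytes
 * have been covered.
 */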
382 
383 /*
384  * Add an entry to a hash bucket
385  */
386 static void hash_bucket_add(struct hash_bucket *bucket,
387 			    struct dma_debug_entry *entry)
388 {
389 	list_add_tail(&entry->list, &bucket->list);
390 }
391 
392 /*
393  * Remove entry from a hash bucket list
394  */
395 static void hash_bucket_del(struct dma_debug_entry *entry)
396 {
397 	list_del(&entry->list);
398 }
399 
400 static unsigned long long phys_addr(struct dma_debug_entry *entry)
401 {
402 	if (entry->type == dma_debug_resource)
403 		return __pfn_to_phys(entry->pfn) + entry->offset;
404 
405 	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
406 }
407 
408 /*
409  * Dump mapping entries for debugging purposes
410  */
411 void debug_dma_dump_mappings(struct device *dev)
412 {
413 	int idx;
414 
415 	for (idx = 0; idx < HASH_SIZE; idx++) {
416 		struct hash_bucket *bucket = &dma_entry_hash[idx];
417 		struct dma_debug_entry *entry;
418 		unsigned long flags;
419 
420 		spin_lock_irqsave(&bucket->lock, flags);
421 
422 		list_for_each_entry(entry, &bucket->list, list) {
423 			if (!dev || dev == entry->dev) {
424 				dev_info(entry->dev,
425 					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
426 					 type2name[entry->type], idx,
427 					 phys_addr(entry), entry->pfn,
428 					 entry->dev_addr, entry->size,
429 					 dir2name[entry->direction],
430 					 maperr2str[entry->map_err_type]);
431 			}
432 		}
433 
434 		spin_unlock_irqrestore(&bucket->lock, flags);
435 	}
436 }
437 
438 /*
439  * For each mapping (initial cacheline in the case of
440  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
441  * scatterlist, or the cacheline specified in dma_map_single) insert
442  * into this tree using the cacheline as the key. At
443  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
444  * the entry already exists at insertion time add a tag as a reference
445  * count for the overlapping mappings.  For now, the overlap tracking
446  * just ensures that 'unmaps' balance 'maps' before marking the
447  * cacheline idle, but we should also be flagging overlaps as an API
448  * violation.
449  *
450  * Memory usage is mostly constrained by the maximum number of available
451  * dma-debug entries in that we need a free dma_debug_entry before
452  * inserting into the tree.  In the case of dma_map_page and
453  * dma_alloc_coherent there is only one dma_debug_entry and one
454  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
455  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
456  * entries into the tree.
457  *
458  * At any time debug_dma_assert_idle() can be called to trigger a
459  * warning if any cachelines in the given page are in the active set.
460  */
461 static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
462 static DEFINE_SPINLOCK(radix_lock);
463 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
464 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
465 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
466 
467 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
468 {
469 	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
470 		(entry->offset >> L1_CACHE_SHIFT);
471 }
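
/*
 * Worked example (arch-dependent - assumes PAGE_SHIFT == 12 and
 * L1_CACHE_SHIFT == 6): CACHELINE_PER_PAGE_SHIFT is then 6, so pfn
 * 0x1000 with offset 0x80 yields cacheline number
 * (0x1000 << 6) + (0x80 >> 6) = 0x40002.
 */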
472 
473 static int active_cacheline_read_overlap(phys_addr_t cln)
474 {
475 	int overlap = 0, i;
476 
477 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
478 		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
479 			overlap |= 1 << i;
480 	return overlap;
481 }
482 
483 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
484 {
485 	int i;
486 
487 	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
488 		return overlap;
489 
490 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
491 		if (overlap & 1 << i)
492 			radix_tree_tag_set(&dma_active_cacheline, cln, i);
493 		else
494 			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
495 
496 	return overlap;
497 }
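
/*
 * The overlap count lives in the radix tree's per-entry tags rather than
 * in the entry itself: with RADIX_TREE_MAX_TAGS == 3 the tags form a
 * 3-bit counter, which is why ACTIVE_CACHELINE_MAX_OVERLAP is
 * (1 << 3) - 1 = 7.
 */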
498 
499 static void active_cacheline_inc_overlap(phys_addr_t cln)
500 {
501 	int overlap = active_cacheline_read_overlap(cln);
502 
503 	overlap = active_cacheline_set_overlap(cln, ++overlap);
504 
505 	/* If we overflowed the overlap counter then we're potentially
506 	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
507 	 * balanced then this overflow may cause false negatives in
508 	 * debug_dma_assert_idle() as the cacheline may be marked idle
509 	 * prematurely.
510 	 */
511 	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
512 		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
513 		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
514 }
515 
516 static int active_cacheline_dec_overlap(phys_addr_t cln)
517 {
518 	int overlap = active_cacheline_read_overlap(cln);
519 
520 	return active_cacheline_set_overlap(cln, --overlap);
521 }
522 
523 static int active_cacheline_insert(struct dma_debug_entry *entry)
524 {
525 	phys_addr_t cln = to_cacheline_number(entry);
526 	unsigned long flags;
527 	int rc;
528 
529 	/* If the device is not writing memory then we don't have any
530 	 * concerns about the cpu consuming stale data.  This also avoids
531 	 * false positives on legitimate uses of overlapping mappings.
532 	 */
533 	if (entry->direction == DMA_TO_DEVICE)
534 		return 0;
535 
536 	spin_lock_irqsave(&radix_lock, flags);
537 	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
538 	if (rc == -EEXIST)
539 		active_cacheline_inc_overlap(cln);
540 	spin_unlock_irqrestore(&radix_lock, flags);
541 
542 	return rc;
543 }
544 
545 static void active_cacheline_remove(struct dma_debug_entry *entry)
546 {
547 	phys_addr_t cln = to_cacheline_number(entry);
548 	unsigned long flags;
549 
550 	/* ...mirror the insert case */
551 	if (entry->direction == DMA_TO_DEVICE)
552 		return;
553 
554 	spin_lock_irqsave(&radix_lock, flags);
555 	/* since we are counting overlaps the final put of the
556 	 * cacheline will occur when the overlap count is 0.
557 	 * active_cacheline_dec_overlap() returns -1 in that case
558 	 */
559 	if (active_cacheline_dec_overlap(cln) < 0)
560 		radix_tree_delete(&dma_active_cacheline, cln);
561 	spin_unlock_irqrestore(&radix_lock, flags);
562 }
563 
564 /**
565  * debug_dma_assert_idle() - assert that a page is not undergoing dma
566  * @page: page to lookup in the dma_active_cacheline tree
567  *
568  * Place a call to this routine in cases where the cpu touching the page
569  * before the dma completes (i.e. before the page is dma_unmapped) would
570  * lead to data corruption.
571  */
572 void debug_dma_assert_idle(struct page *page)
573 {
574 	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
575 	struct dma_debug_entry *entry = NULL;
576 	void **results = (void **) &ents;
577 	unsigned int nents, i;
578 	unsigned long flags;
579 	phys_addr_t cln;
580 
581 	if (dma_debug_disabled())
582 		return;
583 
584 	if (!page)
585 		return;
586 
587 	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
588 	spin_lock_irqsave(&radix_lock, flags);
589 	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
590 				       CACHELINES_PER_PAGE);
591 	for (i = 0; i < nents; i++) {
592 		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
593 
594 		if (ent_cln == cln) {
595 			entry = ents[i];
596 			break;
597 		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
598 			break;
599 	}
600 	spin_unlock_irqrestore(&radix_lock, flags);
601 
602 	if (!entry)
603 		return;
604 
605 	cln = to_cacheline_number(entry);
606 	err_printk(entry->dev, entry,
607 		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
608 		   &cln);
609 }
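
/*
 * A minimal (hypothetical) call-site sketch: code about to let the cpu
 * write a page that might still be under DMA could do
 *
 *	debug_dma_assert_idle(page);
 *	memcpy(page_address(page), src, len);
 *
 * and would get the "cpu touching an active dma mapped cacheline"
 * warning above if any cacheline of the page is still active.
 */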
610 
611 /*
612  * Wrapper function for adding an entry to the hash.
613  * This function takes care of locking itself.
614  */
615 static void add_dma_entry(struct dma_debug_entry *entry)
616 {
617 	struct hash_bucket *bucket;
618 	unsigned long flags;
619 	int rc;
620 
621 	bucket = get_hash_bucket(entry, &flags);
622 	hash_bucket_add(bucket, entry);
623 	put_hash_bucket(bucket, &flags);
624 
625 	rc = active_cacheline_insert(entry);
626 	if (rc == -ENOMEM) {
627 		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
628 		global_disable = true;
629 	}
630 
631 	/* TODO: report -EEXIST errors here as overlapping mappings are
632 	 * not supported by the DMA API
633 	 */
634 }
635 
636 static int dma_debug_create_entries(gfp_t gfp)
637 {
638 	struct dma_debug_entry *entry;
639 	int i;
640 
641 	entry = (void *)get_zeroed_page(gfp);
642 	if (!entry)
643 		return -ENOMEM;
644 
645 	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
646 		list_add_tail(&entry[i].list, &free_entries);
647 
648 	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
649 	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
650 
651 	return 0;
652 }
653 
654 static struct dma_debug_entry *__dma_entry_alloc(void)
655 {
656 	struct dma_debug_entry *entry;
657 
658 	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
659 	list_del(&entry->list);
660 	memset(entry, 0, sizeof(*entry));
661 
662 	num_free_entries -= 1;
663 	if (num_free_entries < min_free_entries)
664 		min_free_entries = num_free_entries;
665 
666 	return entry;
667 }
668 
669 static void __dma_entry_alloc_check_leak(void)
670 {
671 	u32 tmp = nr_total_entries % nr_prealloc_entries;
672 
673 	/* Shout each time we tick over some multiple of the initial pool */
674 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
675 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
676 			nr_total_entries,
677 			(nr_total_entries / nr_prealloc_entries));
678 	}
679 }
680 
681 /* struct dma_entry allocator
682  *
683  * The next two functions implement the allocator for
684  * struct dma_debug_entries.
685  */
686 static struct dma_debug_entry *dma_entry_alloc(void)
687 {
688 	struct dma_debug_entry *entry;
689 	unsigned long flags;
690 
691 	spin_lock_irqsave(&free_entries_lock, flags);
692 	if (num_free_entries == 0) {
693 		if (dma_debug_create_entries(GFP_ATOMIC)) {
694 			global_disable = true;
695 			spin_unlock_irqrestore(&free_entries_lock, flags);
696 			pr_err("debugging out of memory - disabling\n");
697 			return NULL;
698 		}
699 		__dma_entry_alloc_check_leak();
700 	}
701 
702 	entry = __dma_entry_alloc();
703 
704 	spin_unlock_irqrestore(&free_entries_lock, flags);
705 
706 #ifdef CONFIG_STACKTRACE
707 	entry->stack_len = stack_trace_save(entry->stack_entries,
708 					    ARRAY_SIZE(entry->stack_entries),
709 					    1);
710 #endif
711 	return entry;
712 }
713 
714 static void dma_entry_free(struct dma_debug_entry *entry)
715 {
716 	unsigned long flags;
717 
718 	active_cacheline_remove(entry);
719 
720 	/*
721 	 * Add to the beginning of the list - this way the entries are
722 	 * more likely to be cache hot when they are reallocated.
723 	 */
724 	spin_lock_irqsave(&free_entries_lock, flags);
725 	list_add(&entry->list, &free_entries);
726 	num_free_entries += 1;
727 	spin_unlock_irqrestore(&free_entries_lock, flags);
728 }
729 
730 /*
731  * DMA-API debugging init code
732  *
733  * The init code does two things:
734  *   1. Initialize core data structures
735  *   2. Preallocate a given number of dma_debug_entry structs
736  */
737 
738 static ssize_t filter_read(struct file *file, char __user *user_buf,
739 			   size_t count, loff_t *ppos)
740 {
741 	char buf[NAME_MAX_LEN + 1];
742 	unsigned long flags;
743 	int len;
744 
745 	if (!current_driver_name[0])
746 		return 0;
747 
748 	/*
749 	 * We can't copy to userspace directly because current_driver_name can
750 	 * only be read under the driver_name_lock with irqs disabled. So
751 	 * create a temporary copy first.
752 	 */
753 	read_lock_irqsave(&driver_name_lock, flags);
754 	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
755 	read_unlock_irqrestore(&driver_name_lock, flags);
756 
757 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
758 }
759 
760 static ssize_t filter_write(struct file *file, const char __user *userbuf,
761 			    size_t count, loff_t *ppos)
762 {
763 	char buf[NAME_MAX_LEN];
764 	unsigned long flags;
765 	size_t len;
766 	int i;
767 
768 	/*
769 	 * We can't copy from userspace directly. Access to
770 	 * current_driver_name is protected with a write_lock with irqs
771 	 * disabled. Since copy_from_user can fault and may sleep we
772 	 * need to copy to a temporary buffer first.
773 	 */
774 	len = min(count, (size_t)(NAME_MAX_LEN - 1));
775 	if (copy_from_user(buf, userbuf, len))
776 		return -EFAULT;
777 
778 	buf[len] = 0;
779 
780 	write_lock_irqsave(&driver_name_lock, flags);
781 
782 	/*
783 	 * Now handle the string we got from userspace very carefully.
784 	 * The rules are:
785 	 *         - only use the first token we got
786 	 *         - token delimiter is everything looking like a space
787 	 *           character (' ', '\n', '\t' ...)
788 	 *
789 	 */
790 	if (!isalnum(buf[0])) {
791 		/*
792 		 * If the first character userspace gave us is not
793 		 * alphanumerical then assume the filter should be
794 		 * switched off.
795 		 */
796 		if (current_driver_name[0])
797 			pr_info("switching off dma-debug driver filter\n");
798 		current_driver_name[0] = 0;
799 		current_driver = NULL;
800 		goto out_unlock;
801 	}
802 
803 	/*
804 	 * Now parse out the first token and use it as the name for the
805 	 * driver to filter for.
806 	 */
807 	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
808 		current_driver_name[i] = buf[i];
809 		if (isspace(buf[i]) || buf[i] == 0)
810 			break;
811 	}
812 	current_driver_name[i] = 0;
813 	current_driver = NULL;
814 
815 	pr_info("enable driver filter for driver [%s]\n",
816 		current_driver_name);
817 
818 out_unlock:
819 	write_unlock_irqrestore(&driver_name_lock, flags);
820 
821 	return count;
822 }
823 
824 static const struct file_operations filter_fops = {
825 	.read  = filter_read,
826 	.write = filter_write,
827 	.llseek = default_llseek,
828 };
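
/*
 * Typical debugfs usage (paths assume debugfs is mounted at
 * /sys/kernel/debug; the driver name is purely illustrative):
 *
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""       > /sys/kernel/debug/dma-api/driver_filter  # filter off
 */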
829 
830 static int dump_show(struct seq_file *seq, void *v)
831 {
832 	int idx;
833 
834 	for (idx = 0; idx < HASH_SIZE; idx++) {
835 		struct hash_bucket *bucket = &dma_entry_hash[idx];
836 		struct dma_debug_entry *entry;
837 		unsigned long flags;
838 
839 		spin_lock_irqsave(&bucket->lock, flags);
840 		list_for_each_entry(entry, &bucket->list, list) {
841 			seq_printf(seq,
842 				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
843 				   dev_name(entry->dev),
844 				   dev_driver_string(entry->dev),
845 				   type2name[entry->type], idx,
846 				   phys_addr(entry), entry->pfn,
847 				   entry->dev_addr, entry->size,
848 				   dir2name[entry->direction],
849 				   maperr2str[entry->map_err_type]);
850 		}
851 		spin_unlock_irqrestore(&bucket->lock, flags);
852 	}
853 	return 0;
854 }
855 DEFINE_SHOW_ATTRIBUTE(dump);
856 
857 static void dma_debug_fs_init(void)
858 {
859 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
860 
861 	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
862 	debugfs_create_u32("error_count", 0444, dentry, &error_count);
863 	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
864 	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
865 	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
866 	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
867 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
868 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
869 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
870 }
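
/*
 * Example (illustrative) interaction with the files created above:
 *
 *	# cat /sys/kernel/debug/dma-api/error_count
 *	0
 *	# echo 1 > /sys/kernel/debug/dma-api/all_errors   # report every error
 */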
871 
872 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
873 {
874 	struct dma_debug_entry *entry;
875 	unsigned long flags;
876 	int count = 0, i;
877 
878 	for (i = 0; i < HASH_SIZE; ++i) {
879 		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
880 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
881 			if (entry->dev == dev) {
882 				count += 1;
883 				*out_entry = entry;
884 			}
885 		}
886 		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
887 	}
888 
889 	return count;
890 }
891 
892 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
893 {
894 	struct device *dev = data;
895 	struct dma_debug_entry *uninitialized_var(entry);
896 	int count;
897 
898 	if (dma_debug_disabled())
899 		return 0;
900 
901 	switch (action) {
902 	case BUS_NOTIFY_UNBOUND_DRIVER:
903 		count = device_dma_allocations(dev, &entry);
904 		if (count == 0)
905 			break;
906 		err_printk(dev, entry, "device driver has pending "
907 				"DMA allocations while released from device "
908 				"[count=%d]\n"
909 				"One of leaked entries details: "
910 				"[device address=0x%016llx] [size=%llu bytes] "
911 				"[mapped with %s] [mapped as %s]\n",
912 			count, entry->dev_addr, entry->size,
913 			dir2name[entry->direction], type2name[entry->type]);
914 		break;
915 	default:
916 		break;
917 	}
918 
919 	return 0;
920 }
921 
922 void dma_debug_add_bus(struct bus_type *bus)
923 {
924 	struct notifier_block *nb;
925 
926 	if (dma_debug_disabled())
927 		return;
928 
929 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
930 	if (nb == NULL) {
931 		pr_err("dma_debug_add_bus: out of memory\n");
932 		return;
933 	}
934 
935 	nb->notifier_call = dma_debug_device_change;
936 
937 	bus_register_notifier(bus, nb);
938 }
939 
940 static int dma_debug_init(void)
941 {
942 	int i, nr_pages;
943 
944 	/* Do not use dma_debug_initialized here, since we really want to be
945 	 * called to set dma_debug_initialized
946 	 */
947 	if (global_disable)
948 		return 0;
949 
950 	for (i = 0; i < HASH_SIZE; ++i) {
951 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
952 		spin_lock_init(&dma_entry_hash[i].lock);
953 	}
954 
955 	dma_debug_fs_init();
956 
957 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
958 	for (i = 0; i < nr_pages; ++i)
959 		dma_debug_create_entries(GFP_KERNEL);
960 	if (num_free_entries >= nr_prealloc_entries) {
961 		pr_info("preallocated %d debug entries\n", nr_total_entries);
962 	} else if (num_free_entries > 0) {
963 		pr_warn("%d debug entries requested but only %d allocated\n",
964 			nr_prealloc_entries, nr_total_entries);
965 	} else {
966 		pr_err("debugging out of memory error - disabled\n");
967 		global_disable = true;
968 
969 		return 0;
970 	}
971 	min_free_entries = num_free_entries;
972 
973 	dma_debug_initialized = true;
974 
975 	pr_info("debugging enabled by kernel config\n");
976 	return 0;
977 }
978 core_initcall(dma_debug_init);
979 
980 static __init int dma_debug_cmdline(char *str)
981 {
982 	if (!str)
983 		return -EINVAL;
984 
985 	if (strncmp(str, "off", 3) == 0) {
986 		pr_info("debugging disabled on kernel command line\n");
987 		global_disable = true;
988 	}
989 
990 	return 0;
991 }
992 
993 static __init int dma_debug_entries_cmdline(char *str)
994 {
995 	if (!str)
996 		return -EINVAL;
997 	if (!get_option(&str, &nr_prealloc_entries))
998 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
999 	return 0;
1000 }
1001 
1002 __setup("dma_debug=", dma_debug_cmdline);
1003 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
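
/*
 * Example kernel command lines (values illustrative):
 *
 *	dma_debug=off             - disable dma-debug entirely
 *	dma_debug_entries=65536   - size of the preallocated entry pool
 */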
1004 
1005 static void check_unmap(struct dma_debug_entry *ref)
1006 {
1007 	struct dma_debug_entry *entry;
1008 	struct hash_bucket *bucket;
1009 	unsigned long flags;
1010 
1011 	bucket = get_hash_bucket(ref, &flags);
1012 	entry = bucket_find_exact(bucket, ref);
1013 
1014 	if (!entry) {
1015 		/* must drop lock before calling dma_mapping_error */
1016 		put_hash_bucket(bucket, &flags);
1017 
1018 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
1019 			err_printk(ref->dev, NULL,
1020 				   "device driver tries to free an "
1021 				   "invalid DMA memory address\n");
1022 		} else {
1023 			err_printk(ref->dev, NULL,
1024 				   "device driver tries to free DMA "
1025 				   "memory it has not allocated [device "
1026 				   "address=0x%016llx] [size=%llu bytes]\n",
1027 				   ref->dev_addr, ref->size);
1028 		}
1029 		return;
1030 	}
1031 
1032 	if (ref->size != entry->size) {
1033 		err_printk(ref->dev, entry, "device driver frees "
1034 			   "DMA memory with different size "
1035 			   "[device address=0x%016llx] [map size=%llu bytes] "
1036 			   "[unmap size=%llu bytes]\n",
1037 			   ref->dev_addr, entry->size, ref->size);
1038 	}
1039 
1040 	if (ref->type != entry->type) {
1041 		err_printk(ref->dev, entry, "device driver frees "
1042 			   "DMA memory with wrong function "
1043 			   "[device address=0x%016llx] [size=%llu bytes] "
1044 			   "[mapped as %s] [unmapped as %s]\n",
1045 			   ref->dev_addr, ref->size,
1046 			   type2name[entry->type], type2name[ref->type]);
1047 	} else if ((entry->type == dma_debug_coherent) &&
1048 		   (phys_addr(ref) != phys_addr(entry))) {
1049 		err_printk(ref->dev, entry, "device driver frees "
1050 			   "DMA memory with different CPU address "
1051 			   "[device address=0x%016llx] [size=%llu bytes] "
1052 			   "[cpu alloc address=0x%016llx] "
1053 			   "[cpu free address=0x%016llx]",
1054 			   ref->dev_addr, ref->size,
1055 			   phys_addr(entry),
1056 			   phys_addr(ref));
1057 	}
1058 
1059 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1060 	    ref->sg_call_ents != entry->sg_call_ents) {
1061 		err_printk(ref->dev, entry, "device driver frees "
1062 			   "DMA sg list with different entry count "
1063 			   "[map count=%d] [unmap count=%d]\n",
1064 			   entry->sg_call_ents, ref->sg_call_ents);
1065 	}
1066 
1067 	/*
1068 	 * This may not be a bug in reality - but most implementations of the
1069 	 * DMA API don't handle this properly, so check for it here
1070 	 */
1071 	if (ref->direction != entry->direction) {
1072 		err_printk(ref->dev, entry, "device driver frees "
1073 			   "DMA memory with different direction "
1074 			   "[device address=0x%016llx] [size=%llu bytes] "
1075 			   "[mapped with %s] [unmapped with %s]\n",
1076 			   ref->dev_addr, ref->size,
1077 			   dir2name[entry->direction],
1078 			   dir2name[ref->direction]);
1079 	}
1080 
1081 	/*
1082 	 * Drivers should use dma_mapping_error() to check the returned
1083 	 * addresses of dma_map_single() and dma_map_page().
1084 	 * If not, print this warning message. See Documentation/DMA-API.txt.
1085 	 */
1086 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1087 		err_printk(ref->dev, entry,
1088 			   "device driver failed to check map error "
1089 			   "[device address=0x%016llx] [size=%llu bytes] "
1090 			   "[mapped as %s]",
1091 			   ref->dev_addr, ref->size,
1092 			   type2name[entry->type]);
1093 	}
1094 
1095 	hash_bucket_del(entry);
1096 	dma_entry_free(entry);
1097 
1098 	put_hash_bucket(bucket, &flags);
1099 }
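
/*
 * The reference passed to check_unmap() must mirror the original map
 * call. A hedged driver-side illustration (identifiers hypothetical):
 *
 *	handle = dma_map_single(dev, buf, 4096, DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_single(dev, handle, 4096, DMA_FROM_DEVICE);
 *
 * Unmapping with a different size, direction or API family (e.g.
 * dma_free_coherent() for a dma_map_single() mapping) trips the
 * corresponding check above.
 */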
1100 
1101 static void check_for_stack(struct device *dev,
1102 			    struct page *page, size_t offset)
1103 {
1104 	void *addr;
1105 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1106 
1107 	if (!stack_vm_area) {
1108 		/* Stack is direct-mapped. */
1109 		if (PageHighMem(page))
1110 			return;
1111 		addr = page_address(page) + offset;
1112 		if (object_is_on_stack(addr))
1113 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1114 	} else {
1115 		/* Stack is vmalloced. */
1116 		int i;
1117 
1118 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
1119 			if (page != stack_vm_area->pages[i])
1120 				continue;
1121 
1122 			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1123 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1124 			break;
1125 		}
1126 	}
1127 }
1128 
1129 static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1130 {
1131 	unsigned long a1 = (unsigned long)addr;
1132 	unsigned long b1 = a1 + len;
1133 	unsigned long a2 = (unsigned long)start;
1134 	unsigned long b2 = (unsigned long)end;
1135 
1136 	return !(b1 <= a2 || a1 >= b2);
1137 }
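
/*
 * Worked example: addr = 0x1000, len = 0x100 overlaps [0x10c0, 0x2000)
 * because b1 = 0x1100 > a2 = 0x10c0 and a1 = 0x1000 < b2 = 0x2000.
 */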
1138 
1139 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1140 {
1141 	if (overlap(addr, len, _stext, _etext) ||
1142 	    overlap(addr, len, __start_rodata, __end_rodata))
1143 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1144 }
1145 
1146 static void check_sync(struct device *dev,
1147 		       struct dma_debug_entry *ref,
1148 		       bool to_cpu)
1149 {
1150 	struct dma_debug_entry *entry;
1151 	struct hash_bucket *bucket;
1152 	unsigned long flags;
1153 
1154 	bucket = get_hash_bucket(ref, &flags);
1155 
1156 	entry = bucket_find_contain(&bucket, ref, &flags);
1157 
1158 	if (!entry) {
1159 		err_printk(dev, NULL, "device driver tries "
1160 				"to sync DMA memory it has not allocated "
1161 				"[device address=0x%016llx] [size=%llu bytes]\n",
1162 				(unsigned long long)ref->dev_addr, ref->size);
1163 		goto out;
1164 	}
1165 
1166 	if (ref->size > entry->size) {
1167 		err_printk(dev, entry, "device driver syncs"
1168 				" DMA memory outside allocated range "
1169 				"[device address=0x%016llx] "
1170 				"[allocation size=%llu bytes] "
1171 				"[sync offset+size=%llu]\n",
1172 				entry->dev_addr, entry->size,
1173 				ref->size);
1174 	}
1175 
1176 	if (entry->direction == DMA_BIDIRECTIONAL)
1177 		goto out;
1178 
1179 	if (ref->direction != entry->direction) {
1180 		err_printk(dev, entry, "device driver syncs "
1181 				"DMA memory with different direction "
1182 				"[device address=0x%016llx] [size=%llu bytes] "
1183 				"[mapped with %s] [synced with %s]\n",
1184 				(unsigned long long)ref->dev_addr, entry->size,
1185 				dir2name[entry->direction],
1186 				dir2name[ref->direction]);
1187 	}
1188 
1189 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1190 		      !(ref->direction == DMA_TO_DEVICE))
1191 		err_printk(dev, entry, "device driver syncs "
1192 				"device read-only DMA memory for cpu "
1193 				"[device address=0x%016llx] [size=%llu bytes] "
1194 				"[mapped with %s] [synced with %s]\n",
1195 				(unsigned long long)ref->dev_addr, entry->size,
1196 				dir2name[entry->direction],
1197 				dir2name[ref->direction]);
1198 
1199 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1200 		       !(ref->direction == DMA_FROM_DEVICE))
1201 		err_printk(dev, entry, "device driver syncs "
1202 				"device write-only DMA memory to device "
1203 				"[device address=0x%016llx] [size=%llu bytes] "
1204 				"[mapped with %s] [synced with %s]\n",
1205 				(unsigned long long)ref->dev_addr, entry->size,
1206 				dir2name[entry->direction],
1207 				dir2name[ref->direction]);
1208 
1209 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1210 	    ref->sg_call_ents != entry->sg_call_ents) {
1211 		err_printk(ref->dev, entry, "device driver syncs "
1212 			   "DMA sg list with different entry count "
1213 			   "[map count=%d] [sync count=%d]\n",
1214 			   entry->sg_call_ents, ref->sg_call_ents);
1215 	}
1216 
1217 out:
1218 	put_hash_bucket(bucket, &flags);
1219 }
1220 
1221 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1222 {
1223 #ifdef CONFIG_DMA_API_DEBUG_SG
1224 	unsigned int max_seg = dma_get_max_seg_size(dev);
1225 	u64 start, end, boundary = dma_get_seg_boundary(dev);
1226 
1227 	/*
1228 	 * Either the driver forgot to set dma_parms appropriately, or
1229 	 * whoever generated the list forgot to check them.
1230 	 */
1231 	if (sg->length > max_seg)
1232 		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1233 			   sg->length, max_seg);
1234 	/*
1235 	 * In some cases this could potentially be the DMA API
1236 	 * implementation's fault, but it would usually imply that
1237 	 * the scatterlist was built inappropriately to begin with.
1238 	 */
1239 	start = sg_dma_address(sg);
1240 	end = start + sg_dma_len(sg) - 1;
1241 	if ((start ^ end) & ~boundary)
1242 		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1243 			   start, end, boundary);
1244 #endif
1245 }
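
/*
 * Drivers can declare their real segment limits so the checks above
 * reflect actual hardware constraints, e.g. (values illustrative):
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, 0xffffffff);
 */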
1246 
1247 void debug_dma_map_single(struct device *dev, const void *addr,
1248 			    unsigned long len)
1249 {
1250 	if (unlikely(dma_debug_disabled()))
1251 		return;
1252 
1253 	if (!virt_addr_valid(addr))
1254 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1255 			   addr, len);
1256 
1257 	if (is_vmalloc_addr(addr))
1258 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1259 			   addr, len);
1260 }
1261 EXPORT_SYMBOL(debug_dma_map_single);
1262 
1263 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1264 			size_t size, int direction, dma_addr_t dma_addr)
1265 {
1266 	struct dma_debug_entry *entry;
1267 
1268 	if (unlikely(dma_debug_disabled()))
1269 		return;
1270 
1271 	if (dma_mapping_error(dev, dma_addr))
1272 		return;
1273 
1274 	entry = dma_entry_alloc();
1275 	if (!entry)
1276 		return;
1277 
1278 	entry->dev       = dev;
1279 	entry->type      = dma_debug_single;
1280 	entry->pfn	 = page_to_pfn(page);
1281 	entry->offset	 = offset;
1282 	entry->dev_addr  = dma_addr;
1283 	entry->size      = size;
1284 	entry->direction = direction;
1285 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
1286 
1287 	check_for_stack(dev, page, offset);
1288 
1289 	if (!PageHighMem(page)) {
1290 		void *addr = page_address(page) + offset;
1291 
1292 		check_for_illegal_area(dev, addr, size);
1293 	}
1294 
1295 	add_dma_entry(entry);
1296 }
1297 EXPORT_SYMBOL(debug_dma_map_page);
1298 
1299 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1300 {
1301 	struct dma_debug_entry ref;
1302 	struct dma_debug_entry *entry;
1303 	struct hash_bucket *bucket;
1304 	unsigned long flags;
1305 
1306 	if (unlikely(dma_debug_disabled()))
1307 		return;
1308 
1309 	ref.dev = dev;
1310 	ref.dev_addr = dma_addr;
1311 	bucket = get_hash_bucket(&ref, &flags);
1312 
1313 	list_for_each_entry(entry, &bucket->list, list) {
1314 		if (!exact_match(&ref, entry))
1315 			continue;
1316 
1317 		/*
1318 		 * The same physical address can be mapped multiple
1319 		 * times. Without a hardware IOMMU this results in the
1320 		 * same device addresses being put into the dma-debug
1321 		 * hash multiple times too. This can result in false
1322 		 * positives being reported. Therefore we implement a
1323 		 * best-fit algorithm here which updates the first entry
1324 		 * from the hash which fits the reference value and is
1325 		 * not currently listed as being checked.
1326 		 */
1327 		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1328 			entry->map_err_type = MAP_ERR_CHECKED;
1329 			break;
1330 		}
1331 	}
1332 
1333 	put_hash_bucket(bucket, &flags);
1334 }
1335 EXPORT_SYMBOL(debug_dma_mapping_error);
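
/*
 * Driver-side sketch of the pattern tracked here (hypothetical driver
 * code, not part of this file):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;	 /* moves the entry to MAP_ERR_CHECKED */
 *
 * Skipping the dma_mapping_error() call leaves the entry in
 * MAP_ERR_NOT_CHECKED state, which check_unmap() warns about.
 */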
1336 
1337 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1338 			  size_t size, int direction)
1339 {
1340 	struct dma_debug_entry ref = {
1341 		.type           = dma_debug_single,
1342 		.dev            = dev,
1343 		.dev_addr       = addr,
1344 		.size           = size,
1345 		.direction      = direction,
1346 	};
1347 
1348 	if (unlikely(dma_debug_disabled()))
1349 		return;
1350 	check_unmap(&ref);
1351 }
1352 EXPORT_SYMBOL(debug_dma_unmap_page);
1353 
1354 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1355 		      int nents, int mapped_ents, int direction)
1356 {
1357 	struct dma_debug_entry *entry;
1358 	struct scatterlist *s;
1359 	int i;
1360 
1361 	if (unlikely(dma_debug_disabled()))
1362 		return;
1363 
1364 	for_each_sg(sg, s, mapped_ents, i) {
1365 		entry = dma_entry_alloc();
1366 		if (!entry)
1367 			return;
1368 
1369 		entry->type           = dma_debug_sg;
1370 		entry->dev            = dev;
1371 		entry->pfn	      = page_to_pfn(sg_page(s));
1372 		entry->offset	      = s->offset;
1373 		entry->size           = sg_dma_len(s);
1374 		entry->dev_addr       = sg_dma_address(s);
1375 		entry->direction      = direction;
1376 		entry->sg_call_ents   = nents;
1377 		entry->sg_mapped_ents = mapped_ents;
1378 
1379 		check_for_stack(dev, sg_page(s), s->offset);
1380 
1381 		if (!PageHighMem(sg_page(s))) {
1382 			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1383 		}
1384 
1385 		check_sg_segment(dev, s);
1386 
1387 		add_dma_entry(entry);
1388 	}
1389 }
1390 EXPORT_SYMBOL(debug_dma_map_sg);
1391 
1392 static int get_nr_mapped_entries(struct device *dev,
1393 				 struct dma_debug_entry *ref)
1394 {
1395 	struct dma_debug_entry *entry;
1396 	struct hash_bucket *bucket;
1397 	unsigned long flags;
1398 	int mapped_ents;
1399 
1400 	bucket       = get_hash_bucket(ref, &flags);
1401 	entry        = bucket_find_exact(bucket, ref);
1402 	mapped_ents  = 0;
1403 
1404 	if (entry)
1405 		mapped_ents = entry->sg_mapped_ents;
1406 	put_hash_bucket(bucket, &flags);
1407 
1408 	return mapped_ents;
1409 }
1410 
1411 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1412 			int nelems, int dir)
1413 {
1414 	struct scatterlist *s;
1415 	int mapped_ents = 0, i;
1416 
1417 	if (unlikely(dma_debug_disabled()))
1418 		return;
1419 
1420 	for_each_sg(sglist, s, nelems, i) {
1421 
1422 		struct dma_debug_entry ref = {
1423 			.type           = dma_debug_sg,
1424 			.dev            = dev,
1425 			.pfn		= page_to_pfn(sg_page(s)),
1426 			.offset		= s->offset,
1427 			.dev_addr       = sg_dma_address(s),
1428 			.size           = sg_dma_len(s),
1429 			.direction      = dir,
1430 			.sg_call_ents   = nelems,
1431 		};
1432 
1433 		if (mapped_ents && i >= mapped_ents)
1434 			break;
1435 
1436 		if (!i)
1437 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1438 
1439 		check_unmap(&ref);
1440 	}
1441 }
1442 EXPORT_SYMBOL(debug_dma_unmap_sg);
1443 
1444 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1445 			      dma_addr_t dma_addr, void *virt)
1446 {
1447 	struct dma_debug_entry *entry;
1448 
1449 	if (unlikely(dma_debug_disabled()))
1450 		return;
1451 
1452 	if (unlikely(virt == NULL))
1453 		return;
1454 
1455 	/* handle vmalloc and linear addresses */
1456 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1457 		return;
1458 
1459 	entry = dma_entry_alloc();
1460 	if (!entry)
1461 		return;
1462 
1463 	entry->type      = dma_debug_coherent;
1464 	entry->dev       = dev;
1465 	entry->offset	 = offset_in_page(virt);
1466 	entry->size      = size;
1467 	entry->dev_addr  = dma_addr;
1468 	entry->direction = DMA_BIDIRECTIONAL;
1469 
1470 	if (is_vmalloc_addr(virt))
1471 		entry->pfn = vmalloc_to_pfn(virt);
1472 	else
1473 		entry->pfn = page_to_pfn(virt_to_page(virt));
1474 
1475 	add_dma_entry(entry);
1476 }
1477 
1478 void debug_dma_free_coherent(struct device *dev, size_t size,
1479 			 void *virt, dma_addr_t addr)
1480 {
1481 	struct dma_debug_entry ref = {
1482 		.type           = dma_debug_coherent,
1483 		.dev            = dev,
1484 		.offset		= offset_in_page(virt),
1485 		.dev_addr       = addr,
1486 		.size           = size,
1487 		.direction      = DMA_BIDIRECTIONAL,
1488 	};
1489 
1490 	/* handle vmalloc and linear addresses */
1491 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1492 		return;
1493 
1494 	if (is_vmalloc_addr(virt))
1495 		ref.pfn = vmalloc_to_pfn(virt);
1496 	else
1497 		ref.pfn = page_to_pfn(virt_to_page(virt));
1498 
1499 	if (unlikely(dma_debug_disabled()))
1500 		return;
1501 
1502 	check_unmap(&ref);
1503 }
1504 
1505 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1506 			    int direction, dma_addr_t dma_addr)
1507 {
1508 	struct dma_debug_entry *entry;
1509 
1510 	if (unlikely(dma_debug_disabled()))
1511 		return;
1512 
1513 	entry = dma_entry_alloc();
1514 	if (!entry)
1515 		return;
1516 
1517 	entry->type		= dma_debug_resource;
1518 	entry->dev		= dev;
1519 	entry->pfn		= PHYS_PFN(addr);
1520 	entry->offset		= offset_in_page(addr);
1521 	entry->size		= size;
1522 	entry->dev_addr		= dma_addr;
1523 	entry->direction	= direction;
1524 	entry->map_err_type	= MAP_ERR_NOT_CHECKED;
1525 
1526 	add_dma_entry(entry);
1527 }
1528 EXPORT_SYMBOL(debug_dma_map_resource);
1529 
1530 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1531 			      size_t size, int direction)
1532 {
1533 	struct dma_debug_entry ref = {
1534 		.type           = dma_debug_resource,
1535 		.dev            = dev,
1536 		.dev_addr       = dma_addr,
1537 		.size           = size,
1538 		.direction      = direction,
1539 	};
1540 
1541 	if (unlikely(dma_debug_disabled()))
1542 		return;
1543 
1544 	check_unmap(&ref);
1545 }
1546 EXPORT_SYMBOL(debug_dma_unmap_resource);
1547 
1548 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1549 				   size_t size, int direction)
1550 {
1551 	struct dma_debug_entry ref;
1552 
1553 	if (unlikely(dma_debug_disabled()))
1554 		return;
1555 
1556 	ref.type         = dma_debug_single;
1557 	ref.dev          = dev;
1558 	ref.dev_addr     = dma_handle;
1559 	ref.size         = size;
1560 	ref.direction    = direction;
1561 	ref.sg_call_ents = 0;
1562 
1563 	check_sync(dev, &ref, true);
1564 }
1565 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1566 
1567 void debug_dma_sync_single_for_device(struct device *dev,
1568 				      dma_addr_t dma_handle, size_t size,
1569 				      int direction)
1570 {
1571 	struct dma_debug_entry ref;
1572 
1573 	if (unlikely(dma_debug_disabled()))
1574 		return;
1575 
1576 	ref.type         = dma_debug_single;
1577 	ref.dev          = dev;
1578 	ref.dev_addr     = dma_handle;
1579 	ref.size         = size;
1580 	ref.direction    = direction;
1581 	ref.sg_call_ents = 0;
1582 
1583 	check_sync(dev, &ref, false);
1584 }
1585 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1586 
1587 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1588 			       int nelems, int direction)
1589 {
1590 	struct scatterlist *s;
1591 	int mapped_ents = 0, i;
1592 
1593 	if (unlikely(dma_debug_disabled()))
1594 		return;
1595 
1596 	for_each_sg(sg, s, nelems, i) {
1597 
1598 		struct dma_debug_entry ref = {
1599 			.type           = dma_debug_sg,
1600 			.dev            = dev,
1601 			.pfn		= page_to_pfn(sg_page(s)),
1602 			.offset		= s->offset,
1603 			.dev_addr       = sg_dma_address(s),
1604 			.size           = sg_dma_len(s),
1605 			.direction      = direction,
1606 			.sg_call_ents   = nelems,
1607 		};
1608 
1609 		if (!i)
1610 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1611 
1612 		if (i >= mapped_ents)
1613 			break;
1614 
1615 		check_sync(dev, &ref, true);
1616 	}
1617 }
1618 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1619 
1620 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1621 				  int nelems, int direction)
1622 {
1623 	struct scatterlist *s;
1624 	int mapped_ents = 0, i;
1625 
1626 	if (unlikely(dma_debug_disabled()))
1627 		return;
1628 
1629 	for_each_sg(sg, s, nelems, i) {
1630 
1631 		struct dma_debug_entry ref = {
1632 			.type           = dma_debug_sg,
1633 			.dev            = dev,
1634 			.pfn		= page_to_pfn(sg_page(s)),
1635 			.offset		= s->offset,
1636 			.dev_addr       = sg_dma_address(s),
1637 			.size           = sg_dma_len(s),
1638 			.direction      = direction,
1639 			.sg_call_ents   = nelems,
1640 		};
1641 		if (!i)
1642 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1643 
1644 		if (i >= mapped_ents)
1645 			break;
1646 
1647 		check_sync(dev, &ref, false);
1648 	}
1649 }
1650 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1651 
1652 static int __init dma_debug_driver_setup(char *str)
1653 {
1654 	int i;
1655 
1656 	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1657 		current_driver_name[i] = *str;
1658 		if (*str == 0)
1659 			break;
1660 	}
1661 
1662 	if (current_driver_name[0])
1663 		pr_info("enable driver filter for driver [%s]\n",
1664 			current_driver_name);
1665 
1666 
1667 	return 1;
1668 }
1669 __setup("dma_debug_driver=", dma_debug_driver_setup);
1670