// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-map-ops.h>
#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * Arch-specific callbacks for the generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handle both coherent and non-coherent requests from caller
 *
 * For DMA-coherent hardware (IOC) the generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it is passed a
	 * kvaddr and hence can't be used to efficiently flush the L1
	 * and/or L2 caches, which need a paddr. Currently
	 * flush_cache_vmap() nukes the L1 cache completely; that will be
	 * optimized in a separate commit.
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}
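
/*
 * Illustrative sketch, not part of the kernel source: how the hook above
 * is reached. For a device without coherent DMA, dma_alloc_coherent()
 * lands in the generic dma-direct allocator, which calls
 * arch_dma_prep_coherent() to evict stale cache lines before the buffer
 * is remapped uncached. The function below is hypothetical.
 */
static int __maybe_unused example_alloc_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* generic code -> dma_direct_alloc() -> arch_dma_prep_coherent() */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... CPU and device can now share the buffer ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}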

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument; that is
 * done by the upper-layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE: invalidate only needed for speculative CPU prefetches */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
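
/*
 * Illustrative sketch, not part of the kernel source: the streaming-DMA
 * calls that funnel into the two hooks above on a non-coherent device,
 * matching the table in the comment. dma_map_single() ends up in
 * arch_sync_dma_for_device() and dma_unmap_single() in
 * arch_sync_dma_for_cpu(). The function below is hypothetical.
 */
static int __maybe_unused example_receive(struct device *dev, void *buf,
					  size_t len)
{
	dma_addr_t dma;

	/* map == for_device: FROM_DEV row -> dma_cache_inv() */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device DMAs received data into @buf via @dma ... */

	/* unmap == for_cpu: invalidate again for speculative prefetches */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}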

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory, eliding the need for any explicit cache maintenance
	 * of DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
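
/*
 * Illustrative sketch, not part of the kernel source: the device-tree
 * fragment that, combined with an enabled ARCv2 IOC, makes
 * arch_setup_dma_ops() take the coherent path for a device. The node
 * name and compatible string below are hypothetical; "dma-coherent" is
 * the standard DT property.
 *
 *	ethernet@f0008000 {
 *		compatible = "snps,example-gmac";
 *		dma-coherent;
 *	};
 */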