xref: /openbmc/linux/arch/arc/mm/dma.c (revision 1d1997db)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) the generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah, this bit us - STAR 9000898266.
	 *
	 * Although the core does call flush_cache_vmap(), it gets a kvaddr,
	 * hence can't be used to efficiently flush the L1 and/or L2, which
	 * need a paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely, which will be optimized in a separate commit.
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

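/*
 * Illustrative driver-side sketch (not part of this file; "my_dev" and
 * "BUF_SZ" are hypothetical names): on a non-coherent ARC platform, a
 * coherent allocation like the one below is what ultimately leads the
 * generic dma-direct code to call arch_dma_prep_coherent() on the
 * backing page, so stale cache lines are evicted before the buffer is
 * handed out as an uncached mapping.
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(my_dev, BUF_SZ, &handle,
 *					 GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 */
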
/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument, as that is
 * done in the upper layer functions (in include/linux/dma-mapping.h)
 */

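/*
 * Illustrative sketch of the table above from a driver's point of view
 * (hypothetical names, not part of this file): a streaming map/unmap
 * cycle for a receive buffer. Per the table, the map (for_device) step
 * invalidates, and the unmap (for_cpu) step invalidates again to drop
 * any lines pulled in by speculative prefetch while the device owned
 * the buffer.
 *
 *	dma_addr_t d = dma_map_single(my_dev, rx_buf, RX_SZ,
 *				      DMA_FROM_DEVICE);
 *	if (dma_mapping_error(my_dev, d))
 *		return -ENOMEM;
 *	// ... device DMAs into the buffer ...
 *	dma_unmap_single(my_dev, d, RX_SZ, DMA_FROM_DEVICE);
 *	// CPU may now safely read rx_buf
 */
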
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

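/*
 * Illustrative sketch (hypothetical names): when the CPU writes into a
 * mapped buffer and then hands it back to the device, a partial sync
 * like the one below funnels into arch_sync_dma_for_device() above,
 * writing the dirty lines back so the device sees the CPU's data.
 *
 *	memcpy(tx_buf, pkt, pkt_len);
 *	dma_sync_single_for_device(my_dev, tx_handle, pkt_len,
 *				   DMA_TO_DEVICE);
 *	// now kick the device to start the transfer
 */
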
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed only for speculative CPU prefetches */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

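/*
 * Illustrative sketch (hypothetical names): before the CPU inspects a
 * buffer the device has written to while it remains mapped, a sync
 * like the one below lands in arch_sync_dma_for_cpu() above,
 * invalidating any speculatively prefetched lines so the CPU reads
 * the device's data rather than stale cache contents.
 *
 *	dma_sync_single_for_cpu(my_dev, rx_handle, RX_SZ,
 *				DMA_FROM_DEVICE);
 *	process_packet(rx_buf);
 */
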
/*
 * Plug in the direct DMA map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic, keeping the caches consistent
	 * with memory and eliding the need for any explicit cache maintenance
	 * of DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
107