xref: /openbmc/linux/arch/arc/mm/dma.c (revision 80e61fcd23946cb222f780a49ab2eeb7ef1d3749)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC), generic code suffices
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;

	/*
	 * The __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp | __GFP_ZERO, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs an MMU mapping to enforce non-cacheability.
	 * kvaddr is the kernel virtual address (0x7000_0000 based).
	 */
	kvaddr = ioremap_nocache(paddr, size);
	if (kvaddr == NULL) {
		__free_pages(page, order);
		return NULL;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr,
	 * hence can't be used to efficiently flush L1 and/or L2 which need
	 * paddr. Currently flush_cache_vmap() nukes the L1 cache completely,
	 * which will be optimized as a separate commit.
	 */
	dma_cache_wback_inv(paddr, size);
	return kvaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	iounmap((void __force __iomem *)vaddr);
	__free_pages(page, get_order(size));
}
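
/*
 * Illustrative sketch (not part of this file): for a non-coherent device,
 * drivers reach the two hooks above through the generic coherent DMA API.
 * The names below (my_dev, MY_BUF_SIZE) are made up for the example.
 *
 *	void *cpu_addr;
 *	dma_addr_t handle;
 *
 *	cpu_addr = dma_alloc_coherent(my_dev, MY_BUF_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, MY_BUF_SIZE, cpu_addr, handle);
 */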

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_addr);
}
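
/*
 * Illustrative sketch (not part of this file): the helper above lets the
 * generic mmap path translate a coherent buffer's dma_addr_t into a pfn.
 * A driver would typically hit it from its mmap handler; my_dev, vma,
 * cpu_addr, handle and size come from the hypothetical driver:
 *
 *	return dma_mmap_coherent(my_dev, vma, cpu_addr, handle, size);
 */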

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument as that is
 * done in the upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate only needed for speculative CPU prefetches */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
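
/*
 * Illustrative sketch (not part of this file): the two sync hooks above are
 * what the streaming DMA API ends up calling for a non-coherent device, per
 * the table further up.  Names (my_dev, buf, len) are made up; a receive
 * path might look like:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(my_dev, handle))
 *		return -ENOMEM;
 *	... device writes into the buffer ...
 *	dma_unmap_single(my_dev, handle, len, DMA_FROM_DEVICE);
 *	... buf is now safe for the CPU to read ...
 */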

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding the need for any explicit cache maintenance
	 * of DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
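
/*
 * Illustrative note (not part of this file): "coherent" above normally comes
 * from the OF core - of_dma_configure() derives it from the device's
 * "dma-coherent" DT property and then calls, roughly:
 *
 *	arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
 */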
158