// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA coherent memory allocation.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>

/*
 * Apply a cache maintenance routine to the physical range
 * [paddr, paddr + size). Lowmem pages are reached through the kernel's
 * linear mapping; highmem pages are mapped one page at a time with
 * kmap_atomic().
 */
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

/*
 * Called before the CPU reads a streaming buffer the device may have
 * written: discard any stale cached copies.
 */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

/*
 * Called before the device reads a streaming buffer the CPU may have
 * written: write dirty lines back to memory. Nothing to do unless the
 * dcache is configured as writeback.
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_CACHED_VADDR &&
	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}

bool platform_vaddr_uncached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}

void *platform_vaddr_to_uncached(void *p)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}

void *platform_vaddr_to_cached(void *p)
{
	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
/*
 * Without an MMU the platform must override these; warn if the weak
 * defaults are ever used.
 */
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return true;
}

bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return false;
}

void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}

void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}
#endif

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 * Otherwise we have to use page attributes (not implemented).
 */
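
/*
 * Illustrative sketch, not part of the original file: drivers never call
 * arch_dma_alloc()/arch_dma_free() below directly; they go through the
 * generic DMA API, which dispatches here on noncoherent configurations.
 * Assuming a hypothetical platform device 'pdev' and a 4 KiB buffer:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	if (buf) {
 *		... program the device with the bus address 'dma', access
 *		... the buffer through the uncached alias 'buf' ...
 *		dma_free_coherent(&pdev->dev, SZ_4K, buf, dma);
 *	}
 */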

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Devices that cannot address all of memory must use ZONE_DMA */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	/* Try CMA first if blocking is allowed, then fall back to alloc_pages() */
	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag & __GFP_NOWARN);

	if (!page)
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));

	if (!page)
		return NULL;

	*handle = phys_to_dma(dev, page_to_phys(page));

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return page;

#ifdef CONFIG_MMU
	if (PageHighMem(page)) {
		void *p;

		/* Highmem pages have no kseg alias: remap them uncached */
		p = dma_common_contiguous_remap(page, size, VM_MAP,
						pgprot_noncached(PAGE_KERNEL),
						__builtin_return_address(0));
		if (!p) {
			if (!dma_release_from_contiguous(dev, page, count))
				__free_pages(page, get_order(size));
		}
		return p;
	}
#endif
	/*
	 * Drop any cached lines covering the buffer and hand out the
	 * uncached kseg alias.
	 */
	BUG_ON(!platform_vaddr_cached(page_address(page)));
	__invalidate_dcache_range((unsigned long)page_address(page), size);
	return platform_vaddr_to_uncached(page_address(page));
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	/*
	 * Recover the struct page from whichever form of the buffer was
	 * handed out by arch_dma_alloc().
	 */
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		page = vaddr;
	} else if (platform_vaddr_uncached(vaddr)) {
		page = virt_to_page(platform_vaddr_to_cached(vaddr));
	} else {
#ifdef CONFIG_MMU
		dma_common_free_remap(vaddr, size, VM_MAP);
#endif
		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
	}

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
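
/*
 * Illustrative sketch, not part of the original file: the streaming path
 * reaches the arch_sync_dma_* hooks above through the generic dma-direct
 * code. Assuming a device 'dev' and a kmalloc'ed buffer 'buf' of 'len'
 * bytes:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... let the device read 'len' bytes at bus address 'dma' ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * Mapping with DMA_TO_DEVICE ends up in arch_sync_dma_for_device(), which
 * writes dirty dcache lines back; unmapping a DMA_FROM_DEVICE buffer ends
 * up in arch_sync_dma_for_cpu(), which invalidates the lines so the CPU
 * sees what the device wrote.
 */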