// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology calls memory areas with hardware-maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and of these CPUs only the R10000 and R12000 are used in such
 * systems: the SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

/* Push a freshly allocated coherent buffer out of the caches into RAM. */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

/* Return the buffer's alias in the uncached segment (UNCAC_BASE). */
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}
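
/*
 * A minimal sketch of the consumer side of the two hooks above, under the
 * assumption that the generic dma-direct allocator is in use: on a
 * dma_alloc_coherent() call it writes the new buffer back to RAM via
 * arch_dma_prep_coherent() and then returns the uncached alias produced
 * by arch_dma_set_uncached().  The function and variable names below are
 * hypothetical, for illustration only.
 */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
				dma_addr_t *ring_dma)
{
	/*
	 * The returned pointer is already uncached on this platform, so
	 * plain loads and stores are visible to the device without any
	 * further cache maintenance.
	 */
	return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}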

/*
 * Apply the cache maintenance matching the DMA direction to a buffer
 * through its kernel virtual address: write dirty lines back before the
 * device reads (DMA_TO_DEVICE), invalidate stale lines before the CPU
 * reads data the device wrote (DMA_FROM_DEVICE), or both.
 */
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.
 * But we still need to process highmem pages individually.  If highmem
 * is not configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			/* Highmem pages must be mapped one at a time. */
			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem pages are contiguous in the linear map,
			 * so the remaining length can be synced in one go.
			 */
			dma_sync_virt(page_address(page) + offset, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	/* Only CPUs that speculatively fill caches need a post-DMA flush. */
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir);
}
#endif

void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}

#ifdef CONFIG_DMA_PERDEV_COHERENT
/* Some platforms declare coherence per device; just record the flag. */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif
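
/*
 * A minimal sketch of how the streaming-DMA hooks above are reached from
 * a driver, assuming the generic dma-direct path; the device, buffer and
 * length below are hypothetical.  dma_map_single() invalidates the buffer
 * through arch_sync_dma_for_device(), and dma_sync_single_for_cpu() (as
 * well as dma_unmap_single()) lands in arch_sync_dma_for_cpu(), which
 * flushes again only when cpu_needs_post_dma_flush() is true.
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* Make the device's writes visible to the CPU before reading. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... consume 'buf' ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}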