// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition. However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

void *uncached_kernel_address(void *addr)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

void *cached_kernel_address(void *addr)
{
	return __va(addr) - UNCAC_BASE;
}

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
}

pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
	return pgprot_noncached(prot);
}

static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages. But
 * we still need to process highmem pages individually. If highmem is not
 * configured then the bulk of this loop gets optimized out.
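 *
 * Highmem pages have no permanent kernel mapping, so each one is mapped
 * with kmap_atomic() just long enough to run the cache operation and is
 * unmapped again right afterwards. Lowmem pages are always mapped, so
 * page_address() can be used on them directly.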
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem pages are virtually contiguous, so the
			 * remainder of the range can be synced in one call.
			 * This must use 'left' rather than 'size': if
			 * earlier iterations handled highmem pages, part
			 * of 'size' has already been synced and using it
			 * here would flush past the end of the buffer.
			 */
			dma_sync_virt(page_address(page) + offset, left, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush(dev))
		dma_sync_phys(paddr, size, dir);
}
#endif

void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}

#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif