// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V specific functions to support DMA for non-coherent devices
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>

static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);

static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback)) {
		noncoherent_cache_ops.wback(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}

static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.inv)) {
		noncoherent_cache_ops.inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}

static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}

static inline bool arch_sync_dma_clean_before_fromdevice(void)
{
	return true;
}

static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
{
	return true;
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		arch_dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		if (!arch_sync_dma_clean_before_fromdevice()) {
			arch_dma_cache_inv(paddr, size);
			break;
		}
		fallthrough;

	case DMA_BIDIRECTIONAL:
		/* Skip the invalidate here if it's done later */
		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
		    arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_wback(paddr, size);
		else
			arch_dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
		if (arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
		return;
	}
#endif

	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);

	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: device non-coherent but no non-coherent operations supported",
		   dev_driver_string(dev), dev_name(dev));

	dev->dma_coherent = coherent;
}

void riscv_noncoherent_supported(void)
{
	WARN(!riscv_cbom_block_size,
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}

void __init riscv_set_dma_cache_alignment(void)
{
	if (!noncoherent_supported)
		dma_cache_alignment = 1;
}