// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V specific functions to support DMA for non-coherent devices
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static bool noncoherent_supported __ro_after_init;

static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}

static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}

static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}

/*
 * Clean (write back) the buffer before a DMA_FROM_DEVICE transfer rather
 * than invalidating it, so that a dirty cache line cannot be evicted on
 * top of the data the device is writing.
 */
static inline bool arch_sync_dma_clean_before_fromdevice(void)
{
	return true;
}

/*
 * The CPU may speculatively pull cache lines in while the device owns the
 * buffer, so the buffer must be invalidated again after the transfer, in
 * arch_sync_dma_for_cpu().
 */
static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
{
	return true;
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		arch_dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		if (!arch_sync_dma_clean_before_fromdevice()) {
			arch_dma_cache_inv(paddr, size);
			break;
		}
		fallthrough;

	case DMA_BIDIRECTIONAL:
		/* Skip the invalidate here if it's done later */
		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
		    arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_wback(paddr, size);
		else
			arch_dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
		if (arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);

	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: device non-coherent but no non-coherent operations supported",
		   dev_driver_string(dev), dev_name(dev));

	dev->dma_coherent = coherent;
}

void riscv_noncoherent_supported(void)
{
	WARN(!riscv_cbom_block_size,
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}
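
/*
 * Illustrative sketch (not part of this file): how a driver for a
 * non-coherent device reaches the hooks above through the streaming DMA
 * API. "dev", "buf" and "len" are hypothetical names; the arrows show the
 * calls kernel/dma/direct.c makes when dev->dma_coherent is false.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_device(paddr, len, DMA_FROM_DEVICE)
 *		   (clean, so no dirty line is evicted over the device data)
 *	... device DMAs into the buffer ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_cpu(paddr, len, DMA_FROM_DEVICE)
 *		   (invalidate, dropping any speculatively fetched lines)
 */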