// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V specific functions to support DMA for non-coherent devices
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>

static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);

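/*
 * Cache maintenance helpers. Each one prefers a non-standard callback from
 * noncoherent_cache_ops when CONFIG_RISCV_NONSTANDARD_CACHE_OPS is enabled
 * and platform code has registered one, and otherwise falls back to
 * ALT_CMO_OP(), i.e. the standard Zicbom cache-block operations (or the
 * vendor alternative patched in at boot):
 *
 *   wback     - write dirty lines back to memory (clean)
 *   inv       - drop cached lines without writing them back (invalidate)
 *   wback_inv - write back, then drop (flush)
 */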
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback)) {
		noncoherent_cache_ops.wback(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}

static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.inv)) {
		noncoherent_cache_ops.inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}

static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}

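/*
 * Policy helpers for the streaming-DMA hooks below. Both return true on
 * RISC-V: the cache is cleaned before a DMA_FROM_DEVICE transfer (so dirty
 * lines cannot be written back over the buffer while the device owns it)
 * and invalidated again in arch_sync_dma_for_cpu() once the transfer is
 * done (to discard anything the CPU prefetched in the meantime).
 */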
static inline bool arch_sync_dma_clean_before_fromdevice(void)
{
	return true;
}

static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
{
	return true;
}

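/*
 * Called by the DMA-mapping core before a non-coherent device accesses a
 * streaming mapping: write back and/or invalidate the CPU cache so the
 * device observes up-to-date data and stale lines cannot corrupt the
 * buffer during the transfer.
 */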
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		arch_dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		if (!arch_sync_dma_clean_before_fromdevice()) {
			arch_dma_cache_inv(paddr, size);
			break;
		}
		fallthrough;

	case DMA_BIDIRECTIONAL:
		/* Skip the invalidate here if it's done later */
		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
		    arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_wback(paddr, size);
		else
			arch_dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

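/*
 * Called by the DMA-mapping core after a non-coherent device has written
 * to a streaming mapping: invalidate the CPU cache so the CPU reads the
 * DMA'd data rather than stale or speculatively fetched lines. Nothing is
 * needed for DMA_TO_DEVICE, where the device only read from the buffer.
 */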
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
		if (arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

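/*
 * Flush a page out of the CPU cache before it is handed out by the
 * coherent (uncached) DMA allocator, so that no dirty aliases remain once
 * the buffer is accessed through an uncached mapping.
 */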
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
		return;
	}
#endif

	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}

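/*
 * Per-device setup, called when firmware (DT/ACPI) bus code configures a
 * device: record whether it is DMA-coherent and warn (tainting the kernel)
 * if a non-coherent device is used although ARCH_DMA_MINALIGN is smaller
 * than the cache-management block size or non-coherent support was never
 * declared.
 */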
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);

	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: device non-coherent but no non-coherent operations supported",
		   dev_driver_string(dev), dev_name(dev));

	dev->dma_coherent = coherent;
}

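/*
 * Presumably called from early boot/erratum code once working cache
 * management for non-coherent DMA has been detected; riscv_cbom_block_size
 * is expected to be known by then, hence the warning.
 */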
void riscv_noncoherent_supported(void)
{
	WARN(!riscv_cbom_block_size,
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}

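/*
 * If boot never declared non-coherent DMA support, all devices are
 * coherent and no cache-line padding is needed for DMA buffers, so relax
 * dma_cache_alignment from ARCH_DMA_MINALIGN down to a single byte.
 */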
void __init riscv_set_dma_cache_alignment(void)
{
	if (!noncoherent_supported)
		dma_cache_alignment = 1;
}