xref: /openbmc/linux/arch/riscv/mm/dma-noncoherent.c (revision b79f300c1fd4bd83b1f827c7a0e043fca7aad73c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V specific functions to support DMA for non-coherent devices
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>

static bool noncoherent_supported __ro_after_init;

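/*
 * Optional, non-standard cache management callbacks. All members default to
 * NULL so the standard Zicbom ALT_CMO_OP() path is used unless a platform
 * registers its own hooks via riscv_noncoherent_register_cache_ops().
 */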
struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
	.wback = NULL,
	.inv = NULL,
	.wback_inv = NULL,
};

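/*
 * Write back (clean) the CPU caches covering [paddr, paddr + size) so a
 * device reading the buffer sees the CPU's latest data.
 */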
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback)) {
		noncoherent_cache_ops.wback(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}

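/*
 * Invalidate the CPU caches covering [paddr, paddr + size) so stale cache
 * lines do not hide data the device has written to memory.
 */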
static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.inv)) {
		noncoherent_cache_ops.inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}

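/*
 * Write back and invalidate (flush) the CPU caches covering
 * [paddr, paddr + size), as needed for bidirectional transfers.
 */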
static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(paddr, size);
		return;
	}
#endif

	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}

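/*
 * True if DMA_FROM_DEVICE mappings should clean the caches before the
 * transfer rather than only invalidate them; always the case on RISC-V,
 * presumably so dirty lines cannot be written back over incoming DMA data.
 */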
static inline bool arch_sync_dma_clean_before_fromdevice(void)
{
	return true;
}

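/*
 * True if arch_sync_dma_for_cpu() must invalidate again after the transfer
 * because the CPU may speculatively prefetch from the buffer while the
 * device is writing it; always the case on RISC-V.
 */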
static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
{
	return true;
}

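/*
 * Prepare a buffer for DMA: clean for DMA_TO_DEVICE, and clean or flush for
 * DMA_FROM_DEVICE/DMA_BIDIRECTIONAL depending on whether a post-DMA
 * invalidate will follow in arch_sync_dma_for_cpu().
 */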
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		arch_dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		if (!arch_sync_dma_clean_before_fromdevice()) {
			arch_dma_cache_inv(paddr, size);
			break;
		}
		fallthrough;

	case DMA_BIDIRECTIONAL:
		/* Skip the invalidate here if it's done later */
		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
		    arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_wback(paddr, size);
		else
			arch_dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

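/*
 * Hand the buffer back to the CPU after DMA: invalidate the caches for
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL so speculatively fetched lines are
 * discarded; DMA_TO_DEVICE needs no further maintenance.
 */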
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
		if (arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

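/*
 * Flush a page out of the CPU caches before it is handed out as a
 * DMA-coherent (non-cacheable) allocation.
 */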
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
		return;
	}
#endif

	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}

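/*
 * Per-device DMA setup: record coherency and warn (tainting the kernel) if
 * a non-coherent device is used while the CBOM block size exceeds
 * ARCH_DMA_MINALIGN or no cache maintenance support has been registered.
 */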
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);

	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: device non-coherent but no non-coherent operations supported",
		   dev_driver_string(dev), dev_name(dev));

	dev->dma_coherent = coherent;
}

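/*
 * Mark non-coherent DMA as supported. Expected to be called by the code
 * that detects Zicbom (or an equivalent vendor mechanism); warns if no
 * cache-block size has been probed by then.
 */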
void riscv_noncoherent_supported(void)
{
	WARN(!riscv_cbom_block_size,
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}

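/*
 * Register platform-specific cache maintenance callbacks for SoCs whose
 * cache controller is not driven by the standard CMO instructions. A NULL
 * ops pointer is ignored.
 */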
void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
{
	if (!ops)
		return;

	noncoherent_cache_ops = *ops;
}
EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
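
/*
 * Usage sketch (illustrative only, not part of this file): a platform cache
 * driver could hook in during early init roughly as follows; the my_soc_*
 * names are hypothetical placeholders.
 *
 *	static void my_soc_wback(phys_addr_t paddr, size_t size) { ... }
 *	static void my_soc_inv(phys_addr_t paddr, size_t size) { ... }
 *	static void my_soc_wback_inv(phys_addr_t paddr, size_t size) { ... }
 *
 *	static const struct riscv_nonstd_cache_ops my_soc_cmo_ops = {
 *		.wback		= &my_soc_wback,
 *		.inv		= &my_soc_inv,
 *		.wback_inv	= &my_soc_wback_inv,
 *	};
 *
 *	riscv_noncoherent_register_cache_ops(&my_soc_cmo_ops);
 */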