xref: /openbmc/linux/arch/riscv/mm/dma-noncoherent.c (revision 8f7e001e0325de63a42f23342ac3b8139150c5cf)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V specific functions to support DMA for non-coherent devices
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/cacheflush.h>

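/*
 * Cache-block size, in bytes, of the Zicbom cache management operations,
 * probed from the devicetree by riscv_init_cbom_blocksize().
 * noncoherent_supported records that the platform has confirmed these
 * operations are actually usable.
 */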
unsigned int riscv_cbom_block_size;
static bool noncoherent_supported;

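/*
 * Cache maintenance before the device accesses the buffer: dirty lines
 * covering it are written back (cleaned), and for bidirectional mappings
 * also invalidated, via ALT_CMO_OP(), which patches in the Zicbom cbo.*
 * instructions (or an errata-provided equivalent) at
 * riscv_cbom_block_size granularity.
 *
 * The DMA mapping core calls this on the dma-direct path for devices
 * marked non-coherent; a streaming mapping looks roughly like:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_device(...)
 *	... device performs DMA ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_cpu(...)
 */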
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
		break;
	case DMA_FROM_DEVICE:
		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
		break;
	case DMA_BIDIRECTIONAL:
		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
		break;
	default:
		break;
	}
}

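/*
 * Cache maintenance after the device has finished with the buffer: for
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL the affected lines are flushed
 * so the CPU does not read stale cached data.  DMA_TO_DEVICE needs no
 * work here because the device did not write to memory.
 */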
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
		break;
	default:
		break;
	}
}

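/*
 * Called before a page is handed out as a non-cacheable "coherent" DMA
 * allocation: flush it from the caches so no dirty line can later be
 * written back over data placed there through the uncached mapping or
 * by the device.
 */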
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}

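/*
 * Per-device hook called while the device is being probed: record
 * whether it is DMA-coherent and warn (tainting the kernel) if a
 * non-coherent device shows up on a kernel that either has no cache
 * management operations available or whose kmalloc() alignment
 * (ARCH_DMA_MINALIGN) is smaller than the cache block size, since
 * non-coherent DMA could then corrupt adjacent allocations sharing a
 * cache block.
 */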
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);

	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: device non-coherent but no non-coherent operations supported",
		   dev_driver_string(dev), dev_name(dev));

	dev->dma_coherent = coherent;
}

#ifdef CONFIG_RISCV_ISA_ZICBOM
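/*
 * Probe "riscv,cbom-block-size" from every CPU node in the devicetree.
 * All harts are expected to advertise the same value; mismatches are
 * reported and the first successfully probed value wins.  A CPU node
 * typically carries something like (illustrative values):
 *
 *	cpu@0 {
 *		compatible = "riscv";
 *		riscv,isa = "rv64imafdc_zicbom";
 *		riscv,cbom-block-size = <64>;
 *	};
 */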
void riscv_init_cbom_blocksize(void)
{
	struct device_node *node;
	unsigned long cbom_hartid;
	u32 val, probed_block_size;
	int ret;

	probed_block_size = 0;
	for_each_of_cpu_node(node) {
		unsigned long hartid;

		ret = riscv_of_processor_hartid(node, &hartid);
		if (ret)
			continue;

		/* set block-size for cbom extension if available */
		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
		if (ret)
			continue;

		if (!probed_block_size) {
			probed_block_size = val;
			cbom_hartid = hartid;
		} else {
			if (probed_block_size != val)
				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
					cbom_hartid, hartid);
		}
	}

	if (probed_block_size)
		riscv_cbom_block_size = probed_block_size;
}
#endif

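/*
 * Called by feature/errata probing once it has established that
 * non-coherent DMA can actually be handled (e.g. when Zicbom, or a
 * vendor equivalent, has been detected).  Warns if no cache-block size
 * was probed, since the maintenance helpers above depend on it.
 */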
void riscv_noncoherent_supported(void)
{
	WARN(!riscv_cbom_block_size,
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}