xref: /openbmc/linux/arch/riscv/mm/dma-noncoherent.c (revision 1631ba1259d6d7f49b6028f2a1a0fa02be1c522a)
1*1631ba12SHeiko Stuebner // SPDX-License-Identifier: GPL-2.0-only
2*1631ba12SHeiko Stuebner /*
3*1631ba12SHeiko Stuebner  * RISC-V specific functions to support DMA for non-coherent devices
4*1631ba12SHeiko Stuebner  *
5*1631ba12SHeiko Stuebner  * Copyright (c) 2021 Western Digital Corporation or its affiliates.
6*1631ba12SHeiko Stuebner  */
7*1631ba12SHeiko Stuebner 
8*1631ba12SHeiko Stuebner #include <linux/dma-direct.h>
9*1631ba12SHeiko Stuebner #include <linux/dma-map-ops.h>
10*1631ba12SHeiko Stuebner #include <linux/mm.h>
11*1631ba12SHeiko Stuebner #include <linux/of.h>
12*1631ba12SHeiko Stuebner #include <linux/of_device.h>
13*1631ba12SHeiko Stuebner #include <asm/cacheflush.h>
14*1631ba12SHeiko Stuebner 
/*
 * Granule used by the Zicbom cache-management instructions; defaults to
 * L1_CACHE_BYTES and may be overridden from the devicetree
 * "riscv,cbom-block-size" property in riscv_init_cbom_blocksize().
 */
static unsigned int riscv_cbom_block_size = L1_CACHE_BYTES;
/* Set via riscv_noncoherent_supported() once CMO support is detected. */
static bool noncoherent_supported;
17*1631ba12SHeiko Stuebner 
18*1631ba12SHeiko Stuebner void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
19*1631ba12SHeiko Stuebner 			      enum dma_data_direction dir)
20*1631ba12SHeiko Stuebner {
21*1631ba12SHeiko Stuebner 	void *vaddr = phys_to_virt(paddr);
22*1631ba12SHeiko Stuebner 
23*1631ba12SHeiko Stuebner 	switch (dir) {
24*1631ba12SHeiko Stuebner 	case DMA_TO_DEVICE:
25*1631ba12SHeiko Stuebner 		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
26*1631ba12SHeiko Stuebner 		break;
27*1631ba12SHeiko Stuebner 	case DMA_FROM_DEVICE:
28*1631ba12SHeiko Stuebner 		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
29*1631ba12SHeiko Stuebner 		break;
30*1631ba12SHeiko Stuebner 	case DMA_BIDIRECTIONAL:
31*1631ba12SHeiko Stuebner 		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
32*1631ba12SHeiko Stuebner 		break;
33*1631ba12SHeiko Stuebner 	default:
34*1631ba12SHeiko Stuebner 		break;
35*1631ba12SHeiko Stuebner 	}
36*1631ba12SHeiko Stuebner }
37*1631ba12SHeiko Stuebner 
38*1631ba12SHeiko Stuebner void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
39*1631ba12SHeiko Stuebner 			   enum dma_data_direction dir)
40*1631ba12SHeiko Stuebner {
41*1631ba12SHeiko Stuebner 	void *vaddr = phys_to_virt(paddr);
42*1631ba12SHeiko Stuebner 
43*1631ba12SHeiko Stuebner 	switch (dir) {
44*1631ba12SHeiko Stuebner 	case DMA_TO_DEVICE:
45*1631ba12SHeiko Stuebner 		break;
46*1631ba12SHeiko Stuebner 	case DMA_FROM_DEVICE:
47*1631ba12SHeiko Stuebner 	case DMA_BIDIRECTIONAL:
48*1631ba12SHeiko Stuebner 		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
49*1631ba12SHeiko Stuebner 		break;
50*1631ba12SHeiko Stuebner 	default:
51*1631ba12SHeiko Stuebner 		break;
52*1631ba12SHeiko Stuebner 	}
53*1631ba12SHeiko Stuebner }
54*1631ba12SHeiko Stuebner 
55*1631ba12SHeiko Stuebner void arch_dma_prep_coherent(struct page *page, size_t size)
56*1631ba12SHeiko Stuebner {
57*1631ba12SHeiko Stuebner 	void *flush_addr = page_address(page);
58*1631ba12SHeiko Stuebner 
59*1631ba12SHeiko Stuebner 	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
60*1631ba12SHeiko Stuebner }
61*1631ba12SHeiko Stuebner 
62*1631ba12SHeiko Stuebner void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
63*1631ba12SHeiko Stuebner 		const struct iommu_ops *iommu, bool coherent)
64*1631ba12SHeiko Stuebner {
65*1631ba12SHeiko Stuebner 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
66*1631ba12SHeiko Stuebner 		   TAINT_CPU_OUT_OF_SPEC,
67*1631ba12SHeiko Stuebner 		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
68*1631ba12SHeiko Stuebner 		   dev_driver_string(dev), dev_name(dev),
69*1631ba12SHeiko Stuebner 		   ARCH_DMA_MINALIGN, riscv_cbom_block_size);
70*1631ba12SHeiko Stuebner 
71*1631ba12SHeiko Stuebner 	WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
72*1631ba12SHeiko Stuebner 		   "%s %s: device non-coherent but no non-coherent operations supported",
73*1631ba12SHeiko Stuebner 		   dev_driver_string(dev), dev_name(dev));
74*1631ba12SHeiko Stuebner 
75*1631ba12SHeiko Stuebner 	dev->dma_coherent = coherent;
76*1631ba12SHeiko Stuebner }
77*1631ba12SHeiko Stuebner 
78*1631ba12SHeiko Stuebner #ifdef CONFIG_RISCV_ISA_ZICBOM
79*1631ba12SHeiko Stuebner void riscv_init_cbom_blocksize(void)
80*1631ba12SHeiko Stuebner {
81*1631ba12SHeiko Stuebner 	struct device_node *node;
82*1631ba12SHeiko Stuebner 	int ret;
83*1631ba12SHeiko Stuebner 	u32 val;
84*1631ba12SHeiko Stuebner 
85*1631ba12SHeiko Stuebner 	for_each_of_cpu_node(node) {
86*1631ba12SHeiko Stuebner 		int hartid = riscv_of_processor_hartid(node);
87*1631ba12SHeiko Stuebner 		int cbom_hartid;
88*1631ba12SHeiko Stuebner 
89*1631ba12SHeiko Stuebner 		if (hartid < 0)
90*1631ba12SHeiko Stuebner 			continue;
91*1631ba12SHeiko Stuebner 
92*1631ba12SHeiko Stuebner 		/* set block-size for cbom extension if available */
93*1631ba12SHeiko Stuebner 		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
94*1631ba12SHeiko Stuebner 		if (ret)
95*1631ba12SHeiko Stuebner 			continue;
96*1631ba12SHeiko Stuebner 
97*1631ba12SHeiko Stuebner 		if (!riscv_cbom_block_size) {
98*1631ba12SHeiko Stuebner 			riscv_cbom_block_size = val;
99*1631ba12SHeiko Stuebner 			cbom_hartid = hartid;
100*1631ba12SHeiko Stuebner 		} else {
101*1631ba12SHeiko Stuebner 			if (riscv_cbom_block_size != val)
102*1631ba12SHeiko Stuebner 				pr_warn("cbom-block-size mismatched between harts %d and %d\n",
103*1631ba12SHeiko Stuebner 					cbom_hartid, hartid);
104*1631ba12SHeiko Stuebner 		}
105*1631ba12SHeiko Stuebner 	}
106*1631ba12SHeiko Stuebner }
107*1631ba12SHeiko Stuebner #endif
108*1631ba12SHeiko Stuebner 
/*
 * Called by platform/errata setup once it has established that this CPU
 * provides cache-management operations usable for non-coherent DMA;
 * arms the sanity warning in arch_setup_dma_ops().
 */
void riscv_noncoherent_supported(void)
{
	noncoherent_supported = true;
}
113