/* /openbmc/linux/arch/mips/mm/dma-noncoherent.c (revision cbf1449ba5aec9cf4c68b69f899391a8d42e9b8f) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and the R10000 and R12000 are the only ones used in such systems:
 * the SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

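/*
 * Write back and invalidate a freshly allocated page before it is handed
 * out as a (Linux-)coherent buffer, so no dirty cachelines can later be
 * evicted on top of data the device has placed there.
 */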
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

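/*
 * Return an uncached alias for the buffer: on MIPS, offsetting the
 * physical address into the UNCAC_BASE window maps the same memory
 * uncached.
 */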
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

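/*
 * Cache maintenance before the device touches the buffer: write back
 * dirty lines the device is meant to read (DMA_TO_DEVICE), invalidate
 * lines that would otherwise be written back over incoming device data
 * (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL).
 */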
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

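/*
 * Cache maintenance after the device is done with the buffer: nothing
 * is needed for DMA_TO_DEVICE, but any lines the CPU pulled in while
 * the DMA was in flight must be invalidated before the CPU reads the
 * new data.
 */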
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

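/*
 * Entry point from the streaming DMA API, e.g. via dma_map_single() in
 * a (hypothetical) driver transmit path:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * which on a non-coherent system ends up here to push the buffer out of
 * the caches before the device reads it.
 */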
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

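/*
 * Only CPUs that may speculatively fill cachelines during the transfer
 * (see cpu_needs_post_dma_flush() above) need any post-DMA work; for
 * all others the maintenance done in arch_sync_dma_for_device() is
 * already sufficient.
 */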
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

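/*
 * Backs dma_cache_sync() for memory that was allocated with
 * DMA_ATTR_NON_CONSISTENT and therefore must be flushed explicitly by
 * the driver before handing it to the device.
 */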
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	dma_sync_virt_for_device(vaddr, size, direction);
}

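/*
 * With per-device coherence the platform decides at device setup time
 * whether each device is DMA coherent; all that is needed here is to
 * record that decision.
 */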
#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif