xref: /openbmc/linux/arch/mips/mm/dma-noncoherent.c (revision 4e0664416c70702731734ab8b3e4819a5a2c0486)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The CPUs handled in cpu_needs_post_dma_flush() below can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush after each DMA transfer completes.
 *
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology uses coherent for memory areas whose coherency is
 * maintained by hardware.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 (Indigo²) and the SGI IP32 (aka O2), respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

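/*
 * An illustrative timeline (a sketch, not taken from this file) of the
 * race that the post-DMA flush closes on the CPUs above; the entry points
 * are the standard dma_map_single()/dma_unmap_single() streaming API:
 *
 *	1. dma_map_single(dev, buf, len, DMA_FROM_DEVICE) invalidates the
 *	   buffer's cachelines and the device starts writing to RAM.
 *	2. While the DMA is in flight, the CPU speculatively refills one of
 *	   those cachelines with stale pre-DMA data.
 *	3. dma_unmap_single() must therefore invalidate again; that is the
 *	   "post-DMA flush" done via arch_sync_dma_for_cpu() below.
 */
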
/* Writeback and invalidate the buffer's cachelines before coherent use. */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

/* Return the address of the buffer through the uncached window at UNCAC_BASE. */
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

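/*
 * A worked example for arch_dma_set_uncached() above, assuming a common
 * 32-bit layout where UNCAC_BASE is CKSEG1 (0xa0000000): a buffer at
 * physical address 0x00400000 yields the uncached alias (void *)0xa0400000,
 * i.e. the same memory accessed through the uncached window.
 */
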
/* Cache maintenance before the device reads from or writes to the buffer. */
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/* Cache maintenance after the device has written to the buffer. */
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

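/*
 * Summary of the maintenance operations performed by the two helpers
 * above, per DMA direction:
 *
 *	direction		for_device	for_cpu
 *	DMA_TO_DEVICE		wback		none
 *	DMA_FROM_DEVICE		inv		inv (speculation)
 *	DMA_BIDIRECTIONAL	wback_inv	inv (speculation)
 */
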
/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

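/*
 * Example of the chunking done by dma_sync_phys() above, assuming 4 KiB
 * pages: syncing 6 KiB starting 2 KiB into a highmem page is split into a
 * 2 KiB chunk up to the end of the first page and a 4 KiB chunk covering
 * the following page, each mapped with kmap_atomic() separately.
 */
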
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

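/*
 * How the hooks above are reached (a sketch of the generic dma-direct
 * flow for a non-coherent device; see kernel/dma/direct.c):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, dir);
 *		-> arch_sync_dma_for_device(virt_to_phys(buf), len, dir)
 *	... DMA transfer runs ...
 *	dma_unmap_single(dev, handle, len, dir);
 *		-> arch_sync_dma_for_cpu(paddr, len, dir)
 */
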
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif
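
/*
 * Note: the generic DMA code consults dev->dma_coherent (via
 * dev_is_dma_coherent()) and skips the arch_sync_dma_* hooks above for
 * devices marked coherent here, e.g. when firmware describes the device
 * with a "dma-coherent" devicetree property.
 */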