xref: /openbmc/linux/arch/mips/mm/dma-noncoherent.c (revision 58b0440663ec11372befb8ead0ee7099d8878590)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The CPUs handled in cpu_needs_post_dma_flush() below can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush after the DMA transfer has completed.
 *
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology calls memory areas with hardware-maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems, and the only such systems use the R10000 and R12000: the SGI
 * IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

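/*
 * Allocate a DMA-coherent buffer through the generic direct allocator.
 * Unless the caller asked for a non-consistent mapping, write back and
 * invalidate the cachelines covering the buffer and return an uncached
 * alias of it, so that CPU accesses bypass the caches entirely.
 */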
void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = (void *)UNCAC_ADDR(ret);
	}

	return ret;
}

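/*
 * Undo arch_dma_alloc(): translate the uncached alias back to the cached
 * address (unless the allocation was non-consistent) before handing the
 * pages back to the direct allocator.
 */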
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

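/*
 * Coherent allocations are returned as uncached aliases, so translate back
 * to the cached address before deriving the page frame number used to map
 * the buffer into userspace.
 */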
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);

	return page_to_pfn(virt_to_page((void *)addr));
}

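/*
 * Userspace mappings of coherent memory must bypass the caches as well:
 * default to an uncached protection, or write-combine when the caller
 * passed DMA_ATTR_WRITE_COMBINE.
 */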
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
	return pgprot_noncached(prot);
}

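/*
 * Cache maintenance for a virtually addressed buffer: write back dirty
 * lines before a transfer to the device, invalidate possibly stale lines
 * for a transfer from the device, and do both for bidirectional DMA.
 */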
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			/*
			 * Highmem pages have to be mapped and synced one
			 * page at a time, so clamp the length to the
			 * current page.
			 */
			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem is virtually contiguous in the direct map,
			 * so the remainder can be synced in one go.  Use len
			 * (not size) so we never sync past the requested
			 * region when earlier iterations already handled
			 * highmem pages.
			 */
			dma_sync_virt(page_address(page) + offset, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

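/*
 * Make CPU-side writes visible to the device (and/or discard CPU-cached
 * copies of the buffer) before the DMA transfer is started.
 */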
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

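/*
 * After device-to-memory DMA, CPUs that speculatively fill cachelines may
 * have pulled stale data into the caches while the transfer was in flight,
 * so the buffer has to be invalidated again before the CPU reads it.
 * Non-speculating CPUs need no post-DMA maintenance.
 */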
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush(dev))
		dma_sync_phys(paddr, size, dir);
}

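/*
 * Backend for dma_cache_sync(), used with memory obtained from a
 * DMA_ATTR_NON_CONSISTENT allocation: the caller passes a kernel virtual
 * address, so sync it directly.
 */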
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}
162