xref: /openbmc/linux/arch/mips/mm/dma-noncoherent.c (revision f8c55dc6e828324fc58c0bb32d72a5a4041d1c3b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

#ifdef CONFIG_DMA_PERDEV_COHERENT
static inline int dev_is_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}
#else
static inline int dev_is_coherent(struct device *dev)
{
	switch (coherentio) {
	default:
	case IO_COHERENCE_DEFAULT:
		return hw_coherentio;
	case IO_COHERENCE_ENABLED:
		return 1;
	case IO_COHERENCE_DISABLED:
		return 0;
	}
}
#endif /* CONFIG_DMA_PERDEV_COHERENT */

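/*
 * Background note (summarizing <asm/dma-coherence.h>, not part of the
 * original file): coherentio is the global I/O-coherence policy and
 * hw_coherentio records what the platform probe detected.  On boards
 * such as Malta the policy is typically chosen at boot with a
 * "coherentio" kernel command-line parameter.
 */
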
/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems, and the only such systems use the R10000 or R12000: the SGI IP28
 * Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	if (dev_is_coherent(dev))
		return false;

	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

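/*
 * Illustrative sketch (not part of this file): the check above matters
 * for the streaming DMA API.  A driver receiving data from a device
 * does roughly
 *
 *	dma_addr_t d = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_unmap_single(dev, d, len, DMA_FROM_DEVICE);
 *
 * dma_map_single() ends up in arch_sync_dma_for_device() below, which
 * invalidates the buffer before the transfer; dma_unmap_single() ends
 * up in arch_sync_dma_for_cpu(), which on the speculating CPUs listed
 * above must invalidate again in case stale lines were refetched while
 * the DMA was in flight.
 */
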
void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
	if (!ret)
		return NULL;

	if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

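/*
 * Illustrative usage (not part of this file): drivers reach
 * arch_dma_alloc() through the generic coherent API, e.g.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, handle);
 *
 * On a non-coherent device the returned pointer is the uncached alias
 * produced by UNCAC_ADDR() above (KSEG1 on 32-bit kernels), so plain
 * loads and stores bypass the caches.
 */
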
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
	dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!dev_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

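/*
 * Illustrative usage (not part of this file; foo_* is a made-up
 * driver): such a buffer is typically exposed to userspace through a
 * driver's mmap file operation, e.g.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_handle, fd->size);
 *	}
 *
 * dma_mmap_coherent() funnels into arch_dma_mmap() above, which remaps
 * the pages uncached (or write-combined) into the user's address space.
 */
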
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

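/*
 * The mapping above follows the usual convention for non-coherent DMA:
 *
 *	DMA_TO_DEVICE		writeback	(push dirty lines to RAM)
 *	DMA_FROM_DEVICE		invalidate	(drop lines the device
 *						 will overwrite)
 *	DMA_BIDIRECTIONAL	writeback + invalidate
 */
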
/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem pages are in the linear mapping and thus
			 * virtually contiguous, so the whole remainder can
			 * be synced in one call; use the remaining length,
			 * not the full size.
			 */
			dma_sync_virt(page_address(page) + offset, left, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

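/*
 * Worked example of the clamping above (illustrative numbers, assuming
 * 4K pages): for a highmem buffer whose paddr ends in 0xf00 with
 * size 0x300, the first pass maps the page and syncs
 * len = PAGE_SIZE - 0xf00 = 0x100 bytes; the second pass starts at
 * offset 0 on the next page and syncs the remaining 0x200 bytes.
 */
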
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (!dev_is_coherent(dev))
		dma_sync_phys(paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush(dev))
		dma_sync_phys(paddr, size, dir);
}

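/*
 * Illustrative sketch (not part of this file): besides map/unmap, these
 * two hooks also back the partial-sync API, which lets a driver hand a
 * long-lived mapping back and forth without remapping it:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU inspects the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... device writes again ...
 */
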
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!dev_is_coherent(dev))
		dma_sync_virt(vaddr, size, direction);
}
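
/*
 * Illustrative usage (not part of this file): arch_dma_cache_sync() is
 * the backend for dma_cache_sync(), used with memory allocated via
 * DMA_ATTR_NON_CONSISTENT, where the driver manages coherency itself:
 *
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 *	...
 *	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
 *
 * With that attribute arch_dma_alloc() above skips UNCAC_ADDR(), so the
 * buffer stays cached and these explicit syncs are mandatory.
 */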