// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The CPUs handled in cpu_needs_post_dma_flush() below can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

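/*
 * Called when a non-coherent DMA allocation is first set up: write back
 * and invalidate any cachelines covering the new buffer, so that no dirty
 * line can later be evicted on top of data the device has written.
 */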
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

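/*
 * Return an uncached alias for a buffer in the kernel's linear mapping.
 * On MIPS the physical address plus UNCAC_BASE lands in an unmapped,
 * uncached segment (KSEG1 or XKPHYS), so no page table changes are needed
 * to access the buffer uncached.
 */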
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

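/*
 * Perform the cache maintenance matching the DMA direction on a virtually
 * addressed range:
 *
 *   DMA_TO_DEVICE:	write back, so the device sees the CPU's data
 *   DMA_FROM_DEVICE:	invalidate, so the CPU re-reads what the device wrote
 *   DMA_BIDIRECTIONAL:	write back and invalidate, covering both cases
 */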
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem is contiguous in the kernel's direct
			 * mapping, so the whole remainder of the buffer
			 * can be synced in one go.
			 */
			dma_sync_virt(page_address(page) + offset, left, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

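/*
 * Entry point used by the generic dma-direct code before a buffer is
 * handed to the device, e.g. as part of dma_map_single().  A sketched
 * driver sequence that ends up here:
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		-> arch_sync_dma_for_device(virt_to_phys(buf), len, dir)
 *
 * which writes the CPU's dirty cachelines out before the device reads
 * the buffer.
 */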
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

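/*
 * The CPU-side counterpart runs after the device is done with the buffer,
 * e.g. from dma_unmap_single().  It only has work to do on CPUs that can
 * speculatively refill cachelines while the DMA is in flight (see
 * cpu_needs_post_dma_flush() above); elsewhere the maintenance already
 * done for the device is sufficient.
 */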
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir);
}
#endif

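/*
 * Backs dma_cache_sync() for memory allocated with DMA_ATTR_NON_CONSISTENT.
 * The caller hands in a kernel virtual address, so the highmem-aware
 * physical path above is not needed here.
 */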
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}

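/*
 * With CONFIG_DMA_PERDEV_COHERENT, coherence is a per-device property
 * rather than a system-wide one, so record what the platform or bus code
 * reported for this device; the generic DMA code then consults
 * dev->dma_coherent.
 */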
#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif