xref: /openbmc/linux/arch/mips/mm/dma-noncoherent.c (revision a202bf71f08b3ef15356db30535e30b03cf23aec)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs in cpu_needs_post_dma_flush() below can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 would also need to be checked here.
 * However this function is only called on non-I/O-coherent systems, and
 * only the R10000 and R12000 are used in such systems: the SGI IP28
 * Indigo² and the SGI IP32 aka O2, respectively.
 */
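/*
 * Concretely, for a DMA_FROM_DEVICE transfer the hazard looks roughly like
 * this (an illustrative timeline, not tied to any specific driver):
 *
 *   1. The CPU invalidates the buffer's cachelines and starts the DMA.
 *   2. While the device is still writing, the CPU speculatively prefetches
 *      a line covering part of the buffer, caching the pre-DMA contents.
 *   3. The DMA completes; without a second invalidate the CPU would now
 *      read the stale speculatively-filled line instead of the device data.
 *
 * The post-DMA flush in arch_sync_dma_for_cpu() closes this window.
 */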
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

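/*
 * Called by the generic DMA code when a non-coherent buffer is first
 * allocated: write back and invalidate the buffer's cacheable (linear map)
 * alias so that no dirty cachelines can later be evicted on top of data
 * that is accessed through an uncached alias.
 */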
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

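/*
 * Return the uncached alias of a kernel address by adding UNCAC_BASE to
 * its physical address. On 32-bit MIPS this typically lands in KSEG1; on
 * 64-bit it is normally the uncached XKPHYS window. By the time we get
 * here the generic allocator is expected to have flushed the cacheable
 * alias via arch_dma_prep_coherent() above.
 */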
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

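/*
 * Map a DMA direction onto the matching cache maintenance operation:
 *
 *   DMA_TO_DEVICE:      write back, so the device reads the CPU's latest
 *                       data rather than whatever happens to be in RAM.
 *   DMA_FROM_DEVICE:    invalidate, so the CPU refetches what the device
 *                       wrote instead of serving stale cached lines.
 *   DMA_BIDIRECTIONAL:  both of the above.
 */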
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem pages are contiguous in the linear map, so
			 * the remainder can be synced in one call.  Note this
			 * must be len (the bytes still left), not size:
			 * earlier highmem iterations may already have covered
			 * part of the buffer.
			 */
			dma_sync_virt(page_address(page) + offset, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

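/*
 * Called before ownership of the buffer is handed to the device: push CPU
 * cache state out to memory (or discard it) so the transfer starts from a
 * clean cache. On non-coherent MIPS systems this is needed unconditionally.
 */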
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

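/*
 * Called after the device is done with the buffer: only CPUs that can
 * speculatively refill cachelines mid-transfer (see
 * cpu_needs_post_dma_flush() above) need a second invalidate here; on all
 * other CPUs the maintenance done in arch_sync_dma_for_device() still holds.
 */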
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir);
}
#endif

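/*
 * Backs the dma_cache_sync() API, which in this kernel's DMA model lets
 * drivers that allocate memory with DMA_ATTR_NON_CONSISTENT perform
 * explicit cache maintenance on the buffer's kernel virtual address.
 */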
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}

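/*
 * With CONFIG_DMA_PERDEV_COHERENT, coherence is a per-device property
 * rather than a global one: record what the platform/bus code told us so
 * the generic DMA code can take the coherent or non-coherent path per
 * device.
 */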
#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif